| Column    | Type         | Stats             |
|-----------|--------------|-------------------|
| file_name | large_string | lengths 4 to 140  |
| prefix    | large_string | lengths 0 to 12.1k |
| suffix    | large_string | lengths 0 to 12k  |
| middle    | large_string | lengths 0 to 7.51k |
| fim_type  | large_string | 4 classes         |
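The five columns describe fill-in-the-middle (FIM) training records: each row splits one source file into a `prefix`, a masked `middle`, and a `suffix`, with `fim_type` naming the masking strategy. The four strategies seen in this excerpt are `identifier_body`, `conditional_block`, `random_line_split`, and `identifier_name`. A minimal sketch of one record, assuming only the schema above (the field semantics are inferred from the rows that follow, not stated by the dataset):

```python
from dataclasses import dataclass

@dataclass
class FimRecord:
    file_name: str  # source file name, 4 to 140 chars per the schema
    prefix: str     # text before the masked span
    suffix: str     # text after the masked span
    middle: str     # the masked span a model learns to infill
    fim_type: str   # one of the 4 masking strategies seen below

    def reassemble(self) -> str:
        # Concatenation should reproduce the original file text.
        return self.prefix + self.middle + self.suffix
```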
laptop.pb.go
} func (m *Laptop) GetKeyboard() *Keyboard { if m != nil { return m.Keyboard } return nil } type isLaptop_Weight interface { isLaptop_Weight() } type Laptop_WeightKg struct { WeightKg float64 `protobuf:"fixed64,10,opt,name=weight_kg,json=weightKg,proto3,oneof"` } type Laptop_WeightLb struct { WeightLb float64 `protobuf:"fixed64,11,opt,name=weight_lb,json=weightLb,proto3,oneof"` } func (*Laptop_WeightKg) isLaptop_Weight() {} func (*Laptop_WeightLb) isLaptop_Weight() {} func (m *Laptop) GetWeight() isLaptop_Weight { if m != nil { return m.Weight } return nil } func (m *Laptop) GetWeightKg() float64 { if x, ok := m.GetWeight().(*Laptop_WeightKg); ok { return x.WeightKg } return 0 } func (m *Laptop) GetWeightLb() float64 { if x, ok := m.GetWeight().(*Laptop_WeightLb); ok { return x.WeightLb } return 0 } func (m *Laptop) GetPriceUsd() float64 { if m != nil { return m.PriceUsd } return 0 } func (m *Laptop) GetReleaseYear() uint32 { if m != nil { return m.ReleaseYear } return 0 } func (m *Laptop) GetUpdatedAt() *timestamp.Timestamp { if m != nil { return m.UpdatedAt } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*Laptop) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Laptop_WeightKg)(nil), (*Laptop_WeightLb)(nil), } } type CreateLaptopRequest struct { Laptop *Laptop `protobuf:"bytes,1,opt,name=laptop,proto3" json:"laptop,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopRequest) Reset() { *m = CreateLaptopRequest{} } func (m *CreateLaptopRequest) String() string { return proto.CompactTextString(m) } func (*CreateLaptopRequest) ProtoMessage() {} func (*CreateLaptopRequest) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{1} } func (m *CreateLaptopRequest) XXX_Unmarshal(b []byte) error
func (m *CreateLaptopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopRequest.Marshal(b, m, deterministic) } func (m *CreateLaptopRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopRequest.Merge(m, src) } func (m *CreateLaptopRequest) XXX_Size() int { return xxx_messageInfo_CreateLaptopRequest.Size(m) } func (m *CreateLaptopRequest) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopRequest.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopRequest proto.InternalMessageInfo func (m *CreateLaptopRequest) GetLaptop() *Laptop { if m != nil { return m.Laptop } return nil } type CreateLaptopResponse struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopResponse) Reset() { *m = CreateLaptopResponse{} } func (m *CreateLaptopResponse) String() string { return proto.CompactTextString(m) } func (*CreateLaptopResponse) ProtoMessage() {} func (*CreateLaptopResponse) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{2} } func (m *CreateLaptopResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateLaptopResponse.Unmarshal(m, b) } func (m *CreateLaptopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopResponse.Marshal(b, m, deterministic) } func (m *CreateLaptopResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopResponse.Merge(m, src) } func (m *CreateLaptopResponse) XXX_Size() int { return xxx_messageInfo_CreateLaptopResponse.Size(m) } func (m *CreateLaptopResponse) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopResponse.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopResponse proto.InternalMessageInfo func (m *CreateLaptopResponse) GetId() string { if m != nil { return m.ID } return "" } func init() { proto.RegisterType((*Laptop)(nil), "pc.Laptop") proto.RegisterType((*CreateLaptopRequest)(nil), "pc.CreateLaptopRequest") proto.RegisterType((*CreateLaptopResponse)(nil), "pc.CreateLaptopResponse") } func init() { proto.RegisterFile("laptop.proto", fileDescriptor_28a7e4886f546705) } var fileDescriptor_28a7e4886f546705 = []byte{ // 459 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x6f, 0xd3, 0x30, 0x14, 0xc7, 0x49, 0xd3, 0x76, 0xe9, 0x6b, 0x5a, 0x24, 0x33, 0x09, 0xd3, 0x81, 0x08, 0x3d, 0x40, 0x4e, 0xa9, 0x34, 0x4e, 0x3b, 0x6e, 0x3b, 0x80, 0xb4, 0x21, 0x4d, 0x2e, 0x3d, 0xc0, 0xa5, 0x72, 0x92, 0x47, 0x88, 0xd6, 0xd4, 0xc6, 0x76, 0x40, 0xfd, 0x37, 0xf9, 0x8b, 0x50, 0x6c, 0x67, 0xec, 0xc7, 0xcd, 0xef, 0xf3, 0xfd, 0x5a, 0xef, 0x27, 0xc4, 0x3b, 0x2e, 0x8d, 0x90, 0x99, 0x54, 0xc2, 0x08, 0x32, 0x90, 0xc5, 0xe2, 0xb9, 0x54, 0xa2, 0x40, 0xad, 0x85, 0x72, 0x70, 0x11, 0x37, 0xd8, 0x08, 0x75, 0xf0, 0xd1, 0x4c, 0x1b, 0xa1, 0x78, 0x85, 0xbd, 0xa8, 0x0b, 0x85, 0xb8, 0xf7, 0xd1, 0xfc, 0x16, 0x0f, 0xb9, 0xe0, 0xaa, 0xf4, 0xf1, 0xdb, 0x4a, 0x88, 0x6a, 0x87, 0x2b, 0x1b, 0xe5, 0xed, 0x8f, 0x95, 0xa9,
{ return xxx_messageInfo_CreateLaptopRequest.Unmarshal(m, b) }
identifier_body
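In the laptop.pb.go row above, `identifier_body` masks the brace-delimited body of `XXX_Unmarshal`: the prefix ends at the function signature and the middle is the body itself, braces included. A hedged sketch of how such a split could be derived; the dataset's actual extraction code is not part of this excerpt, and real tooling would use a language parser rather than this naive brace scan (which ignores braces inside strings and comments):

```python
def split_identifier_body(source: str, signature: str):
    """Return (prefix, middle, suffix) with the braced body that follows
    `signature` as the middle. Naive brace matching, for illustration only."""
    start = source.index(signature) + len(signature)
    depth = 0
    for i in range(start, len(source)):
        if source[i] == "{":
            depth += 1
        elif source[i] == "}":
            depth -= 1
            if depth == 0:
                return source[:start], source[start:i + 1], source[i + 1:]
    raise ValueError("unbalanced braces after signature")
```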
renderer.rs
> { pub fn new(context: &gpukit::Context, value: T) -> Self { let buffer = context .build_buffer() .with_usage(wgpu::BufferUsages::UNIFORM) .init_with_data(std::slice::from_ref(&value)); UniformBuffer { buffer, value } } fn update(&self, context: &gpukit::Context) { self.buffer .update(context, std::slice::from_ref(&self.value)); } } #[repr(transparent)] #[derive(Debug, Copy, Clone)] struct Vertex(egui::paint::Vertex); // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. unsafe impl bytemuck::Pod for Vertex {} unsafe impl bytemuck::Zeroable for Vertex {} impl Vertex { // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. fn cast_slice(vertices: &[egui::paint::Vertex]) -> &[Vertex] { let ptr = vertices.as_ptr() as *const Vertex; let len = vertices.len(); unsafe { std::slice::from_raw_parts(ptr, len) } } } pub struct RendererDescriptor { pub context: Arc<gpukit::Context>, pub target_format: wgpu::TextureFormat, // Size of the screen: [width, height] pub size: [u32; 2], // Number of pixels per point pub pixels_per_point: f32, } impl Renderer { pub fn new(desc: RendererDescriptor) -> anyhow::Result<Renderer> { let RendererDescriptor { context, target_format, size, pixels_per_point, } = desc; let vertex_buffer = Self::create_vertex_buffer(&context, 0); let index_buffer = Self::create_index_buffer(&context, 0); let uniforms = UniformBuffer::new( &context, ScreenUniforms { width_in_points: size[0] as f32 / pixels_per_point, height_in_points: size[1] as f32 / pixels_per_point, pixels_per_point, _padding: 0, }, ); let sampler = context.device.create_sampler(&wgpu::SamplerDescriptor { label: None, address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge, mag_filter: wgpu::FilterMode::Nearest, min_filter: wgpu::FilterMode::Linear, ..Default::default() }); let bind_group = gpukit::BindGroup::new( &context, &Bindings { screen_uniforms: &uniforms.buffer, sampler: &sampler, }, ); let texture = context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data([1, 1], &[0]); let texture_bind_group = gpukit::BindGroup::new( &context, &TextureBindings { texture: &texture.create_view(), }, ); let vertex = context .build_shader("gukit_egui vertex shader") .init_from_glsl(include_str!("shader.vert"), gpukit::ShaderStage::Vertex)?; let fragment = context .build_shader("gukit_egui fragment shader") .init_from_glsl(include_str!("shader.frag"), gpukit::ShaderStage::Fragment)?; let pipeline = context.create_render_pipeline(gpukit::RenderPipelineDescriptor { label: Some("gpukit_egui renderer"), vertex: vertex.entry("main"), fragment: fragment.entry("main"), vertex_buffers: &[gpukit::vertex_buffer_layout![ // Position 0 => Float32x2, // Texture Coordinates 1 => Float32x2, // Color 2 => Uint32, ]], bind_group_layouts: &[&bind_group.layout, &texture_bind_group.layout], color_targets: &[wgpu::ColorTargetState { format: target_format, blend: Some(wgpu::BlendState { color: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::One, dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha, operation: wgpu::BlendOperation::Add, }, alpha: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::OneMinusDstAlpha, dst_factor: wgpu::BlendFactor::One, operation: wgpu::BlendOperation::Add, }, }), write_mask: wgpu::ColorWrites::all(), }], depth_stencil: None, primitive: wgpu::PrimitiveState { topology: wgpu::PrimitiveTopology::TriangleList, ..Default::default() }, })?; Ok(Renderer { context, pipeline, vertex_buffer, index_buffer, bind_group, 
screen_uniforms: uniforms, texture_bind_group, texture_version: None, texture, }) } fn create_vertex_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<Vertex> { context .build_buffer() .with_usage(wgpu::BufferUsages::VERTEX) .init_with_capacity(len) } fn create_index_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<u32> { context .build_buffer() .with_usage(wgpu::BufferUsages::INDEX) .init_with_capacity(len) } pub fn set_size(&mut self, size: [u32; 2], scale_factor: f32) { self.screen_uniforms.width_in_points = size[0] as f32 / scale_factor; self.screen_uniforms.height_in_points = size[1] as f32 / scale_factor; self.screen_uniforms.pixels_per_point = scale_factor; self.screen_uniforms.update(&self.context) } pub fn render<'encoder>( &'encoder mut self, rpass: &mut wgpu::RenderPass<'encoder>, meshes: &[egui::ClippedMesh], texture: &egui::Texture, ) { use gpukit::RenderPassExt; let offsets = self.update_buffers(meshes); self.update_texture(texture); rpass.set_pipeline(&self.pipeline); rpass.set_vertex_buffer(0, *self.vertex_buffer.slice(..)); rpass.set_index_buffer_ext(self.index_buffer.slice(..)); rpass.set_bind_group(0, &self.bind_group, &[]); rpass.set_bind_group(1, &self.texture_bind_group, &[]); for (egui::ClippedMesh(rect, mesh), offset) in meshes.iter().zip(offsets) { if Self::set_scissor_region(rpass, &self.screen_uniforms, *rect) { let index_range = offset.index..offset.index + mesh.indices.len() as u32; rpass.draw_indexed(index_range, offset.vertex as i32, 0..1); } } } fn set_scissor_region( rpass: &mut wgpu::RenderPass, screen: &ScreenUniforms, rect: egui::Rect, ) -> bool { let left = rect.left() * screen.pixels_per_point; let right = rect.right() * screen.pixels_per_point; let top = rect.top() * screen.pixels_per_point; let bottom = rect.bottom() * screen.pixels_per_point; let screen_width = screen.width_in_points * screen.pixels_per_point; let screen_height = screen.height_in_points * screen.pixels_per_point; let left = left.clamp(0.0, screen_width); let top = top.clamp(0.0, screen_height); let right = right.clamp(left, screen_width); let bottom = bottom.clamp(top, screen_height); let left = left.round() as u32; let top = top.round() as u32; let right = right.round() as u32; let bottom = bottom.round() as u32; let width = right - left; let height = bottom - top; if width == 0 || height == 0 { false } else { rpass.set_scissor_rect(left, top, width, height); true } } fn update_buffers(&mut self, meshes: &[egui::ClippedMesh]) -> Vec<BufferOffset> { let mut offsets = Vec::with_capacity(meshes.len()); // Find out how many vertices/indices we need to render let mut vertex_count = 0; let mut index_count = 0; for egui::ClippedMesh(_, mesh) in meshes { offsets.push(BufferOffset { vertex: vertex_count, index: index_count, }); vertex_count += align_to_power_of_two(mesh.vertices.len() as u32, BUFFER_ALIGNMENT); index_count += align_to_power_of_two(mesh.indices.len() as u32, BUFFER_ALIGNMENT); } // Allocate space for the vertices/indices if vertex_count as usize > self.vertex_buffer.len()
{ self.vertex_buffer = Self::create_vertex_buffer(&self.context, vertex_count as usize); }
conditional_block
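The renderer.rs row above masks the conditional block that reallocates the vertex buffer when it is too small. When records like these are used for training, prefix and suffix are typically joined with sentinel tokens so the model generates the middle last. A sketch assuming StarCoder-style sentinels; the tokens this particular dataset targets are not specified in the excerpt:

```python
def to_fim_training_text(prefix: str, suffix: str, middle: str) -> str:
    # PSM (prefix-suffix-middle) ordering: the model conditions on both
    # sides of the hole, then produces the middle.
    return f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>{middle}"
```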
renderer.rs
8Unorm>, } #[repr(C)] #[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] struct ScreenUniforms { width_in_points: f32, height_in_points: f32, pixels_per_point: f32, _padding: u32, } struct UniformBuffer<T: bytemuck::Pod> { buffer: gpukit::Buffer<T>, value: T, } impl<T: bytemuck::Pod> std::ops::Deref for UniformBuffer<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.value } } impl<T: bytemuck::Pod> std::ops::DerefMut for UniformBuffer<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.value } } impl<T: bytemuck::Pod> UniformBuffer<T> { pub fn new(context: &gpukit::Context, value: T) -> Self { let buffer = context .build_buffer() .with_usage(wgpu::BufferUsages::UNIFORM) .init_with_data(std::slice::from_ref(&value)); UniformBuffer { buffer, value } } fn update(&self, context: &gpukit::Context) { self.buffer .update(context, std::slice::from_ref(&self.value)); } } #[repr(transparent)] #[derive(Debug, Copy, Clone)] struct Vertex(egui::paint::Vertex); // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. unsafe impl bytemuck::Pod for Vertex {} unsafe impl bytemuck::Zeroable for Vertex {} impl Vertex { // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. fn cast_slice(vertices: &[egui::paint::Vertex]) -> &[Vertex] { let ptr = vertices.as_ptr() as *const Vertex; let len = vertices.len(); unsafe { std::slice::from_raw_parts(ptr, len) } } } pub struct RendererDescriptor { pub context: Arc<gpukit::Context>, pub target_format: wgpu::TextureFormat, // Size of the screen: [width, height] pub size: [u32; 2], // Number of pixels per point pub pixels_per_point: f32, } impl Renderer { pub fn new(desc: RendererDescriptor) -> anyhow::Result<Renderer> { let RendererDescriptor { context, target_format, size, pixels_per_point, } = desc; let vertex_buffer = Self::create_vertex_buffer(&context, 0); let index_buffer = Self::create_index_buffer(&context, 0); let uniforms = UniformBuffer::new( &context, ScreenUniforms { width_in_points: size[0] as f32 / pixels_per_point, height_in_points: size[1] as f32 / pixels_per_point, pixels_per_point, _padding: 0, }, ); let sampler = context.device.create_sampler(&wgpu::SamplerDescriptor { label: None, address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge, mag_filter: wgpu::FilterMode::Nearest, min_filter: wgpu::FilterMode::Linear, ..Default::default() }); let bind_group = gpukit::BindGroup::new( &context, &Bindings { screen_uniforms: &uniforms.buffer, sampler: &sampler, }, ); let texture = context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data([1, 1], &[0]); let texture_bind_group = gpukit::BindGroup::new( &context, &TextureBindings { texture: &texture.create_view(), }, ); let vertex = context .build_shader("gukit_egui vertex shader") .init_from_glsl(include_str!("shader.vert"), gpukit::ShaderStage::Vertex)?; let fragment = context .build_shader("gukit_egui fragment shader") .init_from_glsl(include_str!("shader.frag"), gpukit::ShaderStage::Fragment)?; let pipeline = context.create_render_pipeline(gpukit::RenderPipelineDescriptor { label: Some("gpukit_egui renderer"), vertex: vertex.entry("main"), fragment: fragment.entry("main"), vertex_buffers: &[gpukit::vertex_buffer_layout![ // Position 0 => Float32x2, // Texture Coordinates 1 => Float32x2, // Color
color_targets: &[wgpu::ColorTargetState { format: target_format, blend: Some(wgpu::BlendState { color: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::One, dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha, operation: wgpu::BlendOperation::Add, }, alpha: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::OneMinusDstAlpha, dst_factor: wgpu::BlendFactor::One, operation: wgpu::BlendOperation::Add, }, }), write_mask: wgpu::ColorWrites::all(), }], depth_stencil: None, primitive: wgpu::PrimitiveState { topology: wgpu::PrimitiveTopology::TriangleList, ..Default::default() }, })?; Ok(Renderer { context, pipeline, vertex_buffer, index_buffer, bind_group, screen_uniforms: uniforms, texture_bind_group, texture_version: None, texture, }) } fn create_vertex_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<Vertex> { context .build_buffer() .with_usage(wgpu::BufferUsages::VERTEX) .init_with_capacity(len) } fn create_index_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<u32> { context .build_buffer() .with_usage(wgpu::BufferUsages::INDEX) .init_with_capacity(len) } pub fn set_size(&mut self, size: [u32; 2], scale_factor: f32) { self.screen_uniforms.width_in_points = size[0] as f32 / scale_factor; self.screen_uniforms.height_in_points = size[1] as f32 / scale_factor; self.screen_uniforms.pixels_per_point = scale_factor; self.screen_uniforms.update(&self.context) } pub fn render<'encoder>( &'encoder mut self, rpass: &mut wgpu::RenderPass<'encoder>, meshes: &[egui::ClippedMesh], texture: &egui::Texture, ) { use gpukit::RenderPassExt; let offsets = self.update_buffers(meshes); self.update_texture(texture); rpass.set_pipeline(&self.pipeline); rpass.set_vertex_buffer(0, *self.vertex_buffer.slice(..)); rpass.set_index_buffer_ext(self.index_buffer.slice(..)); rpass.set_bind_group(0, &self.bind_group, &[]); rpass.set_bind_group(1, &self.texture_bind_group, &[]); for (egui::ClippedMesh(rect, mesh), offset) in meshes.iter().zip(offsets) { if Self::set_scissor_region(rpass, &self.screen_uniforms, *rect) { let index_range = offset.index..offset.index + mesh.indices.len() as u32; rpass.draw_indexed(index_range, offset.vertex as i32, 0..1); } } } fn set_scissor_region( rpass: &mut wgpu::RenderPass, screen: &ScreenUniforms, rect: egui::Rect, ) -> bool { let left = rect.left() * screen.pixels_per_point; let right = rect.right() * screen.pixels_per_point; let top = rect.top() * screen.pixels_per_point; let bottom = rect.bottom() * screen.pixels_per_point; let screen_width = screen.width_in_points * screen.pixels_per_point; let screen_height = screen.height_in_points * screen.pixels_per_point; let left = left.clamp(0.0, screen_width); let top = top.clamp(0.0, screen_height); let right = right.clamp(left, screen_width); let bottom = bottom.clamp(top, screen_height); let left = left.round() as u32; let top = top.round() as u32; let right = right.round() as u32; let bottom = bottom.round() as u32; let width = right - left; let height = bottom - top; if width == 0 || height == 0 { false } else { rpass.set_scissor_rect(left, top
2 => Uint32, ]], bind_group_layouts: &[&bind_group.layout, &texture_bind_group.layout],
random_line_split
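`random_line_split`, as in the row above, appears to cut the file at line boundaries rather than at a syntactic unit; the middle here is an arbitrary run of lines from the pipeline descriptor. A hedged sketch of such a splitter (the dataset's actual parameters, such as span length limits, are unknown):

```python
import random

def random_line_split(source: str, rng: random.Random):
    """Mask a random contiguous run of whole lines."""
    lines = source.splitlines(keepends=True)
    i, j = sorted(rng.sample(range(len(lines) + 1), 2))
    return "".join(lines[:i]), "".join(lines[i:j]), "".join(lines[j:])
```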
renderer.rs
Unorm>, } #[repr(C)] #[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] struct ScreenUniforms { width_in_points: f32, height_in_points: f32, pixels_per_point: f32, _padding: u32, } struct UniformBuffer<T: bytemuck::Pod> { buffer: gpukit::Buffer<T>, value: T, } impl<T: bytemuck::Pod> std::ops::Deref for UniformBuffer<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.value } } impl<T: bytemuck::Pod> std::ops::DerefMut for UniformBuffer<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.value } } impl<T: bytemuck::Pod> UniformBuffer<T> { pub fn
(context: &gpukit::Context, value: T) -> Self { let buffer = context .build_buffer() .with_usage(wgpu::BufferUsages::UNIFORM) .init_with_data(std::slice::from_ref(&value)); UniformBuffer { buffer, value } } fn update(&self, context: &gpukit::Context) { self.buffer .update(context, std::slice::from_ref(&self.value)); } } #[repr(transparent)] #[derive(Debug, Copy, Clone)] struct Vertex(egui::paint::Vertex); // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. unsafe impl bytemuck::Pod for Vertex {} unsafe impl bytemuck::Zeroable for Vertex {} impl Vertex { // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. fn cast_slice(vertices: &[egui::paint::Vertex]) -> &[Vertex] { let ptr = vertices.as_ptr() as *const Vertex; let len = vertices.len(); unsafe { std::slice::from_raw_parts(ptr, len) } } } pub struct RendererDescriptor { pub context: Arc<gpukit::Context>, pub target_format: wgpu::TextureFormat, // Size of the screen: [width, height] pub size: [u32; 2], // Number of pixels per point pub pixels_per_point: f32, } impl Renderer { pub fn new(desc: RendererDescriptor) -> anyhow::Result<Renderer> { let RendererDescriptor { context, target_format, size, pixels_per_point, } = desc; let vertex_buffer = Self::create_vertex_buffer(&context, 0); let index_buffer = Self::create_index_buffer(&context, 0); let uniforms = UniformBuffer::new( &context, ScreenUniforms { width_in_points: size[0] as f32 / pixels_per_point, height_in_points: size[1] as f32 / pixels_per_point, pixels_per_point, _padding: 0, }, ); let sampler = context.device.create_sampler(&wgpu::SamplerDescriptor { label: None, address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge, mag_filter: wgpu::FilterMode::Nearest, min_filter: wgpu::FilterMode::Linear, ..Default::default() }); let bind_group = gpukit::BindGroup::new( &context, &Bindings { screen_uniforms: &uniforms.buffer, sampler: &sampler, }, ); let texture = context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data([1, 1], &[0]); let texture_bind_group = gpukit::BindGroup::new( &context, &TextureBindings { texture: &texture.create_view(), }, ); let vertex = context .build_shader("gukit_egui vertex shader") .init_from_glsl(include_str!("shader.vert"), gpukit::ShaderStage::Vertex)?; let fragment = context .build_shader("gukit_egui fragment shader") .init_from_glsl(include_str!("shader.frag"), gpukit::ShaderStage::Fragment)?; let pipeline = context.create_render_pipeline(gpukit::RenderPipelineDescriptor { label: Some("gpukit_egui renderer"), vertex: vertex.entry("main"), fragment: fragment.entry("main"), vertex_buffers: &[gpukit::vertex_buffer_layout![ // Position 0 => Float32x2, // Texture Coordinates 1 => Float32x2, // Color 2 => Uint32, ]], bind_group_layouts: &[&bind_group.layout, &texture_bind_group.layout], color_targets: &[wgpu::ColorTargetState { format: target_format, blend: Some(wgpu::BlendState { color: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::One, dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha, operation: wgpu::BlendOperation::Add, }, alpha: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::OneMinusDstAlpha, dst_factor: wgpu::BlendFactor::One, operation: wgpu::BlendOperation::Add, }, }), write_mask: wgpu::ColorWrites::all(), }], depth_stencil: None, primitive: wgpu::PrimitiveState { topology: wgpu::PrimitiveTopology::TriangleList, ..Default::default() }, })?; Ok(Renderer { context, pipeline, vertex_buffer, index_buffer, bind_group, 
screen_uniforms: uniforms, texture_bind_group, texture_version: None, texture, }) } fn create_vertex_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<Vertex> { context .build_buffer() .with_usage(wgpu::BufferUsages::VERTEX) .init_with_capacity(len) } fn create_index_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<u32> { context .build_buffer() .with_usage(wgpu::BufferUsages::INDEX) .init_with_capacity(len) } pub fn set_size(&mut self, size: [u32; 2], scale_factor: f32) { self.screen_uniforms.width_in_points = size[0] as f32 / scale_factor; self.screen_uniforms.height_in_points = size[1] as f32 / scale_factor; self.screen_uniforms.pixels_per_point = scale_factor; self.screen_uniforms.update(&self.context) } pub fn render<'encoder>( &'encoder mut self, rpass: &mut wgpu::RenderPass<'encoder>, meshes: &[egui::ClippedMesh], texture: &egui::Texture, ) { use gpukit::RenderPassExt; let offsets = self.update_buffers(meshes); self.update_texture(texture); rpass.set_pipeline(&self.pipeline); rpass.set_vertex_buffer(0, *self.vertex_buffer.slice(..)); rpass.set_index_buffer_ext(self.index_buffer.slice(..)); rpass.set_bind_group(0, &self.bind_group, &[]); rpass.set_bind_group(1, &self.texture_bind_group, &[]); for (egui::ClippedMesh(rect, mesh), offset) in meshes.iter().zip(offsets) { if Self::set_scissor_region(rpass, &self.screen_uniforms, *rect) { let index_range = offset.index..offset.index + mesh.indices.len() as u32; rpass.draw_indexed(index_range, offset.vertex as i32, 0..1); } } } fn set_scissor_region( rpass: &mut wgpu::RenderPass, screen: &ScreenUniforms, rect: egui::Rect, ) -> bool { let left = rect.left() * screen.pixels_per_point; let right = rect.right() * screen.pixels_per_point; let top = rect.top() * screen.pixels_per_point; let bottom = rect.bottom() * screen.pixels_per_point; let screen_width = screen.width_in_points * screen.pixels_per_point; let screen_height = screen.height_in_points * screen.pixels_per_point; let left = left.clamp(0.0, screen_width); let top = top.clamp(0.0, screen_height); let right = right.clamp(left, screen_width); let bottom = bottom.clamp(top, screen_height); let left = left.round() as u32; let top = top.round() as u32; let right = right.round() as u32; let bottom = bottom.round() as u32; let width = right - left; let height = bottom - top; if width == 0 || height == 0 { false } else { rpass.set_scissor_rect(left,
new
identifier_name
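`identifier_name` masks just a declared name: in the row above the prefix ends at `pub fn` and the middle is the single token `new`. A sketch covering Rust `fn` declarations only; the regex is illustrative, not the dataset's extractor:

```python
import re

def split_identifier_name(source: str):
    """Mask the identifier of the first `fn` declaration, if any."""
    m = re.search(r"\bfn\s+(\w+)", source)
    if m is None:
        return None
    return source[:m.start(1)], m.group(1), source[m.end(1):]
```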
exec.rs
Stream}; use tokio::process::Child; use tokio::sync::oneshot; use tokio::time::{delay_for, timeout, Duration}; // config for executor #[derive(Debug, Clone, Deserialize)] pub struct ExecutorConf { pub path: PathBuf, pub host_ip: Option<String>, pub concurrency: bool, pub memleak_check: bool, pub script_mode: bool, } impl ExecutorConf { pub fn check(&self) { if !self.path.is_file() { eprintln!( "Config Error: executor executable file {} is invalid", self.path.display() ); exit(exitcode::CONFIG) } if let Some(ip) = &self.host_ip { use std::net::ToSocketAddrs; let addr = format!("{}:8080", ip); if let Err(e) = addr.to_socket_addrs() { eprintln!( "Config Error: invalid host ip `{}`: {}", self.host_ip.as_ref().unwrap(), e ); exit(exitcode::CONFIG) } } } } pub struct Executor { inner: ExecutorImpl, } enum ExecutorImpl { Linux(LinuxExecutor), Scripy(ScriptExecutor), } impl Executor { pub fn new(cfg: &Config) -> Self { let inner = if cfg.executor.script_mode { ExecutorImpl::Scripy(ScriptExecutor::new(cfg)) } else { ExecutorImpl::Linux(LinuxExecutor::new(cfg)) }; Self { inner } } pub async fn
(&mut self) { match self.inner { ExecutorImpl::Linux(ref mut e) => e.start().await, ExecutorImpl::Scripy(ref mut e) => e.start().await, } } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { match self.inner { ExecutorImpl::Linux(ref mut e) => e.exec(p).await, ExecutorImpl::Scripy(ref mut e) => e.exec(p, t).await, } } } struct ScriptExecutor { path_on_host: PathBuf, guest: Guest, } impl ScriptExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); Self { path_on_host: cfg.executor.path.clone(), guest, } } pub async fn start(&mut self) { self.guest.boot().await; } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { let p_text = to_prog(p, t); let tmp = temp_dir().join("HEALER_test_case_v1-1-1.c"); if let Err(e) = write(&tmp, &p_text).await { eprintln!( "Failed to write test case to tmp dir \"{}\": {}", tmp.display(), e ); exit(1); } let guest_case_file = self.guest.copy(&tmp).await; let mut executor = App::new(self.path_on_host.to_str().unwrap()); executor.arg(Arg::new_flag(guest_case_file.to_str().unwrap())); let mut exec_handle = self.guest.run_cmd(&executor).await; match timeout(Duration::new(15, 0), &mut exec_handle).await { Err(_) => Ok(ExecResult::Failed(Reason("Time out".to_string()))), Ok(_) => { let mut stdout = exec_handle.stdout.take().unwrap(); let mut output = String::new(); stdout.read_to_string(&mut output).await.unwrap(); self.parse_exec_result(output).await } } } pub async fn parse_exec_result(&mut self, out: String) -> Result<ExecResult, Option<Crash>> { let mut result_line = String::new(); for l in out.lines() { if l.contains("HEALER_EXEC_RESULT") { result_line = l.to_string(); } } if !result_line.is_empty() { let out = out.replace(&result_line, ""); if result_line.contains("success") { return Ok(ExecResult::Ok(Default::default())); } else if result_line.contains("failed") { return Ok(ExecResult::Failed(Reason(out))); } else if result_line.contains("crashed") { return Err(Some(Crash { inner: out })); } } if !self.guest.is_alive().await { Err(Some(Crash { inner: out })) } else { Ok(ExecResult::Ok(Default::default())) } } } struct LinuxExecutor { guest: Guest, port: u16, exec_handle: Option<Child>, conn: Option<TcpStream>, concurrency: bool, memleak_check: bool, executor_bin_path: PathBuf, target_path: PathBuf, host_ip: String, } impl LinuxExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); let port = free_ipv4_port() .unwrap_or_else(|| exits!(exitcode::TEMPFAIL, "No Free port for executor driver")); let host_ip = cfg .executor .host_ip .as_ref() .map(String::from) .unwrap_or_else(|| String::from(guest::LINUX_QEMU_HOST_IP_ADDR)); Self { guest, port, exec_handle: None, conn: None, concurrency: cfg.executor.concurrency, memleak_check: cfg.executor.memleak_check, executor_bin_path: cfg.executor.path.clone(), target_path: PathBuf::from(&cfg.fots_bin), host_ip, } } pub async fn start(&mut self) { // handle should be set to kill on drop self.exec_handle = None; self.guest.boot().await; self.start_executer().await } pub async fn start_executer(&mut self) { use tokio::io::ErrorKind::*; self.exec_handle = None; let target = self.guest.copy(&self.target_path).await; let (tx, rx) = oneshot::channel(); let mut retry = 0; let mut listener; loop { let host_addr = format!("{}:{}", self.host_ip, self.port); listener = match TcpListener::bind(&host_addr).await { Ok(l) => l, Err(e) => { if e.kind() == AddrInUse && retry != 5 { self.port = free_ipv4_port().unwrap(); retry += 
1; continue; } else { eprintln!("Fail to listen on {}: {}", host_addr, e); exit(1); } } }; break; } let host_addr = listener.local_addr().unwrap(); tokio::spawn(async move { match listener.accept().await { Ok((conn, _addr)) => { tx.send(conn).unwrap(); } Err(e) => { eprintln!("Executor driver: fail to get client: {}", e); exit(exitcode::OSERR); } } }); let mut executor = App::new(self.executor_bin_path.to_str().unwrap()); executor .arg(Arg::new_opt("-t", OptVal::normal(target.to_str().unwrap()))) .arg(Arg::new_opt( "-a", OptVal::normal(&format!( "{}:{}", guest::LINUX_QEMU_USER_NET_HOST_IP_ADDR, self.port )), )); if self.memleak_check { executor.arg(Arg::new_flag("-m")); } if self.concurrency { executor.arg(Arg::new_flag("-c")); } self.exec_handle = Some(self.guest.run_cmd(&executor).await); self.conn = match timeout(Duration::new(32, 0), rx).await { Err(_) => { self.exec_handle = None; eprintln!("Time out: wait executor connection {}", host_addr); exit(1) } Ok(conn) => Some(conn.unwrap()), }; } pub async fn exec(&mut self, p: &Prog) -> Result<ExecResult, Option<Crash>> { // send must be success assert!(self.conn.is_some()); if let Err(e) = timeout( Duration::new(15, 0), async_send(p, self.conn.as_mut().unwrap()), ) .await { info!("Prog send blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } // async_send(p, self.conn.as_mut().unwrap()).await.unwrap(); let ret = { match timeout( Duration::new(15, 0), async_recv_result(self.conn.as_mut().unwrap()), ) .await { Err(e) => { info!("Prog recv blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } Ok(ret) => ret, } }; match ret { Ok(result) => { self.guest.clear().await; if let ExecResult::Failed(ref reason) = result { let rea = reason.to_string();
start
identifier_name
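The exec.rs rows that follow repeat the same split types on async Rust code. To inspect the class balance across the four `fim_type` values, rows like these can be loaded with the Hugging Face `datasets` library; the repository id below is a hypothetical placeholder, since this excerpt does not name the dataset:

```python
from collections import Counter

from datasets import load_dataset

ds = load_dataset("user/code-fim", split="train")  # hypothetical repo id
print(Counter(ds["fim_type"]))   # expect the 4 classes from the schema
print(Counter(ds["file_name"]))  # rows per source file
```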
exec.rs
Stream}; use tokio::process::Child; use tokio::sync::oneshot; use tokio::time::{delay_for, timeout, Duration}; // config for executor #[derive(Debug, Clone, Deserialize)] pub struct ExecutorConf { pub path: PathBuf, pub host_ip: Option<String>, pub concurrency: bool, pub memleak_check: bool, pub script_mode: bool, } impl ExecutorConf { pub fn check(&self) { if !self.path.is_file() { eprintln!( "Config Error: executor executable file {} is invalid", self.path.display() ); exit(exitcode::CONFIG) } if let Some(ip) = &self.host_ip { use std::net::ToSocketAddrs; let addr = format!("{}:8080", ip); if let Err(e) = addr.to_socket_addrs() { eprintln!( "Config Error: invalid host ip `{}`: {}", self.host_ip.as_ref().unwrap(), e ); exit(exitcode::CONFIG) } } } } pub struct Executor { inner: ExecutorImpl, } enum ExecutorImpl { Linux(LinuxExecutor), Scripy(ScriptExecutor), } impl Executor { pub fn new(cfg: &Config) -> Self { let inner = if cfg.executor.script_mode { ExecutorImpl::Scripy(ScriptExecutor::new(cfg)) } else { ExecutorImpl::Linux(LinuxExecutor::new(cfg)) }; Self { inner } } pub async fn start(&mut self) { match self.inner { ExecutorImpl::Linux(ref mut e) => e.start().await, ExecutorImpl::Scripy(ref mut e) => e.start().await, } } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { match self.inner { ExecutorImpl::Linux(ref mut e) => e.exec(p).await, ExecutorImpl::Scripy(ref mut e) => e.exec(p, t).await, } } } struct ScriptExecutor { path_on_host: PathBuf, guest: Guest, } impl ScriptExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); Self { path_on_host: cfg.executor.path.clone(), guest, } } pub async fn start(&mut self) { self.guest.boot().await; } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { let p_text = to_prog(p, t); let tmp = temp_dir().join("HEALER_test_case_v1-1-1.c"); if let Err(e) = write(&tmp, &p_text).await { eprintln!( "Failed to write test case to tmp dir \"{}\": {}", tmp.display(), e ); exit(1); } let guest_case_file = self.guest.copy(&tmp).await; let mut executor = App::new(self.path_on_host.to_str().unwrap()); executor.arg(Arg::new_flag(guest_case_file.to_str().unwrap())); let mut exec_handle = self.guest.run_cmd(&executor).await; match timeout(Duration::new(15, 0), &mut exec_handle).await { Err(_) => Ok(ExecResult::Failed(Reason("Time out".to_string()))), Ok(_) => { let mut stdout = exec_handle.stdout.take().unwrap(); let mut output = String::new(); stdout.read_to_string(&mut output).await.unwrap(); self.parse_exec_result(output).await } } } pub async fn parse_exec_result(&mut self, out: String) -> Result<ExecResult, Option<Crash>> { let mut result_line = String::new(); for l in out.lines() { if l.contains("HEALER_EXEC_RESULT") { result_line = l.to_string(); } } if !result_line.is_empty() { let out = out.replace(&result_line, ""); if result_line.contains("success") { return Ok(ExecResult::Ok(Default::default())); } else if result_line.contains("failed") { return Ok(ExecResult::Failed(Reason(out))); } else if result_line.contains("crashed") { return Err(Some(Crash { inner: out })); } } if !self.guest.is_alive().await { Err(Some(Crash { inner: out })) } else { Ok(ExecResult::Ok(Default::default())) } } } struct LinuxExecutor { guest: Guest, port: u16, exec_handle: Option<Child>, conn: Option<TcpStream>, concurrency: bool, memleak_check: bool, executor_bin_path: PathBuf, target_path: PathBuf, host_ip: String, } impl LinuxExecutor { pub fn new(cfg: &Config) -> Self
target_path: PathBuf::from(&cfg.fots_bin), host_ip, } } pub async fn start(&mut self) { // handle should be set to kill on drop self.exec_handle = None; self.guest.boot().await; self.start_executer().await } pub async fn start_executer(&mut self) { use tokio::io::ErrorKind::*; self.exec_handle = None; let target = self.guest.copy(&self.target_path).await; let (tx, rx) = oneshot::channel(); let mut retry = 0; let mut listener; loop { let host_addr = format!("{}:{}", self.host_ip, self.port); listener = match TcpListener::bind(&host_addr).await { Ok(l) => l, Err(e) => { if e.kind() == AddrInUse && retry != 5 { self.port = free_ipv4_port().unwrap(); retry += 1; continue; } else { eprintln!("Fail to listen on {}: {}", host_addr, e); exit(1); } } }; break; } let host_addr = listener.local_addr().unwrap(); tokio::spawn(async move { match listener.accept().await { Ok((conn, _addr)) => { tx.send(conn).unwrap(); } Err(e) => { eprintln!("Executor driver: fail to get client: {}", e); exit(exitcode::OSERR); } } }); let mut executor = App::new(self.executor_bin_path.to_str().unwrap()); executor .arg(Arg::new_opt("-t", OptVal::normal(target.to_str().unwrap()))) .arg(Arg::new_opt( "-a", OptVal::normal(&format!( "{}:{}", guest::LINUX_QEMU_USER_NET_HOST_IP_ADDR, self.port )), )); if self.memleak_check { executor.arg(Arg::new_flag("-m")); } if self.concurrency { executor.arg(Arg::new_flag("-c")); } self.exec_handle = Some(self.guest.run_cmd(&executor).await); self.conn = match timeout(Duration::new(32, 0), rx).await { Err(_) => { self.exec_handle = None; eprintln!("Time out: wait executor connection {}", host_addr); exit(1) } Ok(conn) => Some(conn.unwrap()), }; } pub async fn exec(&mut self, p: &Prog) -> Result<ExecResult, Option<Crash>> { // send must be success assert!(self.conn.is_some()); if let Err(e) = timeout( Duration::new(15, 0), async_send(p, self.conn.as_mut().unwrap()), ) .await { info!("Prog send blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } // async_send(p, self.conn.as_mut().unwrap()).await.unwrap(); let ret = { match timeout( Duration::new(15, 0), async_recv_result(self.conn.as_mut().unwrap()), ) .await { Err(e) => { info!("Prog recv blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } Ok(ret) => ret, } }; match ret { Ok(result) => { self.guest.clear().await; if let ExecResult::Failed(ref reason) = result { let rea = reason.to_string();
{ let guest = Guest::new(cfg); let port = free_ipv4_port() .unwrap_or_else(|| exits!(exitcode::TEMPFAIL, "No Free port for executor driver")); let host_ip = cfg .executor .host_ip .as_ref() .map(String::from) .unwrap_or_else(|| String::from(guest::LINUX_QEMU_HOST_IP_ADDR)); Self { guest, port, exec_handle: None, conn: None, concurrency: cfg.executor.concurrency, memleak_check: cfg.executor.memleak_check, executor_bin_path: cfg.executor.path.clone(),
identifier_body
exec.rs
Stream}; use tokio::process::Child; use tokio::sync::oneshot; use tokio::time::{delay_for, timeout, Duration}; // config for executor #[derive(Debug, Clone, Deserialize)] pub struct ExecutorConf { pub path: PathBuf, pub host_ip: Option<String>, pub concurrency: bool, pub memleak_check: bool, pub script_mode: bool, } impl ExecutorConf { pub fn check(&self) { if !self.path.is_file() { eprintln!( "Config Error: executor executable file {} is invalid", self.path.display() ); exit(exitcode::CONFIG) } if let Some(ip) = &self.host_ip { use std::net::ToSocketAddrs; let addr = format!("{}:8080", ip); if let Err(e) = addr.to_socket_addrs() { eprintln!( "Config Error: invalid host ip `{}`: {}", self.host_ip.as_ref().unwrap(), e ); exit(exitcode::CONFIG) } } } } pub struct Executor { inner: ExecutorImpl, } enum ExecutorImpl { Linux(LinuxExecutor), Scripy(ScriptExecutor), } impl Executor { pub fn new(cfg: &Config) -> Self { let inner = if cfg.executor.script_mode { ExecutorImpl::Scripy(ScriptExecutor::new(cfg)) } else { ExecutorImpl::Linux(LinuxExecutor::new(cfg)) }; Self { inner } } pub async fn start(&mut self) { match self.inner { ExecutorImpl::Linux(ref mut e) => e.start().await, ExecutorImpl::Scripy(ref mut e) => e.start().await, } } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { match self.inner { ExecutorImpl::Linux(ref mut e) => e.exec(p).await, ExecutorImpl::Scripy(ref mut e) => e.exec(p, t).await, } } } struct ScriptExecutor { path_on_host: PathBuf, guest: Guest, } impl ScriptExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); Self { path_on_host: cfg.executor.path.clone(), guest, } } pub async fn start(&mut self) { self.guest.boot().await; } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { let p_text = to_prog(p, t); let tmp = temp_dir().join("HEALER_test_case_v1-1-1.c"); if let Err(e) = write(&tmp, &p_text).await { eprintln!( "Failed to write test case to tmp dir \"{}\": {}", tmp.display(), e ); exit(1); } let guest_case_file = self.guest.copy(&tmp).await; let mut executor = App::new(self.path_on_host.to_str().unwrap()); executor.arg(Arg::new_flag(guest_case_file.to_str().unwrap())); let mut exec_handle = self.guest.run_cmd(&executor).await; match timeout(Duration::new(15, 0), &mut exec_handle).await { Err(_) => Ok(ExecResult::Failed(Reason("Time out".to_string()))), Ok(_) => { let mut stdout = exec_handle.stdout.take().unwrap(); let mut output = String::new(); stdout.read_to_string(&mut output).await.unwrap(); self.parse_exec_result(output).await } } } pub async fn parse_exec_result(&mut self, out: String) -> Result<ExecResult, Option<Crash>> { let mut result_line = String::new(); for l in out.lines() { if l.contains("HEALER_EXEC_RESULT") { result_line = l.to_string(); } } if !result_line.is_empty() { let out = out.replace(&result_line, ""); if result_line.contains("success") { return Ok(ExecResult::Ok(Default::default())); } else if result_line.contains("failed") { return Ok(ExecResult::Failed(Reason(out))); } else if result_line.contains("crashed") { return Err(Some(Crash { inner: out })); } } if !self.guest.is_alive().await { Err(Some(Crash { inner: out })) } else { Ok(ExecResult::Ok(Default::default())) } } } struct LinuxExecutor { guest: Guest, port: u16, exec_handle: Option<Child>, conn: Option<TcpStream>, concurrency: bool, memleak_check: bool, executor_bin_path: PathBuf, target_path: PathBuf, host_ip: String, } impl LinuxExecutor { pub fn new(cfg: &Config) -> Self { let guest 
= Guest::new(cfg); let port = free_ipv4_port() .unwrap_or_else(|| exits!(exitcode::TEMPFAIL, "No Free port for executor driver")); let host_ip = cfg .executor .host_ip .as_ref() .map(String::from) .unwrap_or_else(|| String::from(guest::LINUX_QEMU_HOST_IP_ADDR)); Self { guest, port, exec_handle: None, conn: None, concurrency: cfg.executor.concurrency, memleak_check: cfg.executor.memleak_check, executor_bin_path: cfg.executor.path.clone(), target_path: PathBuf::from(&cfg.fots_bin), host_ip, } } pub async fn start(&mut self) { // handle should be set to kill on drop self.exec_handle = None; self.guest.boot().await; self.start_executer().await } pub async fn start_executer(&mut self) { use tokio::io::ErrorKind::*; self.exec_handle = None; let target = self.guest.copy(&self.target_path).await; let (tx, rx) = oneshot::channel(); let mut retry = 0; let mut listener; loop { let host_addr = format!("{}:{}", self.host_ip, self.port); listener = match TcpListener::bind(&host_addr).await { Ok(l) => l, Err(e) => { if e.kind() == AddrInUse && retry != 5 { self.port = free_ipv4_port().unwrap(); retry += 1; continue; } else { eprintln!("Fail to listen on {}: {}", host_addr, e); exit(1); } } }; break; } let host_addr = listener.local_addr().unwrap(); tokio::spawn(async move { match listener.accept().await { Ok((conn, _addr)) => { tx.send(conn).unwrap(); } Err(e) => { eprintln!("Executor driver: fail to get client: {}", e); exit(exitcode::OSERR); } } }); let mut executor = App::new(self.executor_bin_path.to_str().unwrap()); executor .arg(Arg::new_opt("-t", OptVal::normal(target.to_str().unwrap()))) .arg(Arg::new_opt( "-a", OptVal::normal(&format!( "{}:{}", guest::LINUX_QEMU_USER_NET_HOST_IP_ADDR, self.port )), )); if self.memleak_check { executor.arg(Arg::new_flag("-m")); } if self.concurrency { executor.arg(Arg::new_flag("-c")); } self.exec_handle = Some(self.guest.run_cmd(&executor).await); self.conn = match timeout(Duration::new(32, 0), rx).await { Err(_) =>
Ok(conn) => Some(conn.unwrap()), }; } pub async fn exec(&mut self, p: &Prog) -> Result<ExecResult, Option<Crash>> { // send must be success assert!(self.conn.is_some()); if let Err(e) = timeout( Duration::new(15, 0), async_send(p, self.conn.as_mut().unwrap()), ) .await { info!("Prog send blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } // async_send(p, self.conn.as_mut().unwrap()).await.unwrap(); let ret = { match timeout( Duration::new(15, 0), async_recv_result(self.conn.as_mut().unwrap()), ) .await { Err(e) => { info!("Prog recv blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } Ok(ret) => ret, } }; match ret { Ok(result) => { self.guest.clear().await; if let ExecResult::Failed(ref reason) = result { let rea = reason.to_string();
{ self.exec_handle = None; eprintln!("Time out: wait executor connection {}", host_addr); exit(1) }
conditional_block
exec.rs
TcpStream}; use tokio::process::Child; use tokio::sync::oneshot; use tokio::time::{delay_for, timeout, Duration}; // config for executor #[derive(Debug, Clone, Deserialize)] pub struct ExecutorConf { pub path: PathBuf, pub host_ip: Option<String>, pub concurrency: bool, pub memleak_check: bool, pub script_mode: bool, } impl ExecutorConf { pub fn check(&self) { if !self.path.is_file() { eprintln!( "Config Error: executor executable file {} is invalid", self.path.display() ); exit(exitcode::CONFIG) } if let Some(ip) = &self.host_ip { use std::net::ToSocketAddrs; let addr = format!("{}:8080", ip); if let Err(e) = addr.to_socket_addrs() { eprintln!( "Config Error: invalid host ip `{}`: {}", self.host_ip.as_ref().unwrap(), e ); exit(exitcode::CONFIG) } } } } pub struct Executor { inner: ExecutorImpl, } enum ExecutorImpl { Linux(LinuxExecutor), Scripy(ScriptExecutor), } impl Executor { pub fn new(cfg: &Config) -> Self { let inner = if cfg.executor.script_mode { ExecutorImpl::Scripy(ScriptExecutor::new(cfg)) } else { ExecutorImpl::Linux(LinuxExecutor::new(cfg)) }; Self { inner } } pub async fn start(&mut self) { match self.inner { ExecutorImpl::Linux(ref mut e) => e.start().await, ExecutorImpl::Scripy(ref mut e) => e.start().await, } } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { match self.inner { ExecutorImpl::Linux(ref mut e) => e.exec(p).await, ExecutorImpl::Scripy(ref mut e) => e.exec(p, t).await, } } } struct ScriptExecutor { path_on_host: PathBuf, guest: Guest, } impl ScriptExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); Self { path_on_host: cfg.executor.path.clone(), guest, } } pub async fn start(&mut self) { self.guest.boot().await; } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { let p_text = to_prog(p, t); let tmp = temp_dir().join("HEALER_test_case_v1-1-1.c"); if let Err(e) = write(&tmp, &p_text).await { eprintln!( "Failed to write test case to tmp dir \"{}\": {}", tmp.display(), e ); exit(1); } let guest_case_file = self.guest.copy(&tmp).await; let mut executor = App::new(self.path_on_host.to_str().unwrap()); executor.arg(Arg::new_flag(guest_case_file.to_str().unwrap())); let mut exec_handle = self.guest.run_cmd(&executor).await; match timeout(Duration::new(15, 0), &mut exec_handle).await { Err(_) => Ok(ExecResult::Failed(Reason("Time out".to_string()))), Ok(_) => { let mut stdout = exec_handle.stdout.take().unwrap(); let mut output = String::new(); stdout.read_to_string(&mut output).await.unwrap(); self.parse_exec_result(output).await } } } pub async fn parse_exec_result(&mut self, out: String) -> Result<ExecResult, Option<Crash>> { let mut result_line = String::new(); for l in out.lines() { if l.contains("HEALER_EXEC_RESULT") { result_line = l.to_string(); } } if !result_line.is_empty() { let out = out.replace(&result_line, ""); if result_line.contains("success") { return Ok(ExecResult::Ok(Default::default())); } else if result_line.contains("failed") { return Ok(ExecResult::Failed(Reason(out))); } else if result_line.contains("crashed") { return Err(Some(Crash { inner: out })); } } if !self.guest.is_alive().await { Err(Some(Crash { inner: out })) } else { Ok(ExecResult::Ok(Default::default())) } } } struct LinuxExecutor { guest: Guest, port: u16, exec_handle: Option<Child>, conn: Option<TcpStream>, concurrency: bool, memleak_check: bool, executor_bin_path: PathBuf, target_path: PathBuf, host_ip: String, } impl LinuxExecutor { pub fn new(cfg: &Config) -> Self { let 
guest = Guest::new(cfg); let port = free_ipv4_port() .unwrap_or_else(|| exits!(exitcode::TEMPFAIL, "No Free port for executor driver")); let host_ip = cfg .executor .host_ip .as_ref() .map(String::from) .unwrap_or_else(|| String::from(guest::LINUX_QEMU_HOST_IP_ADDR)); Self { guest, port, exec_handle: None, conn: None, concurrency: cfg.executor.concurrency, memleak_check: cfg.executor.memleak_check, executor_bin_path: cfg.executor.path.clone(), target_path: PathBuf::from(&cfg.fots_bin), host_ip, } } pub async fn start(&mut self) { // handle should be set to kill on drop self.exec_handle = None; self.guest.boot().await; self.start_executer().await } pub async fn start_executer(&mut self) { use tokio::io::ErrorKind::*; self.exec_handle = None; let target = self.guest.copy(&self.target_path).await; let (tx, rx) = oneshot::channel(); let mut retry = 0; let mut listener; loop {
listener = match TcpListener::bind(&host_addr).await { Ok(l) => l, Err(e) => { if e.kind() == AddrInUse && retry != 5 { self.port = free_ipv4_port().unwrap(); retry += 1; continue; } else { eprintln!("Fail to listen on {}: {}", host_addr, e); exit(1); } } }; break; } let host_addr = listener.local_addr().unwrap(); tokio::spawn(async move { match listener.accept().await { Ok((conn, _addr)) => { tx.send(conn).unwrap(); } Err(e) => { eprintln!("Executor driver: fail to get client: {}", e); exit(exitcode::OSERR); } } }); let mut executor = App::new(self.executor_bin_path.to_str().unwrap()); executor .arg(Arg::new_opt("-t", OptVal::normal(target.to_str().unwrap()))) .arg(Arg::new_opt( "-a", OptVal::normal(&format!( "{}:{}", guest::LINUX_QEMU_USER_NET_HOST_IP_ADDR, self.port )), )); if self.memleak_check { executor.arg(Arg::new_flag("-m")); } if self.concurrency { executor.arg(Arg::new_flag("-c")); } self.exec_handle = Some(self.guest.run_cmd(&executor).await); self.conn = match timeout(Duration::new(32, 0), rx).await { Err(_) => { self.exec_handle = None; eprintln!("Time out: wait executor connection {}", host_addr); exit(1) } Ok(conn) => Some(conn.unwrap()), }; } pub async fn exec(&mut self, p: &Prog) -> Result<ExecResult, Option<Crash>> { // send must be success assert!(self.conn.is_some()); if let Err(e) = timeout( Duration::new(15, 0), async_send(p, self.conn.as_mut().unwrap()), ) .await { info!("Prog send blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } // async_send(p, self.conn.as_mut().unwrap()).await.unwrap(); let ret = { match timeout( Duration::new(15, 0), async_recv_result(self.conn.as_mut().unwrap()), ) .await { Err(e) => { info!("Prog recv blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } Ok(ret) => ret, } }; match ret { Ok(result) => { self.guest.clear().await; if let ExecResult::Failed(ref reason) = result { let rea = reason.to_string();
let host_addr = format!("{}:{}", self.host_ip, self.port);
random_line_split
PoissonDiscSampleGenerator.py
random sample. (Optional) (Default: None) """ # -------------------------------- # User Parameters # -------------------------------- # Defines self._radius self.radius = radius # Defines self._k self.k = k # Defines both self._extent and self._dim self.extent = np.array(extent) # Defines self._seed self.seed = seed self._metric = metric.euclidean self._changes = self._create_neighbor_distances() # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def __call__(self): """Calls the generate function. Returns: (np.ndarray) """ return self.generate() def generate(self): """Generates the samples with the given user parameters. Returns: (np.ndarray) Returns an array of samples of shape (n_samples, dim) where dim is the length of the extent given. """ # -------------------------------- # Initializing Variables # -------------------------------- # Set NumPy Random Seed np.random.seed(self._seed) # Create Active List active = [] # Clear previously generated examples if len(self._samples) > 0: self._clear_previous_samples() # -------------------------------- # Begin Generating Samples # -------------------------------- # Create the first sample self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)), high=self._extent, size=(self._dim,))) active.append(self._samples[0]) self._grid[self._get_grid_coord(self._samples[0])] = 0 while active: # Choose Random Active Sample idx = np.random.choice(len(active)) # Make new point & confirm it is valid new_point = self._make_point(active[idx]) if new_point is None: active.pop(idx) else: # Add sample to listings and store in grid for neighboring locations. self._samples.append(new_point) active.append(new_point) self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1 # Return samples as numpy array self._samples = np.array(self._samples) return self._samples @property def radius(self): """The minimum distance between any two points. Returns: (float) """ return self._radius @property def extent(self): """The measurements of the plane where the sample will be produced (length, width, height, depth, etc.) Returns: (np.ndarray) """ return np.array(self._extent) @property def k(self): """The number of attempts each active point to make a new point. Returns: (int) """ return self._k @property def metric(self): """The distance function used to measure the distance between two points. Returns: (function) """ return self._metric @property def seed(self): """The random seed used for generating samples. Returns: (int) Returns None if no seed is set, otherwise a user-defined int. """ return self._seed @property def dimension_size(self): """The length of the extent and all of the generated sample point. Returns: (int) """ return self._dim @property def samples(self): """The samples generated by the generator. May not match specs if parameters were re-adjusted after use. Returns: (np.ndarray) """ if len(self._samples) == 0: return np.empty((0, 2)) return self._samples @radius.setter def radius(self, radius): """Setter for radius Args: radius (float): Minimum distance between two points. 
""" if radius <= 0: raise ValueError("Radius must be a number that is greater than 0.") self._radius = radius @extent.setter def extent(self, extent): """Setter for extent Args: extent (list<float>): The dimension lengths. """ if len(extent) < 2: raise ValueError("Extent must have a length of at least 2.") extent = np.array(extent, dtype=float) if np.any(extent <= 0): raise ValueError("All extents must be greater than 0.") self._extent = extent self._dim = len(extent) @k.setter def k(self, k): """Setter for k Args: k (int): Number of attempts for each active point to generate a new point. """ if k <= 0: raise ValueError("K must be greater than 0.") if int(k) != k: raise ValueError("K must be an integer.") self._k = k @seed.setter def seed(self, seed): if seed is not None: if type(seed) is not int:
if seed < 0: raise ValueError("Seed must be non-negative.") self._seed = seed def _clear_previous_samples(self): """Clears grid and samples for generating new samples. """ del self._grid del self._samples # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def _get_grid_coord(self, point): """Returns the grid coordinate of the point. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: The grid coordinate that the point is separated in. """ return tuple([int(point[i] / self._cell_length) for i in range(self._dim)]) def _make_point(self, active_point): """ Attempts to make a random point in proximity of active_point. Attempts to make a random point around the active_point k times. If the new point is too close to another point, it will discard and try. If it fails k times, the function returns None. Args: active_point (np.ndarray): An array of size (number_of_dimensions,). Returns: (np.ndarray). Returns an array of size (number_of_dimensions,) if succeeds. Otherwise, returns None. """ # -------------------------------- # Create Random Parameters # -------------------------------- for _ in range(self._k): # Defines radial distance from active_point. rho = np.random.uniform(self._radius, 2 * self._radius) # Defines angle from active_point. Requires multiple angles for higher dimensional planes. theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)] # -------------------------------- # Create New Point # -------------------------------- # Create a 2D point using first theta angle. new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])] # Generate more components of the coordinate for higher dimensional planes. new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])]) new_point = np.array(new_point) # Confirm point is valid if self._valid_point(new_point): return new_point return None def _valid_point(self, point): """Confirms that a point is valid. If a point is too close to another point or is outside of bounds, it will fail. Otherwise, it will succeed. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: (bool) If succeeds, returns True. Otherwise, returns False. """ # -------------------------------- # Check Bounds # -------------------------------- # Get grid point and confirm it is within range coord = self._get_grid_coord(point) if np.logical_or(np.any(point < 0), np.any(point >= self._extent)): return False # -------------------------------- # Check Distance of Neighbors # -------------------------------- for idx in self._get_neighbors(coord): # No points in grid cell if self._grid[idx] == -1: continue # Obtains point in grid cell and confirms its distance is less than the radius. near_point = self._samples[self._grid[idx]] if metric.euclidean(near_point, point) < self._radius: return False return True def _get_neighbors(self, coord): """Obtains neighboring
raise ValueError("Seed must be integer.")
conditional_block
PoissonDiscSampleGenerator.py
random sample. (Optional) (Default: None) """ # -------------------------------- # User Parameters # -------------------------------- # Defines self._radius self.radius = radius # Defines self._k self.k = k # Defines both self._extent and self._dim self.extent = np.array(extent) # Defines self._seed self.seed = seed self._metric = metric.euclidean self._changes = self._create_neighbor_distances() # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def __call__(self): """Calls the generate function. Returns: (np.ndarray) """ return self.generate() def generate(self): """Generates the samples with the given user parameters. Returns: (np.ndarray) Returns an array of samples of shape (n_samples, dim) where dim is the length of the extent given. """ # -------------------------------- # Initializing Variables # -------------------------------- # Set NumPy Random Seed np.random.seed(self._seed) # Create Active List active = [] # Clear previously generated examples if len(self._samples) > 0: self._clear_previous_samples() # -------------------------------- # Begin Generating Samples # -------------------------------- # Create the first sample self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)), high=self._extent, size=(self._dim,))) active.append(self._samples[0]) self._grid[self._get_grid_coord(self._samples[0])] = 0 while active: # Choose Random Active Sample idx = np.random.choice(len(active)) # Make new point & confirm it is valid new_point = self._make_point(active[idx]) if new_point is None: active.pop(idx) else: # Add sample to listings and store in grid for neighboring locations. self._samples.append(new_point) active.append(new_point) self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1 # Return samples as numpy array self._samples = np.array(self._samples) return self._samples @property def radius(self): """The minimum distance between any two points. Returns: (float) """ return self._radius @property def extent(self): """The measurements of the plane where the sample will be produced (length, width, height, depth, etc.) Returns: (np.ndarray) """ return np.array(self._extent) @property def k(self):
@property def metric(self): """The distance function used to measure the distance between two points. Returns: (function) """ return self._metric @property def seed(self): """The random seed used for generating samples. Returns: (int) Returns None if no seed is set, otherwise a user-defined int. """ return self._seed @property def dimension_size(self): """The length of the extent and all of the generated sample point. Returns: (int) """ return self._dim @property def samples(self): """The samples generated by the generator. May not match specs if parameters were re-adjusted after use. Returns: (np.ndarray) """ if len(self._samples) == 0: return np.empty((0, 2)) return self._samples @radius.setter def radius(self, radius): """Setter for radius Args: radius (float): Minimum distance between two points. """ if radius <= 0: raise ValueError("Radius must be a number that is greater than 0.") self._radius = radius @extent.setter def extent(self, extent): """Setter for extent Args: extent (list<float>): The dimension lengths. """ if len(extent) < 2: raise ValueError("Extent must have a length of at least 2.") extent = np.array(extent, dtype=float) if np.any(extent <= 0): raise ValueError("All extents must be greater than 0.") self._extent = extent self._dim = len(extent) @k.setter def k(self, k): """Setter for k Args: k (int): Number of attempts for each active point to generate a new point. """ if k <= 0: raise ValueError("K must be greater than 0.") if int(k) != k: raise ValueError("K must be an integer.") self._k = k @seed.setter def seed(self, seed): if seed is not None: if type(seed) is not int: raise ValueError("Seed must be integer.") if seed < 0: raise ValueError("Seed must be non-negative.") self._seed = seed def _clear_previous_samples(self): """Clears grid and samples for generating new samples. """ del self._grid del self._samples # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def _get_grid_coord(self, point): """Returns the grid coordinate of the point. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: The grid coordinate that the point is separated in. """ return tuple([int(point[i] / self._cell_length) for i in range(self._dim)]) def _make_point(self, active_point): """ Attempts to make a random point in proximity of active_point. Attempts to make a random point around the active_point k times. If the new point is too close to another point, it will discard and try. If it fails k times, the function returns None. Args: active_point (np.ndarray): An array of size (number_of_dimensions,). Returns: (np.ndarray). Returns an array of size (number_of_dimensions,) if succeeds. Otherwise, returns None. """ # -------------------------------- # Create Random Parameters # -------------------------------- for _ in range(self._k): # Defines radial distance from active_point. rho = np.random.uniform(self._radius, 2 * self._radius) # Defines angle from active_point. Requires multiple angles for higher dimensional planes. 
theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)] # -------------------------------- # Create New Point # -------------------------------- # Create a 2D point using first theta angle. new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])] # Generate more components of the coordinate for higher dimensional planes. new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])]) new_point = np.array(new_point) # Confirm point is valid if self._valid_point(new_point): return new_point return None def _valid_point(self, point): """Confirms that a point is valid. If a point is too close to another point or is outside of bounds, it will fail. Otherwise, it will succeed. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: (bool) If succeeds, returns True. Otherwise, returns False. """ # -------------------------------- # Check Bounds # -------------------------------- # Get grid point and confirm it is within range coord = self._get_grid_coord(point) if np.logical_or(np.any(point < 0), np.any(point >= self._extent)): return False # -------------------------------- # Check Distance of Neighbors # -------------------------------- for idx in self._get_neighbors(coord): # No points in grid cell if self._grid[idx] == -1: continue # Obtains point in grid cell and confirms its distance is less than the radius. near_point = self._samples[self._grid[idx]] if metric.euclidean(near_point, point) < self._radius: return False return True def _get_neighbors(self, coord): """Obtains neighboring cells
"""The number of attempts each active point to make a new point. Returns: (int) """ return self._k
identifier_body
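A note on the grid sizing that recurs throughout these PoissonDiscSampleGenerator fragments: with cell_length = radius / sqrt(dim), a grid cell's diagonal equals the minimum radius, so a cell can never hold two accepted samples and a single integer index per cell suffices. A minimal sketch of that sizing under assumed illustrative values (the radius and extent below are not taken from the source):

import numpy as np

radius, extent = 1.0, np.array([10.0, 10.0])
dim = len(extent)

# Cell diagonal = cell_length * sqrt(dim) = radius, so at most one
# sample lands in any cell; mirrors _cell_length and _grid_shape above.
cell_length = radius / np.sqrt(dim)
grid_shape = np.ceil(extent / cell_length).astype(int)
print(cell_length, grid_shape)  # -> 0.7071..., [15 15]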
PoissonDiscSampleGenerator.py
random sample. (Optional) (Default: None) """ # -------------------------------- # User Parameters # -------------------------------- # Defines self._radius self.radius = radius # Defines self._k self.k = k # Defines both self._extent and self._dim self.extent = np.array(extent) # Defines self._seed self.seed = seed self._metric = metric.euclidean self._changes = self._create_neighbor_distances() # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def __call__(self): """Calls the generate function. Returns: (np.ndarray) """ return self.generate() def generate(self): """Generates the samples with the given user parameters. Returns: (np.ndarray) Returns an array of samples of shape (n_samples, dim) where dim is the length of the extent given. """ # -------------------------------- # Initializing Variables # -------------------------------- # Set NumPy Random Seed np.random.seed(self._seed) # Create Active List active = [] # Clear previously generated examples if len(self._samples) > 0: self._clear_previous_samples() # -------------------------------- # Begin Generating Samples # -------------------------------- # Create the first sample self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)), high=self._extent, size=(self._dim,))) active.append(self._samples[0]) self._grid[self._get_grid_coord(self._samples[0])] = 0 while active: # Choose Random Active Sample idx = np.random.choice(len(active)) # Make new point & confirm it is valid new_point = self._make_point(active[idx]) if new_point is None: active.pop(idx) else: # Add sample to listings and store in grid for neighboring locations. self._samples.append(new_point) active.append(new_point) self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1 # Return samples as numpy array self._samples = np.array(self._samples) return self._samples @property def radius(self): """The minimum distance between any two points. Returns: (float) """ return self._radius @property def extent(self): """The measurements of the plane where the sample will be produced (length, width, height, depth, etc.) Returns: (np.ndarray) """ return np.array(self._extent) @property def k(self): """The number of attempts each active point to make a new point. Returns: (int) """ return self._k @property def metric(self): """The distance function used to measure the distance between two points. Returns: (function) """ return self._metric @property def seed(self): """The random seed used for generating samples. Returns: (int) Returns None if no seed is set, otherwise a user-defined int. """ return self._seed @property def dimension_size(self): """The length of the extent and all of the generated sample point. Returns: (int) """ return self._dim @property def samples(self): """The samples generated by the generator. May not match specs if parameters were re-adjusted after use. Returns: (np.ndarray) """ if len(self._samples) == 0: return np.empty((0, 2)) return self._samples @radius.setter def radius(self, radius): """Setter for radius Args: radius (float): Minimum distance between two points. 
""" if radius <= 0: raise ValueError("Radius must be a number that is greater than 0.") self._radius = radius @extent.setter def extent(self, extent): """Setter for extent Args: extent (list<float>): The dimension lengths. """ if len(extent) < 2: raise ValueError("Extent must have a length of at least 2.") extent = np.array(extent, dtype=float) if np.any(extent <= 0): raise ValueError("All extents must be greater than 0.") self._extent = extent self._dim = len(extent) @k.setter def k(self, k): """Setter for k Args: k (int): Number of attempts for each active point to generate a new point. """ if k <= 0: raise ValueError("K must be greater than 0.") if int(k) != k: raise ValueError("K must be an integer.") self._k = k @seed.setter def seed(self, seed): if seed is not None: if type(seed) is not int: raise ValueError("Seed must be integer.") if seed < 0: raise ValueError("Seed must be non-negative.") self._seed = seed def _clear_previous_samples(self): """Clears grid and samples for generating new samples. """ del self._grid del self._samples # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def _get_grid_coord(self, point): """Returns the grid coordinate of the point. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: The grid coordinate that the point is separated in. """ return tuple([int(point[i] / self._cell_length) for i in range(self._dim)]) def _make_point(self, active_point): """ Attempts to make a random point in proximity of active_point. Attempts to make a random point around the active_point k times. If the new point is too close to another point, it will discard and try. If it fails k times, the function returns None. Args: active_point (np.ndarray): An array of size (number_of_dimensions,). Returns: (np.ndarray). Returns an array of size (number_of_dimensions,) if succeeds. Otherwise, returns None. """ # -------------------------------- # Create Random Parameters # -------------------------------- for _ in range(self._k): # Defines radial distance from active_point. rho = np.random.uniform(self._radius, 2 * self._radius) # Defines angle from active_point. Requires multiple angles for higher dimensional planes. theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)] # -------------------------------- # Create New Point # -------------------------------- # Create a 2D point using first theta angle. new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])] # Generate more components of the coordinate for higher dimensional planes. new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])]) new_point = np.array(new_point) # Confirm point is valid if self._valid_point(new_point): return new_point return None def _valid_point(self, point): """Confirms that a point is valid. If a point is too close to another point or is outside of bounds, it will fail. Otherwise, it will succeed. Args:
(bool) If succeeds, returns True. Otherwise, returns False. """ # -------------------------------- # Check Bounds # -------------------------------- # Get grid point and confirm it is within range coord = self._get_grid_coord(point) if np.logical_or(np.any(point < 0), np.any(point >= self._extent)): return False # -------------------------------- # Check Distance of Neighbors # -------------------------------- for idx in self._get_neighbors(coord): # No points in grid cell if self._grid[idx] == -1: continue # Obtains point in grid cell and confirms its distance is less than the radius. near_point = self._samples[self._grid[idx]] if metric.euclidean(near_point, point) < self._radius: return False return True def _get_neighbors(self, coord): """Obtains neighboring cells
point (np.ndarray): An array of size (number_of_dimensions,). Returns:
random_line_split
PoissonDiscSampleGenerator.py
random sample. (Optional) (Default: None) """ # -------------------------------- # User Parameters # -------------------------------- # Defines self._radius self.radius = radius # Defines self._k self.k = k # Defines both self._extent and self._dim self.extent = np.array(extent) # Defines self._seed self.seed = seed self._metric = metric.euclidean self._changes = self._create_neighbor_distances() # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def __call__(self): """Calls the generate function. Returns: (np.ndarray) """ return self.generate() def generate(self): """Generates the samples with the given user parameters. Returns: (np.ndarray) Returns an array of samples of shape (n_samples, dim) where dim is the length of the extent given. """ # -------------------------------- # Initializing Variables # -------------------------------- # Set NumPy Random Seed np.random.seed(self._seed) # Create Active List active = [] # Clear previously generated examples if len(self._samples) > 0: self._clear_previous_samples() # -------------------------------- # Begin Generating Samples # -------------------------------- # Create the first sample self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)), high=self._extent, size=(self._dim,))) active.append(self._samples[0]) self._grid[self._get_grid_coord(self._samples[0])] = 0 while active: # Choose Random Active Sample idx = np.random.choice(len(active)) # Make new point & confirm it is valid new_point = self._make_point(active[idx]) if new_point is None: active.pop(idx) else: # Add sample to listings and store in grid for neighboring locations. self._samples.append(new_point) active.append(new_point) self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1 # Return samples as numpy array self._samples = np.array(self._samples) return self._samples @property def radius(self): """The minimum distance between any two points. Returns: (float) """ return self._radius @property def extent(self): """The measurements of the plane where the sample will be produced (length, width, height, depth, etc.) Returns: (np.ndarray) """ return np.array(self._extent) @property def k(self): """The number of attempts each active point to make a new point. Returns: (int) """ return self._k @property def metric(self): """The distance function used to measure the distance between two points. Returns: (function) """ return self._metric @property def seed(self): """The random seed used for generating samples. Returns: (int) Returns None if no seed is set, otherwise a user-defined int. """ return self._seed @property def dimension_size(self): """The length of the extent and all of the generated sample point. Returns: (int) """ return self._dim @property def samples(self): """The samples generated by the generator. May not match specs if parameters were re-adjusted after use. Returns: (np.ndarray) """ if len(self._samples) == 0: return np.empty((0, 2)) return self._samples @radius.setter def radius(self, radius): """Setter for radius Args: radius (float): Minimum distance between two points. 
""" if radius <= 0: raise ValueError("Radius must be a number that is greater than 0.") self._radius = radius @extent.setter def extent(self, extent): """Setter for extent Args: extent (list<float>): The dimension lengths. """ if len(extent) < 2: raise ValueError("Extent must have a length of at least 2.") extent = np.array(extent, dtype=float) if np.any(extent <= 0): raise ValueError("All extents must be greater than 0.") self._extent = extent self._dim = len(extent) @k.setter def k(self, k): """Setter for k Args: k (int): Number of attempts for each active point to generate a new point. """ if k <= 0: raise ValueError("K must be greater than 0.") if int(k) != k: raise ValueError("K must be an integer.") self._k = k @seed.setter def seed(self, seed): if seed is not None: if type(seed) is not int: raise ValueError("Seed must be integer.") if seed < 0: raise ValueError("Seed must be non-negative.") self._seed = seed def _clear_previous_samples(self): """Clears grid and samples for generating new samples. """ del self._grid del self._samples # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def _get_grid_coord(self, point): """Returns the grid coordinate of the point. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: The grid coordinate that the point is separated in. """ return tuple([int(point[i] / self._cell_length) for i in range(self._dim)]) def
(self, active_point): """ Attempts to make a random point in proximity of active_point. Attempts to make a random point around the active_point k times. If the new point is too close to another point, it will discard and try. If it fails k times, the function returns None. Args: active_point (np.ndarray): An array of size (number_of_dimensions,). Returns: (np.ndarray). Returns an array of size (number_of_dimensions,) if succeeds. Otherwise, returns None. """ # -------------------------------- # Create Random Parameters # -------------------------------- for _ in range(self._k): # Defines radial distance from active_point. rho = np.random.uniform(self._radius, 2 * self._radius) # Defines angle from active_point. Requires multiple angles for higher dimensional planes. theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)] # -------------------------------- # Create New Point # -------------------------------- # Create a 2D point using first theta angle. new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])] # Generate more components of the coordinate for higher dimensional planes. new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])]) new_point = np.array(new_point) # Confirm point is valid if self._valid_point(new_point): return new_point return None def _valid_point(self, point): """Confirms that a point is valid. If a point is too close to another point or is outside of bounds, it will fail. Otherwise, it will succeed. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: (bool) If succeeds, returns True. Otherwise, returns False. """ # -------------------------------- # Check Bounds # -------------------------------- # Get grid point and confirm it is within range coord = self._get_grid_coord(point) if np.logical_or(np.any(point < 0), np.any(point >= self._extent)): return False # -------------------------------- # Check Distance of Neighbors # -------------------------------- for idx in self._get_neighbors(coord): # No points in grid cell if self._grid[idx] == -1: continue # Obtains point in grid cell and confirms its distance is less than the radius. near_point = self._samples[self._grid[idx]] if metric.euclidean(near_point, point) < self._radius: return False return True def _get_neighbors(self, coord): """Obtains neighboring cells
_make_point
identifier_name
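Read together, the four PoissonDiscSampleGenerator records above describe Bridson-style Poisson-disc sampling: keep an active list, draw up to k candidates in the annulus [radius, 2 * radius) around an active point, and retire the point once every attempt fails. A hedged usage sketch; the module path and the constructor's argument order are inferred from the fragments, not confirmed by them:

from PoissonDiscSampleGenerator import PoissonDiscSampleGenerator  # assumed module layout

# radius: minimum pairwise distance; extent: domain size per axis;
# k: candidate attempts per active point; seed: optional RNG seed.
generator = PoissonDiscSampleGenerator(radius=1.0, extent=[10.0, 10.0], k=30, seed=42)
samples = generator()  # __call__ forwards to generate()
print(samples.shape)   # (n_samples, 2) for a 2-D extent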
Labupdown.py
, :], allfeats=BIGfeat[:, :, s1:s2, :], updown='up'), min=0) VecUp=getVec(allfeats=BIGfeat[:, :, s1:s2, :]) torch.save(VecUp,'./VecUp.pkl') VecUp=torch.load('./VecUp.pkl') project_mapup=torch.clamp(getPmap(features[:, :, s1:s2, :],VecUp), min=0) maxv = project_mapup.view(project_mapup.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup /= maxv if d == 0: project_mapdown = torch.zeros(features[:, :, s2:s3, :].size(0), features[:, :, s2:s3, :].size(2), features[:, :, s2:s3, :].size(3)).cuda() elif d == 1: # project_mapdown = torch.clamp(pca(features[:, :, s2:s3, :], allfeats=BIGfeat[:, :, s2:s3, :], updown='down'), # min=0) VecDown=getVec(allfeats=BIGfeat[:, :, s2:s3, :]) torch.save(VecDown, './VecDown.pkl') VecDown = torch.load('./VecDown.pkl') project_mapdown=torch.clamp(getPmap(features[:, :, s2:s3, :],VecDown), min=0) maxv = project_mapdown.view(project_mapdown.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown /= maxv if ur == 0: project_mapup1 = torch.zeros(features[:, :, s1:s2, s1:s2].size(0), features[:, :, s1:s2, s1:s2].size(2), features[:, :, s1:s2, s1:s2].size(3)).cuda() # clamp 作用 小于0的,置为0 elif ur == 1: project_mapup1 = torch.clamp( pca(features[:, :, s1:s2, s1:s2], allfeats=BIGfeat[:, :, s1:s2, s1:s2], updown='up'), min=0) maxv = project_mapup1.view(project_mapup1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup1 /= maxv if ul == 0: project_mapup2 = torch.zeros(features[:, :, s1:s2, s2:s3].size(0), features[:, :, s1:s2, s2:s3].size(2), features[:, :, s1:s2, s2:s3].size(3)).cuda() elif ul == 1: project_mapup2 = torch.clamp( pca(features[:, :, s1:s2, s2:s3], allfeats=BIGfeat[:, :, s1:s2, s2:s3], updown='up'), min=0) maxv = project_mapup2.view(project_mapup2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup2 /= maxv
project_mapdown1 = torch.zeros(features[:, :, s2:s3, s1:s4].size(0), features[:, :, s2:s3, s1:s4].size(2), features[:, :, s2:s3, s1:s4].size(3)).cuda() elif dr == 1: project_mapdown1 = torch.clamp( pca(features[:, :, s2:s3, s1:s4], allfeats=BIGfeat[:, :, s2:s3, s1:s4], updown='down'), min=0) maxv = project_mapdown1.view(project_mapdown1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown1 /= maxv if dm == 0: project_mapdown2 = torch.zeros(features[:, :, s2:s3, s4:s5].size(0), features[:, :, s2:s3, s4:s5].size(2), features[:, :, s2:s3, s4:s5].size(3)).cuda() elif dm == 1: project_mapdown2 = torch.clamp( pca(features[:, :, s2:s3, s4:s5], allfeats=BIGfeat[:, :, s2:s3, s4:s5], updown='down'), min=0) maxv = project_mapdown2.view(project_mapdown2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown2 /= maxv if dl == 0: project_mapdown3 = torch.zeros(features[:, :, s2:s3, s5:s3].size(0), features[:, :, s2:s3, s5:s3].size(2), features[:, :, s2:s3, s5:s3].size(3)).cuda() elif dl == 1: project_mapdown3 = torch.clamp( pca(features[:, :, s2:s3, s5:s3], allfeats=BIGfeat[:, :, s2:s3, s5:s3], updown='down'), min=0) maxv = project_mapdown3.view(project_mapdown3.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown3 /= maxv if u == 0 and d == 0: project_mapup = torch.cat((project_mapup1, project_mapup2), 2) project_mapdown = torch.cat((project_mapdown1, project_mapdown2, project_mapdown3), 2) project_map = torch.cat((project_mapup, project_mapdown), 1) # project_map=F.adaptive_avg_pool2d(project_map,(7,7)) coordinate = [] feat = [] for i in range(project_map.size(0)): Sproject_map = project_map[i] Singlefeature = features[i, :, :, :] meanmaskvalue = Sproject_map.sum() / (Sproject_map.size(0) * Sproject_map.size(1)) Sproject_map[Sproject_map < theh] = 0 allcood = torch.nonzero(Sproject_map).cpu().numpy() Sproject_map = Sproject_map.unsqueeze(0) # Smask = Sproject_map.repeat(Singlefeature.size(0), 1, 1) SOproject_map = Sproject_map.squeeze() SOproject_map = SOproject_map.reshape(SOproject_map.size(0) * SOproject_map.size(1)) Singlefeature = Singlefeature.squeeze() Singlefeature = Singlefeature.permute(1, 2, 0) Singlefeature = Singlefeature.reshape(Singlefeature.size(0) * Singlefeature.size(1), Singlefeature.size(2)) cnt = 0 ClusterFeat = torch.zeros(Singlefeature.size(1)).unsqueeze(0) for j in range(SOproject_map.size(0)): if SOproject_map[j] > theh and cnt == 0: ClusterFeat = F.normalize(Singlefeature[j].unsqueeze(0)) cnt = cnt + 1 elif SOproject_map[j] >theh and cnt != 0: SF = F.normalize(Singlefeature[j].unsqueeze(0)) ClusterFeat = torch.cat((ClusterFeat, SF), 0) cnt = cnt + 1 ClusterFeatmean = torch.mean(ClusterFeat, dim=0) ClusterFeatmean = ClusterFeatmean.unsqueeze(0) ClusterFeatmax = torch.max(ClusterFeat, dim=0) ClusterFeatmax = ClusterFeatmax[0].unsqueeze(0) ClusterFeatmax = F.normalize(ClusterFeatmax) ClusterFeatmean = F.normalize(ClusterFeatmean) SingleClusterFeat = torch.cat((ClusterFeatmax, ClusterFeatmean), 1) SingleClusterFeat = SingleClusterFeat.squeeze() # Singlefeature = Singlefeature.mul(Smask) # Singlefeature = Singlefeature.reshape(Singlefeature.size(0), Singlefeature.size(1) * Singlefeature.size(2)) Singlefeature = SingleClusterFeat.cpu().detach().numpy() # SPFeat = np.sum(Singlefeature, axis=1, keepdims=False) / (len(allcood)) # ccc=Singlefeature.sum()/(len(aa)) cood = np.mean(allcood, axis=0, keepdims=True) feat.append(Singlefeature) coordinate.append(cood) # Sproject_map[Sproject_map > meanmaskvalue * (0.5)] = 1 project_map[i] = 
Sproject_map.reshape(project_map.size(1), project_map.size(2)) project_map = F.interpolate(project_map.unsqueeze(1), size=(imgs.size(2), imgs.size(3)), mode='bilinear', align_corners=False) * 255. coordinate = np.array(coordinate) coordinate = np.squeeze(coordinate) feat = np.array(feat) # tsne2 = manifold.TSNE(n_components=3, init='pca', random_state=501) # X_tsne2 = tsne2.fit_transform
if dr == 0:
random_line_split
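The getcood loop in the Labupdown.py cells thresholds each projection map at theh, collects the surviving (row, col) indices, and averages them into one coordinate per image. A small sketch of that threshold-and-average step; the map shape and threshold value here are illustrative only:

import torch
import numpy as np

pmap = torch.rand(28, 28)  # stand-in for one image's projection map
theh = 0.5                 # threshold, named as in the fragments
pmap[pmap < theh] = 0      # suppress weak responses
coords = torch.nonzero(pmap).cpu().numpy()       # surviving (row, col) pairs
center = np.mean(coords, axis=0, keepdims=True)  # mean coordinate, shape (1, 2)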
Labupdown.py
def getPmap(features, first_compo): featuresChange=features.permute(1,0,2,3) reshaped_features=featuresChange.reshape(featuresChange.size(0),featuresChange.size(1)*featuresChange.size(2)*featuresChange.size(3)) projected_map = torch.matmul(first_compo.cuda().unsqueeze(0), reshaped_features.cuda()).view(1, features.size(0), -1) \ .view(features.size(0), features.size(2), features.size(3)) maxv = projected_map.max() minv = projected_map.min() projected_map *= (maxv + minv) / torch.abs(maxv + minv) return projected_map def getcood(features,BIGfeat,u,d,ur,ul,dr,dm,dl,theh): s1=0 s2=14 s3=28 s4=8 s5=20 if u == 0: project_mapup = torch.zeros(features[:, :, s1:s2, :].size(0), features[:, :, s1:s2, :].size(2), features[:, :, s1:s2, :].size(3)).cuda() elif u == 1: # project_mapup = torch.clamp(pca(features[:, :, s1:s2, :], allfeats=BIGfeat[:, :, s1:s2, :], updown='up'), min=0) VecUp=getVec(allfeats=BIGfeat[:, :, s1:s2, :]) torch.save(VecUp,'./VecUp.pkl') VecUp=torch.load('./VecUp.pkl') project_mapup=torch.clamp(getPmap(features[:, :, s1:s2, :],VecUp), min=0) maxv = project_mapup.view(project_mapup.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup /= maxv if d == 0: project_mapdown = torch.zeros(features[:, :, s2:s3, :].size(0), features[:, :, s2:s3, :].size(2), features[:, :, s2:s3, :].size(3)).cuda() elif d == 1: # project_mapdown = torch.clamp(pca(features[:, :, s2:s3, :], allfeats=BIGfeat[:, :, s2:s3, :], updown='down'), # min=0) VecDown=getVec(allfeats=BIGfeat[:, :, s2:s3, :]) torch.save(VecDown, './VecDown.pkl') VecDown = torch.load('./VecDown.pkl') project_mapdown=torch.clamp(getPmap(features[:, :, s2:s3, :],VecDown), min=0) maxv = project_mapdown.view(project_mapdown.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown /= maxv if ur == 0: project_mapup1 = torch.zeros(features[:, :, s1:s2, s1:s2].size(0), features[:, :, s1:s2, s1:s2].size(2), features[:, :, s1:s2, s1:s2].size(3)).cuda() # clamp 作用 小于0的,置为0 elif ur == 1: project_mapup1 = torch.clamp( pca(features[:, :, s1:s2, s1:s2], allfeats=BIGfeat[:, :, s1:s2, s1:s2], updown='up'), min=0) maxv = project_mapup1.view(project_mapup1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup1 /= maxv if ul == 0: project_mapup2 = torch.zeros(features[:, :, s1:s2, s2:s3].size(0), features[:, :, s1:s2, s2:s3].size(2), features[:, :, s1:s2, s2:s3].size(3)).cuda() elif ul == 1: project_mapup2 = torch.clamp( pca(features[:, :, s1:s2, s2:s3], allfeats=BIGfeat[:, :, s1:s2, s2:s3], updown='up'), min=0) maxv = project_mapup2.view(project_mapup2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup2 /= maxv if dr == 0: project_mapdown1 = torch.zeros(features[:, :, s2:s3, s1:s4].size(0), features[:, :, s2:s3, s1:s4].size(2), features[:, :, s2:s3, s1:s4].size(3)).cuda() elif dr == 1: project_mapdown1 = torch.clamp( pca(features[:, :, s2:s3, s1:s4], allfeats=BIGfeat[:, :, s2:s3, s1:s4], updown='down'), min=0) maxv = project_mapdown1.view(project_mapdown1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown1 /= maxv if dm == 0: project_mapdown2 = torch.zeros(features[:, :, s2:s3, s4:s5].size(0), features[:, :, s2:s3, s4:s5].size(2), features[:, :, s2:s3, s4:s5].size(3)).cuda() elif dm == 1: project_mapdown2 = torch.clamp( pca(features[:, :, s2:s3, s4:s5], allfeats=BIGfeat[:, :, s2:s3, s4:s5], updown='down'), min=0) maxv = project_mapdown2.view(project_mapdown2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown2 /= maxv if dl == 0: project_mapdown3 = torch.zeros(features[:, 
:, s2:s3, s5:s3].size(0), features[:, :, s2:s3, s5:s3].size(2), features[:, :, s2:s3, s5:s3].size(3)).cuda() elif dl == 1: project_mapdown3 = torch.clamp( pca(features[:, :, s2:s3, s5:s3], allfeats=BIGfeat[:, :, s2:s3, s5:s3], updown='down'), min=0) maxv = project_mapdown3.view(project_mapdown3.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown3 /= maxv if u == 0 and d == 0: project_mapup = torch.cat((project_mapup1, project_mapup2), 2) project_mapdown = torch.cat((project_mapdown1, project_mapdown2, project_mapdown3), 2) project_map = torch.cat((project_mapup, project_mapdown), 1) # project_map=F.adaptive_avg_pool2d(project_map,(7,7)) coordinate = [] feat = [] for i in range(project_map.size(0)): Sproject_map = project_map[i] Singlefeature = features[i, :, :, :] meanmaskvalue = Sproject_map.sum() / (Sproject_map.size(0) * Sproject_map.size(1)) Sproject_map[Sproject_map < theh] = 0 allcood = torch.nonzero(Sproject_map).cpu().numpy() Sproject_map = Sproject_map.unsqueeze(0) # Smask = Sproject_map.repeat(Singlefeature.size(0), 1, 1) SOproject_map = Sproject_map.squeeze() SOproject_map = SOproject_map.reshape(SOproject_map.size(0) * SOproject_map.size(1)) Singlefeature = Singlefeature.squeeze() Singlefeature = Singlefeature.permute(1, 2, 0) Singlefeature = Singlefeature.reshape(Singlefeature.size(0) * Singlefeature.size(1), Singlefeature.size(2)) cnt = 0 ClusterFeat = torch.zeros(Singlefeature.size(1)).unsqueeze(0) for j in range(SOproject_map.size(0)): if SOproject_map[j] > theh and cnt == 0: ClusterFeat = F.normalize(Singlefeature[j].unsqueeze(0)) cnt = cnt + 1 elif SOproject_map[j] >theh and cnt
allfeaturesChange = allfeats.permute(1, 0, 2, 3) allreshaped_features = allfeaturesChange.reshape(allfeaturesChange.size(0), allfeaturesChange.size(1) * allfeaturesChange.size( 2) * allfeaturesChange.size(3)) cov = torch.from_numpy(np.cov(allreshaped_features.cpu().detach())) cov = cov.type_as(allreshaped_features).cuda() eigval, eigvec = torch.eig(cov, eigenvectors=True) first_compo = eigvec[:, 0] return first_compo
identifier_body
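The getVec body in the record above extracts the first PCA direction of an N x C x H x W feature map by building a C x C channel covariance and calling torch.eig, which returns unsorted eigenpairs and has since been deprecated in favor of torch.linalg. A sketch of the same computation with the modern API, where sorted eigenvalues make the leading component explicit; the function name and the use of torch.cov are illustrative, not from the source:

import torch

def first_principal_component(feats: torch.Tensor) -> torch.Tensor:
    # feats: N x C x H x W, flattened to C x (N*H*W) as in getVec.
    flat = feats.permute(1, 0, 2, 3).reshape(feats.size(1), -1)
    cov = torch.cov(flat)  # C x C, symmetric
    # torch.linalg.eigh returns eigenvalues in ascending order, so the
    # last eigenvector is the direction of largest variance.
    eigvals, eigvecs = torch.linalg.eigh(cov)
    return eigvecs[:, -1]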
Labupdown.py
:], allfeats=BIGfeat[:, :, s1:s2, :], updown='up'), min=0) VecUp=getVec(allfeats=BIGfeat[:, :, s1:s2, :]) torch.save(VecUp,'./VecUp.pkl') VecUp=torch.load('./VecUp.pkl') project_mapup=torch.clamp(getPmap(features[:, :, s1:s2, :],VecUp), min=0) maxv = project_mapup.view(project_mapup.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup /= maxv if d == 0: project_mapdown = torch.zeros(features[:, :, s2:s3, :].size(0), features[:, :, s2:s3, :].size(2), features[:, :, s2:s3, :].size(3)).cuda() elif d == 1: # project_mapdown = torch.clamp(pca(features[:, :, s2:s3, :], allfeats=BIGfeat[:, :, s2:s3, :], updown='down'), # min=0) VecDown=getVec(allfeats=BIGfeat[:, :, s2:s3, :]) torch.save(VecDown, './VecDown.pkl') VecDown = torch.load('./VecDown.pkl') project_mapdown=torch.clamp(getPmap(features[:, :, s2:s3, :],VecDown), min=0) maxv = project_mapdown.view(project_mapdown.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown /= maxv if ur == 0: project_mapup1 = torch.zeros(features[:, :, s1:s2, s1:s2].size(0), features[:, :, s1:s2, s1:s2].size(2), features[:, :, s1:s2, s1:s2].size(3)).cuda() # clamp 作用 小于0的,置为0 elif ur == 1: project_mapup1 = torch.clamp( pca(features[:, :, s1:s2, s1:s2], allfeats=BIGfeat[:, :, s1:s2, s1:s2], updown='up'), min=0) maxv = project_mapup1.view(project_mapup1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup1 /= maxv if ul == 0: project_mapup2 = torch.zeros(features[:, :, s1:s2, s2:s3].size(0), features[:, :, s1:s2, s2:s3].size(2), features[:, :, s1:s2, s2:s3].size(3)).cuda() elif ul == 1: project_mapup2 = torch.clamp( pca(features[:, :, s1:s2, s2:s3], allfeats=BIGfeat[:, :, s1:s2, s2:s3], updown='up'), min=0) maxv = project_mapup2.view(project_mapup2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup2 /= maxv if dr == 0: project_mapdown1 = torch.zeros(features[:, :, s2:s3, s1:s4].size(0), features[:, :, s2:s3, s1:s4].size(2), features[:, :, s2:s3, s1:s4].size(3)).cuda() elif dr == 1: project_mapdown1 = torch.clamp( pca(features[:, :, s2:s3, s1:s4], allfeats=BIGfeat[:, :, s2:s3, s1:s4], updown='down'), min=0) maxv = project_mapdown1.view(project_mapdown1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown1 /= maxv if dm == 0: project_mapdown2 = torch.zeros(features[:, :, s2:s3, s4:s5].size(0), features[:, :, s2:s3, s4:s5].size(2), features[:, :, s2:s3, s4:s5].size(3)).cuda() elif dm == 1: project_mapdown2 = torch.clamp( pca(features[:, :, s2:s3, s4:s5], allfeats=BIGfeat[:, :, s2:s3, s4:s5], updown='down'), min=0) maxv = project_mapdown2.view(project_mapdown2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown2 /= maxv if dl == 0: project_mapdown3 = torch.zeros(features[:, :, s2:s3, s5:s3].size(0), features[:, :, s2:s3, s5:s3].size(2), features[:, :, s2:s3, s5:s3].size(3)).cuda() elif dl == 1: project_mapdown3 = torch.clamp( pca(features[:, :, s2:s3, s5:s3], allfeats=BIGfeat[:, :, s2:s3, s5:s3], updown='down'), min=0) maxv = project_mapdown3.view(project_mapdown3.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown3 /= maxv if u == 0 and d == 0: project_mapup = torch.cat((project_mapup1, project_mapup2), 2) project_mapdown = torch.cat((project_mapdown1, project_mapdown2, project_mapdown3), 2) project_map = torch.cat((project_mapup, project_mapdown), 1) # project_map=F.adaptive_avg_pool2d(project_map,(7,7)) coordinate = [] feat = [] for i in range(project_map.size(0)): Sproject_map = p
ClusterFeat = torch.cat((ClusterFeat, SF), 0) cnt = cnt + 1 ClusterFeatmean = torch.mean(ClusterFeat, dim=0) ClusterFeatmean = ClusterFeatmean.unsqueeze(0) ClusterFeatmax = torch.max(ClusterFeat, dim=0) ClusterFeatmax = ClusterFeatmax[0].unsqueeze(0) ClusterFeatmax = F.normalize(ClusterFeatmax) ClusterFeatmean = F.normalize(ClusterFeatmean) SingleClusterFeat = torch.cat((ClusterFeatmax, ClusterFeatmean), 1) SingleClusterFeat = SingleClusterFeat.squeeze() # Singlefeature = Singlefeature.mul(Smask) # Singlefeature = Singlefeature.reshape(Singlefeature.size(0), Singlefeature.size(1) * Singlefeature.size(2)) Singlefeature = SingleClusterFeat.cpu().detach().numpy() # SPFeat = np.sum(Singlefeature, axis=1, keepdims=False) / (len(allcood)) # ccc=Singlefeature.sum()/(len(aa)) cood = np.mean(allcood, axis=0, keepdims=True) feat.append(Singlefeature) coordinate.append(cood) # Sproject_map[Sproject_map > meanmaskvalue * (0.5)] = 1 project_map[i] = Sproject_map.reshape(project_map.size(1), project_map.size(2)) project_map = F.interpolate(project_map.unsqueeze(1), size=(imgs.size(2), imgs.size(3)), mode='bilinear', align_corners=False) * 255. coordinate = np.array(coordinate) coordinate = np.squeeze(coordinate) feat = np.array(feat) # tsne2 = manifold.TSNE(n_components=3, init='pca', random_state=501) # X_tsne2 = tsne
roject_map[i] Singlefeature = features[i, :, :, :] meanmaskvalue = Sproject_map.sum() / (Sproject_map.size(0) * Sproject_map.size(1)) Sproject_map[Sproject_map < theh] = 0 allcood = torch.nonzero(Sproject_map).cpu().numpy() Sproject_map = Sproject_map.unsqueeze(0) # Smask = Sproject_map.repeat(Singlefeature.size(0), 1, 1) SOproject_map = Sproject_map.squeeze() SOproject_map = SOproject_map.reshape(SOproject_map.size(0) * SOproject_map.size(1)) Singlefeature = Singlefeature.squeeze() Singlefeature = Singlefeature.permute(1, 2, 0) Singlefeature = Singlefeature.reshape(Singlefeature.size(0) * Singlefeature.size(1), Singlefeature.size(2)) cnt = 0 ClusterFeat = torch.zeros(Singlefeature.size(1)).unsqueeze(0) for j in range(SOproject_map.size(0)): if SOproject_map[j] > theh and cnt == 0: ClusterFeat = F.normalize(Singlefeature[j].unsqueeze(0)) cnt = cnt + 1 elif SOproject_map[j] >theh and cnt != 0: SF = F.normalize(Singlefeature[j].unsqueeze(0))
conditional_block
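getPmap, repeated across these Labupdown.py cells, projects every spatial position of an N x C x H x W feature map onto a single C-dimensional component; its callers then clamp at zero and normalize each image's map by its own maximum. A compact sketch of that project/clamp/normalize pipeline (the sign flip getPmap applies via (maxv + minv) / |maxv + minv| is omitted here):

import torch

def projection_map(feats: torch.Tensor, comp: torch.Tensor) -> torch.Tensor:
    n, c, h, w = feats.shape
    flat = feats.permute(1, 0, 2, 3).reshape(c, -1)  # C x (N*H*W)
    pmap = (comp.unsqueeze(0) @ flat).view(n, h, w)  # one scalar per position
    pmap = pmap.clamp(min=0)                         # keep positive responses
    maxv = pmap.view(n, -1).max(dim=1)[0].view(n, 1, 1)
    return pmap / maxv                               # per-image [0, 1] map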
Labupdown.py
(allfeats=None): # features: NCWH allfeaturesChange = allfeats.permute(1, 0, 2, 3) allreshaped_features = allfeaturesChange.reshape(allfeaturesChange.size(0), allfeaturesChange.size(1) * allfeaturesChange.size( 2) * allfeaturesChange.size(3)) cov = torch.from_numpy(np.cov(allreshaped_features.cpu().detach())) cov = cov.type_as(allreshaped_features).cuda() eigval, eigvec = torch.eig(cov, eigenvectors=True) first_compo = eigvec[:, 0] return first_compo def getPmap(features, first_compo): featuresChange=features.permute(1,0,2,3) reshaped_features=featuresChange.reshape(featuresChange.size(0),featuresChange.size(1)*featuresChange.size(2)*featuresChange.size(3)) projected_map = torch.matmul(first_compo.cuda().unsqueeze(0), reshaped_features.cuda()).view(1, features.size(0), -1) \ .view(features.size(0), features.size(2), features.size(3)) maxv = projected_map.max() minv = projected_map.min() projected_map *= (maxv + minv) / torch.abs(maxv + minv) return projected_map def getcood(features,BIGfeat,u,d,ur,ul,dr,dm,dl,theh): s1=0 s2=14 s3=28 s4=8 s5=20 if u == 0: project_mapup = torch.zeros(features[:, :, s1:s2, :].size(0), features[:, :, s1:s2, :].size(2), features[:, :, s1:s2, :].size(3)).cuda() elif u == 1: # project_mapup = torch.clamp(pca(features[:, :, s1:s2, :], allfeats=BIGfeat[:, :, s1:s2, :], updown='up'), min=0) VecUp=getVec(allfeats=BIGfeat[:, :, s1:s2, :]) torch.save(VecUp,'./VecUp.pkl') VecUp=torch.load('./VecUp.pkl') project_mapup=torch.clamp(getPmap(features[:, :, s1:s2, :],VecUp), min=0) maxv = project_mapup.view(project_mapup.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup /= maxv if d == 0: project_mapdown = torch.zeros(features[:, :, s2:s3, :].size(0), features[:, :, s2:s3, :].size(2), features[:, :, s2:s3, :].size(3)).cuda() elif d == 1: # project_mapdown = torch.clamp(pca(features[:, :, s2:s3, :], allfeats=BIGfeat[:, :, s2:s3, :], updown='down'), # min=0) VecDown=getVec(allfeats=BIGfeat[:, :, s2:s3, :]) torch.save(VecDown, './VecDown.pkl') VecDown = torch.load('./VecDown.pkl') project_mapdown=torch.clamp(getPmap(features[:, :, s2:s3, :],VecDown), min=0) maxv = project_mapdown.view(project_mapdown.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown /= maxv if ur == 0: project_mapup1 = torch.zeros(features[:, :, s1:s2, s1:s2].size(0), features[:, :, s1:s2, s1:s2].size(2), features[:, :, s1:s2, s1:s2].size(3)).cuda() # clamp 作用 小于0的,置为0 elif ur == 1: project_mapup1 = torch.clamp( pca(features[:, :, s1:s2, s1:s2], allfeats=BIGfeat[:, :, s1:s2, s1:s2], updown='up'), min=0) maxv = project_mapup1.view(project_mapup1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup1 /= maxv if ul == 0: project_mapup2 = torch.zeros(features[:, :, s1:s2, s2:s3].size(0), features[:, :, s1:s2, s2:s3].size(2), features[:, :, s1:s2, s2:s3].size(3)).cuda() elif ul == 1: project_mapup2 = torch.clamp( pca(features[:, :, s1:s2, s2:s3], allfeats=BIGfeat[:, :, s1:s2, s2:s3], updown='up'), min=0) maxv = project_mapup2.view(project_mapup2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup2 /= maxv if dr == 0: project_mapdown1 = torch.zeros(features[:, :, s2:s3, s1:s4].size(0), features[:, :, s2:s3, s1:s4].size(2), features[:, :, s2:s3, s1:s4].size(3)).cuda() elif dr == 1: project_mapdown1 = torch.clamp( pca(features[:, :, s2:s3, s1:s4], allfeats=BIGfeat[:, :, s2:s3, s1:s4], updown='down'), min=0) maxv = project_mapdown1.view(project_mapdown1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown1 /= maxv if dm == 0: project_mapdown2 
= torch.zeros(features[:, :, s2:s3, s4:s5].size(0), features[:, :, s2:s3, s4:s5].size(2), features[:, :, s2:s3, s4:s5].size(3)).cuda() elif dm == 1: project_mapdown2 = torch.clamp( pca(features[:, :, s2:s3, s4:s5], allfeats=BIGfeat[:, :, s2:s3, s4:s5], updown='down'), min=0) maxv = project_mapdown2.view(project_mapdown2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown2 /= maxv if dl == 0: project_mapdown3 = torch.zeros(features[:, :, s2:s3, s5:s3].size(0), features[:, :, s2:s3, s5:s3].size(2), features[:, :, s2:s3, s5:s3].size(3)).cuda() elif dl == 1: project_mapdown3 = torch.clamp( pca(features[:, :, s2:s3, s5:s3], allfeats=BIGfeat[:, :, s2:s3, s5:s3], updown='down'), min=0) maxv = project_mapdown3.view(project_mapdown3.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown3 /= maxv if u == 0 and d == 0: project_mapup = torch.cat((project_mapup1, project_mapup2), 2) project_mapdown = torch.cat((project_mapdown1, project_mapdown2, project_mapdown3), 2) project_map = torch.cat((project_mapup, project_mapdown), 1) # project_map=F.adaptive_avg_pool2d(project_map,(7,7)) coordinate = [] feat = [] for i in range(project_map.size(0)): Sproject_map = project_map[i] Singlefeature = features[i, :, :, :] meanmaskvalue = Sproject_map.sum() / (Sproject_map.size(0) * Sproject_map.size(1)) Sproject_map[Sproject_map < theh] = 0 allcood = torch.nonzero(Sproject_map).cpu().numpy() Sproject_map = Sproject_map.unsqueeze(0) # Smask = Sproject_map.repeat(Singlefeature.size(0), 1, 1) SOproject_map = Sproject_map.squeeze() SOproject_map = SOproject_map.reshape(SOproject_map.size(0) * SOproject_map.size(1)) Singlefeature = Singlefeature.squeeze() Singlefeature = Singlefeature.permute(1, 2, 0) Singlefeature = Singlefeature.reshape(Singlefeature.size(0) * Singlefeature.size(1), Singlefeature.size(2)) cnt = 0 ClusterFeat = torch.zeros(Singlefeature.size(1)).unsqueeze(0) for j in range(SOproject_map.size(0)): if SOproject_map[j] > theh and cnt == 0: ClusterFeat = F.normalize(Singlefeature[j].unsqueeze(0)) cnt = cnt +
getVec
identifier_name
main.rs
config .try_parse_untrusted_http_server_port() .expect("untrusted http server port to be a valid port number"); let initialization_handler_clone = initialization_handler.clone(); tokio_handle.spawn(async move { if let Err(e) = start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) .await { error!("Unexpected error in `is_initialized` server: {:?}", e); } }); // ------------------------------------------------------------------------ // Start prometheus metrics server. if config.enable_metrics_server { let enclave_wallet = Arc::new(EnclaveAccountInfoProvider::new(node_api.clone(), tee_accountid.clone())); let metrics_handler = Arc::new(MetricsHandler::new(enclave_wallet)); let metrics_server_port = config .try_parse_metrics_server_port() .expect("metrics server port to be a valid port number"); tokio_handle.spawn(async move { if let Err(e) = start_metrics_server(metrics_handler, metrics_server_port).await { error!("Unexpected error in Prometheus metrics server: {:?}", e); } }); } // ------------------------------------------------------------------------ // Start trusted worker rpc server if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker { let direct_invocation_server_addr = config.trusted_worker_url_internal(); let enclave_for_direct_invocation = enclave.clone(); thread::spawn(move || { println!( "[+] Trusted RPC direct invocation server listening on {}", direct_invocation_server_addr ); enclave_for_direct_invocation .init_direct_invocation_server(direct_invocation_server_addr) .unwrap(); println!("[+] RPC direct invocation server shut down"); }); } // ------------------------------------------------------------------------ // Start untrusted worker rpc server. // i.e move sidechain block importing to trusted worker. if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { sidechain_start_untrusted_rpc_server( &config, enclave.clone(), sidechain_storage.clone(), tokio_handle, ); } // ------------------------------------------------------------------------ // Init parentchain specific stuff. Needed for parentchain communication. let parentchain_handler = Arc::new(ParentchainHandler::new(node_api.clone(), enclave.clone())); let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); let nonce = node_api.get_nonce_of(&tee_accountid).unwrap(); info!("Enclave nonce = {:?}", nonce); enclave .set_nonce(nonce) .expect("Could not set nonce of enclave. Returning here..."); let metadata = node_api.metadata.clone(); let runtime_spec_version = node_api.runtime_version.spec_version; let runtime_transaction_version = node_api.runtime_version.transaction_version; enclave .set_node_metadata( NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), ) .expect("Could not set the node metadata in the enclave"); // ------------------------------------------------------------------------ // Perform a remote attestation and get an unchecked extrinsic back. let trusted_url = config.trusted_worker_url_external(); if skip_ra { println!( "[!] skipping remote attestation. Registering enclave without attestation report." ); } else { println!("[!] 
creating remote attestation report and create enclave register extrinsic."); }; let uxt = enclave.perform_ra(&trusted_url, skip_ra).unwrap(); let mut xthex = hex::encode(uxt); xthex.insert_str(0, "0x"); // Account funds if let Err(x) = setup_account_funding(&node_api, &tee_accountid, xthex.clone(), is_development_mode) { error!("Starting worker failed: {:?}", x); // Return without registering the enclave. This will fail and the transaction will be banned for 30min. return } println!("[>] Register the enclave (send the extrinsic)"); let register_enclave_xt_hash = node_api.send_extrinsic(xthex, XtStatus::Finalized).unwrap(); println!("[<] Extrinsic got finalized. Hash: {:?}\n", register_enclave_xt_hash); let register_enclave_xt_header = node_api.get_header(register_enclave_xt_hash).unwrap().unwrap(); let we_are_primary_validateer = we_are_primary_validateer(&node_api, &register_enclave_xt_header).unwrap(); if we_are_primary_validateer { println!("[+] We are the primary validateer"); } else { println!("[+] We are NOT the primary validateer"); } initialization_handler.registered_on_parentchain(); // ------------------------------------------------------------------------ // initialize teeracle interval #[cfg(feature = "teeracle")] if WorkerModeProvider::worker_mode() == WorkerMode::Teeracle { start_interval_market_update( &node_api, run_config.teeracle_update_interval, enclave.as_ref(), &teeracle_tokio_handle, ); } if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { println!("*** [+] Finished syncing light client, syncing parentchain..."); // Syncing all parentchain blocks, this might take a while.. let mut last_synced_header = parentchain_handler.sync_parentchain(last_synced_header).unwrap(); // ------------------------------------------------------------------------ // Initialize the sidechain if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { last_synced_header = sidechain_init_block_production( enclave, &register_enclave_xt_header, we_are_primary_validateer, parentchain_handler.clone(), sidechain_storage, &last_synced_header, ) .unwrap(); } // ------------------------------------------------------------------------ // start parentchain syncing loop (subscribe to header updates) thread::Builder::new() .name("parentchain_sync_loop".to_owned()) .spawn(move || { if let Err(e) = subscribe_to_parentchain_new_headers(parentchain_handler, last_synced_header) { error!("Parentchain block syncing terminated with a failure: {:?}", e); } println!("[!] Parentchain block syncing has terminated"); }) .unwrap(); } // ------------------------------------------------------------------------ if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { spawn_worker_for_shard_polling(shard, node_api.clone(), initialization_handler); } // ------------------------------------------------------------------------ // subscribe to events and react on firing println!("*** Subscribing to events"); let (sender, receiver) = channel(); let sender2 = sender.clone(); let _eventsubscriber = thread::Builder::new() .name("eventsubscriber".to_owned()) .spawn(move || { node_api.subscribe_events(sender2).unwrap(); }) .unwrap(); println!("[+] Subscribed to events. waiting..."); let timeout = Duration::from_millis(10); loop { if let Ok(msg) = receiver.recv_timeout(timeout) { if let Ok(events) = parse_events(msg.clone()) { print_events(events, sender.clone()) } } } } /// Start polling loop to wait until we have a worker for a shard registered on /// the parentchain (TEEREX WorkerForShard). 
This is the prerequisite to be /// considered initialized and ready for the next worker to start (in sidechain mode only). fn spawn_worker_for_shard_polling<InitializationHandler>( shard: &ShardIdentifier, node_api: ParentchainApi, initialization_handler: Arc<InitializationHandler>, ) where InitializationHandler: TrackInitialization + Sync + Send + 'static, { let shard_for_initialized = *shard; thread::spawn(move || { const POLL_INTERVAL_SECS: u64 = 2; loop { info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); if let Ok(Some(_)) = node_api.worker_for_shard(&shard_for_initialized, None) { // Set that the service is initialized. initialization_handler.worker_for_shard_registered(); println!("[+] Found `WorkerForShard` on parentchain state"); break } thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); } }); } type Events = Vec<frame_system::EventRecord<Event, Hash>>; fn parse_events(event: String) -> Result<Events, String> { let _unhex = Vec::from_hex(event).map_err(|_| "Decoding Events Failed".to_string())?; let mut _er_enc = _unhex.as_slice(); Events::decode(&mut _er_enc).map_err(|_| "Decoding Events Failed".to_string()) } fn print_events(events: Events, _sender: Sender<String>)
{ for evr in &events { debug!("Decoded: phase = {:?}, event = {:?}", evr.phase, evr.event); match &evr.event { Event::Balances(be) => { info!("[+] Received balances event"); debug!("{:?}", be); match &be { pallet_balances::Event::Transfer { from: transactor, to: dest, amount: value, } => { debug!(" Transactor: {:?}", transactor.to_ss58check()); debug!(" Destination: {:?}", dest.to_ss58check()); debug!(" Value: {:?}", value); }, _ => { trace!("Ignoring unsupported balances event"); },
identifier_body
main.rs
chain(); // ------------------------------------------------------------------------ // initialize teeracle interval #[cfg(feature = "teeracle")] if WorkerModeProvider::worker_mode() == WorkerMode::Teeracle { start_interval_market_update( &node_api, run_config.teeracle_update_interval, enclave.as_ref(), &teeracle_tokio_handle, ); } if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { println!("*** [+] Finished syncing light client, syncing parentchain..."); // Syncing all parentchain blocks, this might take a while.. let mut last_synced_header = parentchain_handler.sync_parentchain(last_synced_header).unwrap(); // ------------------------------------------------------------------------ // Initialize the sidechain if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { last_synced_header = sidechain_init_block_production( enclave, &register_enclave_xt_header, we_are_primary_validateer, parentchain_handler.clone(), sidechain_storage, &last_synced_header, ) .unwrap(); } // ------------------------------------------------------------------------ // start parentchain syncing loop (subscribe to header updates) thread::Builder::new() .name("parentchain_sync_loop".to_owned()) .spawn(move || { if let Err(e) = subscribe_to_parentchain_new_headers(parentchain_handler, last_synced_header) { error!("Parentchain block syncing terminated with a failure: {:?}", e); } println!("[!] Parentchain block syncing has terminated"); }) .unwrap(); } // ------------------------------------------------------------------------ if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { spawn_worker_for_shard_polling(shard, node_api.clone(), initialization_handler); } // ------------------------------------------------------------------------ // subscribe to events and react on firing println!("*** Subscribing to events"); let (sender, receiver) = channel(); let sender2 = sender.clone(); let _eventsubscriber = thread::Builder::new() .name("eventsubscriber".to_owned()) .spawn(move || { node_api.subscribe_events(sender2).unwrap(); }) .unwrap(); println!("[+] Subscribed to events. waiting..."); let timeout = Duration::from_millis(10); loop { if let Ok(msg) = receiver.recv_timeout(timeout) { if let Ok(events) = parse_events(msg.clone()) { print_events(events, sender.clone()) } } } } /// Start polling loop to wait until we have a worker for a shard registered on /// the parentchain (TEEREX WorkerForShard). This is the prerequisite to be /// considered initialized and ready for the next worker to start (in sidechain mode only). fn spawn_worker_for_shard_polling<InitializationHandler>( shard: &ShardIdentifier, node_api: ParentchainApi, initialization_handler: Arc<InitializationHandler>, ) where InitializationHandler: TrackInitialization + Sync + Send + 'static, { let shard_for_initialized = *shard; thread::spawn(move || { const POLL_INTERVAL_SECS: u64 = 2; loop { info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); if let Ok(Some(_)) = node_api.worker_for_shard(&shard_for_initialized, None) { // Set that the service is initialized. 
initialization_handler.worker_for_shard_registered(); println!("[+] Found `WorkerForShard` on parentchain state"); break } thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); } }); } type Events = Vec<frame_system::EventRecord<Event, Hash>>; fn parse_events(event: String) -> Result<Events, String> { let _unhex = Vec::from_hex(event).map_err(|_| "Decoding Events Failed".to_string())?; let mut _er_enc = _unhex.as_slice(); Events::decode(&mut _er_enc).map_err(|_| "Decoding Events Failed".to_string()) } fn print_events(events: Events, _sender: Sender<String>) { for evr in &events { debug!("Decoded: phase = {:?}, event = {:?}", evr.phase, evr.event); match &evr.event { Event::Balances(be) => { info!("[+] Received balances event"); debug!("{:?}", be); match &be { pallet_balances::Event::Transfer { from: transactor, to: dest, amount: value, } => { debug!(" Transactor: {:?}", transactor.to_ss58check()); debug!(" Destination: {:?}", dest.to_ss58check()); debug!(" Value: {:?}", value); }, _ => { trace!("Ignoring unsupported balances event"); }, } }, Event::Teerex(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teerex::Event::AddedEnclave(sender, worker_url) => { println!("[+] Received AddedEnclave event"); println!(" Sender (Worker): {:?}", sender); println!(" Registered URL: {:?}", str::from_utf8(worker_url).unwrap()); }, my_node_runtime::pallet_teerex::Event::Forwarded(shard) => { println!( "[+] Received trusted call for shard {}", shard.encode().to_base58() ); }, my_node_runtime::pallet_teerex::Event::ProcessedParentchainBlock( sender, block_hash, merkle_root, block_number, ) => { info!("[+] Received ProcessedParentchainBlock event"); debug!(" From: {:?}", sender); debug!(" Block Hash: {:?}", hex::encode(block_hash)); debug!(" Merkle Root: {:?}", hex::encode(merkle_root)); debug!(" Block Number: {:?}", block_number); }, my_node_runtime::pallet_teerex::Event::ShieldFunds(incognito_account) => { info!("[+] Received ShieldFunds event"); debug!(" For: {:?}", incognito_account); }, my_node_runtime::pallet_teerex::Event::UnshieldedFunds(incognito_account) => { info!("[+] Received UnshieldedFunds event"); debug!(" For: {:?}", incognito_account); }, _ => { trace!("Ignoring unsupported pallet_teerex event"); }, } }, #[cfg(feature = "teeracle")] Event::Teeracle(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teeracle::Event::ExchangeRateUpdated( source, currency, new_value, ) => { println!("[+] Received ExchangeRateUpdated event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); println!(" Exchange rate: {:?}", new_value); }, my_node_runtime::pallet_teeracle::Event::ExchangeRateDeleted( source, currency, ) => { println!("[+] Received ExchangeRateDeleted event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); }, my_node_runtime::pallet_teeracle::Event::AddedToWhitelist( source, mrenclave, ) => { println!("[+] Received AddedToWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, my_node_runtime::pallet_teeracle::Event::RemovedFromWhitelist( source, mrenclave, ) => { println!("[+] Received RemovedFromWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, _ => { trace!("Ignoring unsupported pallet_teeracle event"); }, } }, #[cfg(feature = "sidechain")] Event::Sidechain(re) => match &re { my_node_runtime::pallet_sidechain::Event::ProposedSidechainBlock( sender, payload, ) => { info!("[+] Received ProposedSidechainBlock event"); 
debug!(" From: {:?}", sender); debug!(" Payload: {:?}", hex::encode(payload)); }, _ => { trace!("Ignoring unsupported pallet_sidechain event"); }, }, _ => { trace!("Ignoring event {:?}", evr); }, } } } /// Subscribe to the node API finalized heads stream and trigger a parent chain sync /// upon receiving a new header. fn subscribe_to_parentchain_new_headers<E: EnclaveBase + Sidechain>( parentchain_handler: Arc<ParentchainHandler<ParentchainApi, E>>, mut last_synced_header: Header, ) -> Result<(), Error> { let (sender, receiver) = channel();
//TODO: this should be implemented by parentchain_handler directly, and not via // exposed parentchain_api. Blocked by https://github.com/scs/substrate-api-client/issues/267. parentchain_handler .parentchain_api()
random_line_split
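`subscribe_to_parentchain_new_headers` above blocks on an mpsc channel that the node API fills with JSON-encoded finalized headers. A self-contained sketch of that pattern follows, with a stub subscription thread and a minimal `Header` stand-in (both hypothetical; the real types come from the runtime and substrate-api-client, and the real loop calls `sync_parentchain` instead of just printing):

```rust
use std::sync::mpsc::{channel, Sender};
use std::thread;
use std::time::Duration;

// Hypothetical stand-in for the runtime's Header type.
#[derive(serde::Deserialize)]
struct Header {
    number: u32,
}

// Stand-in for parentchain_api().subscribe_finalized_heads(sender): pushes one
// JSON-encoded header into the channel every block interval.
fn subscribe_finalized_heads(sender: Sender<String>) {
    thread::spawn(move || {
        for n in 0.. {
            if sender.send(format!(r#"{{"number":{}}}"#, n)).is_err() {
                break; // receiver dropped, stop the feed
            }
            thread::sleep(Duration::from_secs(6));
        }
    });
}

fn main() {
    let (sender, receiver) = channel();
    subscribe_finalized_heads(sender);
    // Same shape as the loop above: block on the channel, deserialize the
    // JSON header, then trigger a parentchain sync for the new height.
    while let Ok(raw) = receiver.recv() {
        let header: Header = serde_json::from_str(&raw).unwrap();
        println!("[+] Received finalized header update ({}), syncing parent chain...", header.number);
    }
}
```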
main.rs
ed_header, ) .unwrap(); } // ------------------------------------------------------------------------ // start parentchain syncing loop (subscribe to header updates) thread::Builder::new() .name("parentchain_sync_loop".to_owned()) .spawn(move || { if let Err(e) = subscribe_to_parentchain_new_headers(parentchain_handler, last_synced_header) { error!("Parentchain block syncing terminated with a failure: {:?}", e); } println!("[!] Parentchain block syncing has terminated"); }) .unwrap(); } // ------------------------------------------------------------------------ if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { spawn_worker_for_shard_polling(shard, node_api.clone(), initialization_handler); } // ------------------------------------------------------------------------ // subscribe to events and react when they fire println!("*** Subscribing to events"); let (sender, receiver) = channel(); let sender2 = sender.clone(); let _eventsubscriber = thread::Builder::new() .name("eventsubscriber".to_owned()) .spawn(move || { node_api.subscribe_events(sender2).unwrap(); }) .unwrap(); println!("[+] Subscribed to events, waiting..."); let timeout = Duration::from_millis(10); loop { if let Ok(msg) = receiver.recv_timeout(timeout) { if let Ok(events) = parse_events(msg.clone()) { print_events(events, sender.clone()) } } } } /// Start polling loop to wait until we have a worker for a shard registered on /// the parentchain (TEEREX WorkerForShard). This is the prerequisite to be /// considered initialized and ready for the next worker to start (in sidechain mode only). fn spawn_worker_for_shard_polling<InitializationHandler>( shard: &ShardIdentifier, node_api: ParentchainApi, initialization_handler: Arc<InitializationHandler>, ) where InitializationHandler: TrackInitialization + Sync + Send + 'static, { let shard_for_initialized = *shard; thread::spawn(move || { const POLL_INTERVAL_SECS: u64 = 2; loop { info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); if let Ok(Some(_)) = node_api.worker_for_shard(&shard_for_initialized, None) { // Set that the service is initialized. 
initialization_handler.worker_for_shard_registered(); println!("[+] Found `WorkerForShard` on parentchain state"); break } thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); } }); } type Events = Vec<frame_system::EventRecord<Event, Hash>>; fn parse_events(event: String) -> Result<Events, String> { let _unhex = Vec::from_hex(event).map_err(|_| "Decoding Events Failed".to_string())?; let mut _er_enc = _unhex.as_slice(); Events::decode(&mut _er_enc).map_err(|_| "Decoding Events Failed".to_string()) } fn print_events(events: Events, _sender: Sender<String>) { for evr in &events { debug!("Decoded: phase = {:?}, event = {:?}", evr.phase, evr.event); match &evr.event { Event::Balances(be) => { info!("[+] Received balances event"); debug!("{:?}", be); match &be { pallet_balances::Event::Transfer { from: transactor, to: dest, amount: value, } => { debug!(" Transactor: {:?}", transactor.to_ss58check()); debug!(" Destination: {:?}", dest.to_ss58check()); debug!(" Value: {:?}", value); }, _ => { trace!("Ignoring unsupported balances event"); }, } }, Event::Teerex(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teerex::Event::AddedEnclave(sender, worker_url) => { println!("[+] Received AddedEnclave event"); println!(" Sender (Worker): {:?}", sender); println!(" Registered URL: {:?}", str::from_utf8(worker_url).unwrap()); }, my_node_runtime::pallet_teerex::Event::Forwarded(shard) => { println!( "[+] Received trusted call for shard {}", shard.encode().to_base58() ); }, my_node_runtime::pallet_teerex::Event::ProcessedParentchainBlock( sender, block_hash, merkle_root, block_number, ) => { info!("[+] Received ProcessedParentchainBlock event"); debug!(" From: {:?}", sender); debug!(" Block Hash: {:?}", hex::encode(block_hash)); debug!(" Merkle Root: {:?}", hex::encode(merkle_root)); debug!(" Block Number: {:?}", block_number); }, my_node_runtime::pallet_teerex::Event::ShieldFunds(incognito_account) => { info!("[+] Received ShieldFunds event"); debug!(" For: {:?}", incognito_account); }, my_node_runtime::pallet_teerex::Event::UnshieldedFunds(incognito_account) => { info!("[+] Received UnshieldedFunds event"); debug!(" For: {:?}", incognito_account); }, _ => { trace!("Ignoring unsupported pallet_teerex event"); }, } }, #[cfg(feature = "teeracle")] Event::Teeracle(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teeracle::Event::ExchangeRateUpdated( source, currency, new_value, ) => { println!("[+] Received ExchangeRateUpdated event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); println!(" Exchange rate: {:?}", new_value); }, my_node_runtime::pallet_teeracle::Event::ExchangeRateDeleted( source, currency, ) => { println!("[+] Received ExchangeRateDeleted event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); }, my_node_runtime::pallet_teeracle::Event::AddedToWhitelist( source, mrenclave, ) => { println!("[+] Received AddedToWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, my_node_runtime::pallet_teeracle::Event::RemovedFromWhitelist( source, mrenclave, ) => { println!("[+] Received RemovedFromWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, _ => { trace!("Ignoring unsupported pallet_teeracle event"); }, } }, #[cfg(feature = "sidechain")] Event::Sidechain(re) => match &re { my_node_runtime::pallet_sidechain::Event::ProposedSidechainBlock( sender, payload, ) => { info!("[+] Received ProposedSidechainBlock event"); 
debug!(" From: {:?}", sender); debug!(" Payload: {:?}", hex::encode(payload)); }, _ => { trace!("Ignoring unsupported pallet_sidechain event"); }, }, _ => { trace!("Ignoring event {:?}", evr); }, } } } /// Subscribe to the node API finalized heads stream and trigger a parent chain sync /// upon receiving a new header. fn subscribe_to_parentchain_new_headers<E: EnclaveBase + Sidechain>( parentchain_handler: Arc<ParentchainHandler<ParentchainApi, E>>, mut last_synced_header: Header, ) -> Result<(), Error> { let (sender, receiver) = channel(); //TODO: this should be implemented by parentchain_handler directly, and not via // exposed parentchain_api. Blocked by https://github.com/scs/substrate-api-client/issues/267. parentchain_handler .parentchain_api() .subscribe_finalized_heads(sender) .map_err(Error::ApiClient)?; loop { let new_header: Header = match receiver.recv() { Ok(header_str) => serde_json::from_str(&header_str).map_err(Error::Serialization), Err(e) => Err(Error::ApiSubscriptionDisconnected(e)), }?; println!( "[+] Received finalized header update ({}), syncing parent chain...", new_header.number ); last_synced_header = parentchain_handler.sync_parentchain(last_synced_header)?; } } /// Get the public signing key of the TEE. fn enclave_account<E: EnclaveBase>(enclave_api: &E) -> AccountId32 { let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); AccountId32::from(*tee_public.as_array_ref()) } /// Checks if we are the first validateer to register on the parentchain. fn
we_are_primary_validateer
identifier_name
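The polling pattern in `spawn_worker_for_shard_polling` — spawn a thread, query parentchain state at a fixed interval, break once the registry entry exists — can be sketched standalone as below. The `worker_for_shard` stub is hypothetical and stands in for the node API call, which in the real code returns `Result<Option<_>, _>`:

```rust
use std::thread;
use std::time::Duration;

// Hypothetical stub for node_api.worker_for_shard(...): pretends the worker
// shows up in parentchain state on the third poll.
fn worker_for_shard(attempt: u32) -> Option<u32> {
    if attempt >= 3 { Some(42) } else { None }
}

fn main() {
    const POLL_INTERVAL_SECS: u64 = 2;
    let handle = thread::spawn(move || {
        let mut attempt = 0;
        loop {
            // Same shape as spawn_worker_for_shard_polling: poll, and break as
            // soon as the registry entry exists.
            if let Some(worker) = worker_for_shard(attempt) {
                println!("[+] Found `WorkerForShard` on parentchain state: {}", worker);
                break;
            }
            attempt += 1;
            thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS));
        }
    });
    handle.join().unwrap();
}
```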
main.rs
else if matches.is_present("shielding-key") { setup::generate_shielding_key_file(enclave.as_ref()); } else if matches.is_present("signing-key") { setup::generate_signing_key_file(enclave.as_ref()); } else if matches.is_present("dump-ra") { info!("*** Perform RA and dump cert to disk"); enclave.dump_ra_to_disk().unwrap(); } else if matches.is_present("mrenclave") { println!("{}", enclave.get_mrenclave().unwrap().encode().to_base58()); } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { setup::init_shard( enclave.as_ref(), &extract_shard(&sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), ); } else if let Some(sub_matches) = matches.subcommand_matches("test") { if sub_matches.is_present("provisioning-server") { println!("*** Running Enclave MU-RA TLS server\n"); enclave_run_state_provisioning_server( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url(), sub_matches.is_present("skip-ra"), ); println!("[+] Done!"); } else if sub_matches.is_present("provisioning-client") { println!("*** Running Enclave MU-RA TLS client\n"); let shard = extract_shard( &sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref(), ); enclave_request_state_provisioning( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url_external(), &shard, sub_matches.is_present("skip-ra"), ) .unwrap(); println!("[+] Done!"); } else { tests::run_enclave_tests(sub_matches); } } else { println!("For options: use --help"); } } /// FIXME: needs some discussion (restructuring?) #[allow(clippy::too_many_arguments)] fn start_worker<E, T, D, InitializationHandler, WorkerModeProvider>( config: Config, shard: &ShardIdentifier, enclave: Arc<E>, sidechain_storage: Arc<D>, node_api: ParentchainApi, tokio_handle_getter: Arc<T>, initialization_handler: Arc<InitializationHandler>, ) where T: GetTokioHandle, E: EnclaveBase + DirectRequest + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, D: BlockPruner + FetchBlocks<SignedSidechainBlock> + Sync + Send + 'static, InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, WorkerModeProvider: ProvideWorkerMode, { println!("Integritee Worker v{}", VERSION); info!("starting worker on shard {}", shard.encode().to_base58()); // ------------------------------------------------------------------------ // check for required files check_files(); // ------------------------------------------------------------------------ // initialize the enclave let mrenclave = enclave.get_mrenclave().unwrap(); println!("MRENCLAVE={}", mrenclave.to_base58()); println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); // ------------------------------------------------------------------------ // let new workers call us for key provisioning println!("MU-RA server listening on {}", config.mu_ra_url()); let run_config = config.run_config.clone().expect("Run config missing"); let skip_ra = run_config.skip_ra; let is_development_mode = run_config.dev; let ra_url = config.mu_ra_url(); let enclave_api_key_prov = enclave.clone(); thread::spawn(move || { enclave_run_state_provisioning_server( enclave_api_key_prov.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &ra_url, skip_ra, ); info!("State provisioning server stopped."); }); let tokio_handle = tokio_handle_getter.get_handle(); #[cfg(feature = "teeracle")] let teeracle_tokio_handle = tokio_handle.clone(); // ------------------------------------------------------------------------ // Get the 
public key of our TEE. let tee_accountid = enclave_account(enclave.as_ref()); println!("Enclave account {:} ", &tee_accountid.to_ss58check()); // ------------------------------------------------------------------------ // Start `is_initialized` server. let untrusted_http_server_port = config .try_parse_untrusted_http_server_port() .expect("untrusted http server port to be a valid port number"); let initialization_handler_clone = initialization_handler.clone(); tokio_handle.spawn(async move { if let Err(e) = start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) .await { error!("Unexpected error in `is_initialized` server: {:?}", e); } }); // ------------------------------------------------------------------------ // Start prometheus metrics server. if config.enable_metrics_server { let enclave_wallet = Arc::new(EnclaveAccountInfoProvider::new(node_api.clone(), tee_accountid.clone())); let metrics_handler = Arc::new(MetricsHandler::new(enclave_wallet)); let metrics_server_port = config .try_parse_metrics_server_port() .expect("metrics server port to be a valid port number"); tokio_handle.spawn(async move { if let Err(e) = start_metrics_server(metrics_handler, metrics_server_port).await { error!("Unexpected error in Prometheus metrics server: {:?}", e); } }); } // ------------------------------------------------------------------------ // Start trusted worker rpc server if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker { let direct_invocation_server_addr = config.trusted_worker_url_internal(); let enclave_for_direct_invocation = enclave.clone(); thread::spawn(move || { println!( "[+] Trusted RPC direct invocation server listening on {}", direct_invocation_server_addr ); enclave_for_direct_invocation .init_direct_invocation_server(direct_invocation_server_addr) .unwrap(); println!("[+] RPC direct invocation server shut down"); }); } // ------------------------------------------------------------------------ // Start untrusted worker rpc server. // i.e move sidechain block importing to trusted worker. if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { sidechain_start_untrusted_rpc_server( &config, enclave.clone(), sidechain_storage.clone(), tokio_handle, ); } // ------------------------------------------------------------------------ // Init parentchain specific stuff. Needed for parentchain communication. let parentchain_handler = Arc::new(ParentchainHandler::new(node_api.clone(), enclave.clone())); let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); let nonce = node_api.get_nonce_of(&tee_accountid).unwrap(); info!("Enclave nonce = {:?}", nonce); enclave .set_nonce(nonce) .expect("Could not set nonce of enclave. Returning here..."); let metadata = node_api.metadata.clone(); let runtime_spec_version = node_api.runtime_version.spec_version; let runtime_transaction_version = node_api.runtime_version.transaction_version; enclave .set_node_metadata( NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), ) .expect("Could not set the node metadata in the enclave"); // ------------------------------------------------------------------------ // Perform a remote attestation and get an unchecked extrinsic back. let trusted_url = config.trusted_worker_url_external(); if skip_ra { println!( "[!] skipping remote attestation. Registering enclave without attestation report." ); } else { println!("[!] 
creating remote attestation report and enclave registration extrinsic."); }; let uxt = enclave.perform_ra(&trusted_url, skip_ra).unwrap(); let mut xthex = hex::encode(uxt); xthex.insert_str(0, "0x"); // Account funds if let Err(x) = setup_account_funding(&node_api, &tee_accountid, xthex.clone(), is_development_mode) { error!("Starting worker failed: {:?}", x); // Return without registering the enclave. Sending the extrinsic without funds would fail, and the transaction would be banned for 30 min. return } println!("[>] Register the enclave (send the extrinsic)"); let register_enclave_xt_hash = node_api.send_extrinsic(xthex, XtStatus::Finalized).unwrap(); println!("[<] Extrinsic got finalized. Hash: {:?}\n", register_enclave_xt_hash); let register_enclave_xt_header = node_api.get_header(register_enclave_xt_hash).unwrap().unwrap(); let we_are_primary_validateer = we_are_primary_validateer(&node_api, &register_enclave_xt_header).unwrap(); if we_are_primary_validateer { println!("[+] We are the primary validateer"); } else { println!("[+] We are NOT the primary validateer"); } initialization_handler.registered_on_parentchain(); // ------------------------------------------------------------------------ // initialize teeracle interval #[cfg(feature = "teeracle")] if WorkerModeProvider::worker_mode() == WorkerMode::Teeracle
{ start_interval_market_update( &node_api, run_config.teeracle_update_interval, enclave.as_ref(), &teeracle_tokio_handle, ); }
conditional_block
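Before submission, the registration extrinsic above is hex-encoded and prefixed with "0x" (`hex::encode` followed by `xthex.insert_str(0, "0x")`). A tiny standalone sketch of that encoding step, assuming the `hex` crate:

```rust
// Encode a SCALE-serialized extrinsic as a "0x"-prefixed hex string, the
// format expected by send_extrinsic in the code above.
fn to_prefixed_hex(encoded_extrinsic: Vec<u8>) -> String {
    let mut xthex = hex::encode(encoded_extrinsic);
    xthex.insert_str(0, "0x");
    xthex
}

fn main() {
    assert_eq!(to_prefixed_hex(vec![0xde, 0xad, 0xbe, 0xef]), "0xdeadbeef");
}
```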
products.page.ts
allSelectedProducts = []// contain all products are selected idAccount: Number newProducts:Products[]= new Array() isPageForUpdateFollowList: boolean categoryProduct: number; nameProduct: string; currentItems =new Array() map= new Map() newListId = [] addProductsToList:boolean typeListId: number; typeListName: string; selectedItemsLength: Number selectedArrayId: Number[] =[] allChecked: boolean = false isFromBuyList: boolean oneTimes=[] listId: number oneTime: ProductToList[]=[] constructor(private _location: Location, private productService: ProductsService, private followUpService: FollowUpService, private listService: ListsService, private router: Router, private route: ActivatedRoute, private alertController: AlertController) { this.route.params.subscribe(params => { this.isFromBuyList = JSON.parse(params['isFromBuyList']) this.listId = +params['listId'] console.log(this.isFromBuyList) this.isPageForUpdateFollowList = params['isForFollowList']; this.addProductsToList=params['addProducts'] this.typeListId = +params['typeListId']
} }); } ngOnInit() { this.idAccount =+ localStorage.getItem('accountId') console.log(this.idAccount) this.getAllProducts() if(this.isPageForUpdateFollowList) this.getSortedFolowList() if(this.addProductsToList) this.GetAllProductsByTypeId() if(this.isFromBuyList) this.selectedsArray = this.oneTimes } // get all products of account from DB getAllProducts() { this.productService.getAllProducts(this.idAccount).subscribe(res => { this.o = Array.of(res) this.selectedItemsLength = this.selectedsArray.length Object.keys(res).forEach(category => {this.arrKind.push(category)//arrKind if(this.selectedItemsLength == 0) this.selectedsArray.push([]) }); this.selectedItemsLength = this.selectedsArray.length if(this.allSelectedProducts.length>0) this.setSelectedArray(this.allSelectedProducts.map(p=>p)) Object.values(res).forEach(element => {this.arrProducts.push(element)});// arrProducts console.log(this.arrProducts) // console.log(" nnncj"+this.arrProducts.map(p=>p.ProductId)) }); } // if enter from followUp page, bring follow products from DB getSortedFolowList() { this.followUpService.getSortedFolowList(this.idAccount).subscribe(res=> { this.selectedsArray=res; console.log(this.selectedsArray)}) } // if enter from createList page, bring already selected products from create page getProductsFromCreateList() { this.setSelectedArray(this.allSelectedProducts.map(p=>p)) } GetAllProductsByTypeId() { this.listService.GetAllProductsByTypeId(this.typeListId).subscribe(res=> { this.setSelectedArray(res.map(p=>p.ProductId)) }) } setSelectedArray(res:number[]) { this.productService.getProductsByIdProduct(res).subscribe(products=>{ this.selectedsArray = [] console.log(products) console.log(Object.values(products)) for (let i = 0; i < this.arrKind.length; i++) { if(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) this.selectedsArray.push(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) else this.selectedsArray.push([]) } console.log(this.selectedsArray) }) } selectText(index) { let text=''; if(this.selectedsArray[index].length>0) text=Object(this.selectedsArray[index]).map(s=>s.ProductName).toString() return text; } // filter func for searchbar getItems(ev) { let val = ev.target.value; if (!val || !val.trim()) { this.currentItems = []; return; } this.query(val) } query(val?: any) { this.currentItems = [] if (!val) { return this.arrProducts; } return this.arrProducts.forEach(arr => { arr.filter((item) => { for (let key in val) { let field = item.ProductName[key]; if (!(typeof field == 'string' && field.indexOf(val[key]) >= 0)) { return item.ProductName } else if (!(field == val[key])) { return item.ProductName} } this.currentItems.push(item) return null; }); }); } // add product for this account async presentAlertPromptNewProduct() { var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'הכנס מוצר', inputs: [{ type:"textarea", placeholder: 'מוצר חדש', name:'textarea', }], buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'המשך', handler: (alertData) => { this.nameProduct = alertData.textarea this.presentAlertPromptCategory() } } ] }); await alert.present(); } async presentAlertPromptCategory() { let alertInputs=[]; this.arrKind.forEach(element => { alertInputs.push({name:element, type:'radio', value:element, label: element}) }); var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'בחר קטגוריה', inputs:alertInputs, 
buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'הוסף', handler: (alertData) => { this.categoryProduct = alertData; let p=new Products(); p.ProductName=this.nameProduct; console.log(this.o[0][this.categoryProduct]) p.CategoryId=this.o[0][this.categoryProduct][0].CategoryId this.newProducts.push(p) } } ] }); console.log(alert.inputs.length) await alert.present(); } addForSaveList(product: Products)// toggles the product chosen from the search box - adds it if missing, removes it if already selected { console.log(this.selectedsArray) for(let i=0; i<this.selectedsArray.length; i++) // iterate over all selected products (grouped by category; categories with no selected products yet are empty arrays) { var j =0; console.log(this.selectedsArray[i]) if(this.selectedsArray[i].length > 0)// if products from the current category are already selected { if(this.selectedsArray[i][0].CategoryName == product.CategoryName)// check whether the chosen product belongs to the same category { for(j=0; j<this.selectedsArray[i].length && this.selectedsArray[i][j].ProductId != product.ProductId; j++);// same category - check whether the product is already selected if(j === this.selectedsArray[i].length)// if the product is not selected yet, add it { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } else// if it is already selected, remove it { this.selectedsArray[i].splice(j,1) console.log(this.selectedsArray) this.showAlert("המוצר הוסר") break } } } else // if no products from this product's category have been selected yet if(this.arrProducts[i][0].CategoryName == product.CategoryName) { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } } this.currentItems = [] } async showAlert(message: string) { const alert = await this.alertController.create({ cssClass: 'my-custom-class', message: message, buttons: ['אישור'] }); await alert.present(); } // update follow up list saveList() { // this.allSelectedProducts=[] TODO: check whether products from the list get added for (let i = 0; i < this.selectedsArray.length; i++)// iterate over all categories to check which ones have selected items { if (this.selectedsArray[i] != null)// if items are selected in this category { Object(this.selectedsArray[i]).forEach(pr=> { if(!this.allSelectedProducts.find(item=> item == pr.ProductId))
this.typeListName = params['typeListName'] if(params['productsInList'] != undefined) { this.allSelectedProducts =(params['productsInList'].split(',')).map(x=>+x);
random_line_split
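The `addForSaveList` routine above toggles a product inside per-category buckets: if the product is already in its category's bucket it is removed, otherwise it is appended. A compact sketch of that toggle pattern follows, written in Rust to match the other examples here; the bucket layout and `(id, name)` tuples are simplified stand-ins for the page's `selectedsArray` structure:

```rust
// Toggle a product within its category bucket: remove it if present, add it
// otherwise, and report what happened (mirrors the showAlert messages).
fn toggle(buckets: &mut Vec<Vec<(u32, String)>>, category: usize, product: (u32, String)) -> &'static str {
    let bucket = &mut buckets[category];
    if let Some(pos) = bucket.iter().position(|(id, _)| *id == product.0) {
        bucket.remove(pos);
        "product removed"
    } else {
        bucket.push(product);
        "product added"
    }
}

fn main() {
    let mut buckets: Vec<Vec<(u32, String)>> = vec![Vec::new(); 3];
    assert_eq!(toggle(&mut buckets, 1, (7, "milk".into())), "product added");
    assert_eq!(toggle(&mut buckets, 1, (7, "milk".into())), "product removed");
}
```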
products.page.ts
allSelectedProducts = []// contain all products are selected idAccount: Number newProducts:Products[]= new Array() isPageForUpdateFollowList: boolean categoryProduct: number; nameProduct: string; currentItems =new Array() map= new Map() newListId = [] addProductsToList:boolean typeListId: number; typeListName: string; selectedItemsLength: Number selectedArrayId: Number[] =[] allChecked: boolean = false isFromBuyList: boolean oneTimes=[] listId: number oneTime: ProductToList[]=[] constructor(private _location: Location, private productService: ProductsService, private followUpService: FollowUpService, private listService: ListsService, private router: Router, private route: ActivatedRoute, private alertController: AlertController) { this.route.params.subscribe(params => { this.isFromBuyList = JSON.parse(params['isFromBuyList']) this.listId = +params['listId'] console.log(this.isFromBuyList) this.isPageForUpdateFollowList = params['isForFollowList']; this.addProductsToList=params['addProducts'] this.typeListId = +params['typeListId'] this.typeListName = params['typeListName'] if(params['productsInList'] != undefined) { this.allSelectedProducts =(params['productsInList'].split(',')).map(x=>+x); } }); } ngOnInit() { this.idAccount =+ localStorage.getItem('accountId') console.log(this.idAccount) this.getAllProducts() if(this.isPageForUpdateFollowList) this.getSortedFolowList() if(this.addProductsToList) this.GetAllProductsByTypeId() if(this.isFromBuyList) this.selectedsArray = this.oneTimes } // get all products of account from DB getAllProducts() { this.productService.getAllProducts(this.idAccount).subscribe(res => { this.o = Array.of(res) this.selectedItemsLength = this.selectedsArray.length Object.keys(res).forEach(category => {this.arrKind.push(category)//arrKind if(this.selectedItemsLength == 0) this.selectedsArray.push([]) }); this.selectedItemsLength = this.selectedsArray.length if(this.allSelectedProducts.length>0) this.setSelectedArray(this.allSelectedProducts.map(p=>p)) Object.values(res).forEach(element => {this.arrProducts.push(element)});// arrProducts console.log(this.arrProducts) // console.log(" nnncj"+this.arrProducts.map(p=>p.ProductId)) }); } // if enter from followUp page, bring follow products from DB getSortedFolowList() { this.followUpService.getSortedFolowList(this.idAccount).subscribe(res=> { this.selectedsArray=res; console.log(this.selectedsArray)}) } // if enter from createList page, bring already selected products from create page getProductsFromCreateList() { this.setSelectedArray(this.allSelectedProducts.map(p=>p)) } GetAllProductsByTypeId() { this.listService.GetAllProductsByTypeId(this.typeListId).subscribe(res=> { this.setSelectedArray(res.map(p=>p.ProductId)) }) } setS
:number[]) { this.productService.getProductsByIdProduct(res).subscribe(products=>{ this.selectedsArray = [] console.log(products) console.log(Object.values(products)) for (let i = 0; i < this.arrKind.length; i++) { if(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) this.selectedsArray.push(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) else this.selectedsArray.push([]) } console.log(this.selectedsArray) }) } selectText(index) { let text=''; if(this.selectedsArray[index].length>0) text=Object(this.selectedsArray[index]).map(s=>s.ProductName).toString() return text; } // filter func for searchbar getItems(ev) { let val = ev.target.value; if (!val || !val.trim()) { this.currentItems = []; return; } this.query(val) } query(val?: any) { this.currentItems = [] if (!val) { return this.arrProducts; } return this.arrProducts.forEach(arr => { arr.filter((item) => { for (let key in val) { let field = item.ProductName[key]; if (!(typeof field == 'string' && field.indexOf(val[key]) >= 0)) { return item.ProductName } else if (!(field == val[key])) { return item.ProductName} } this.currentItems.push(item) return null; }); }); } // add product for this account async presentAlertPromptNewProduct() { var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'הכנס מוצר', inputs: [{ type:"textarea", placeholder: 'מוצר חדש', name:'textarea', }], buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'המשך', handler: (alertData) => { this.nameProduct = alertData.textarea this.presentAlertPromptCategory() } } ] }); await alert.present(); } async presentAlertPromptCategory() { let alertInputs=[]; this.arrKind.forEach(element => { alertInputs.push({name:element, type:'radio', value:element, label: element}) }); var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'בחר קטגוריה', inputs:alertInputs, buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'הוסף', handler: (alertData) => { this.categoryProduct = alertData; let p=new Products(); p.ProductName=this.nameProduct; console.log(this.o[0][this.categoryProduct]) p.CategoryId=this.o[0][this.categoryProduct][0].CategoryId this.newProducts.push(p) } } ] }); console.log(alert.inputs.length) await alert.present(); } addForSaveList(product: Products)// toggles the product chosen from the search box - adds it if missing, removes it if already selected { console.log(this.selectedsArray) for(let i=0; i<this.selectedsArray.length; i++) // iterate over all selected products (grouped by category; categories with no selected products yet are empty arrays) { var j =0; console.log(this.selectedsArray[i]) if(this.selectedsArray[i].length > 0)// if products from the current category are already selected { if(this.selectedsArray[i][0].CategoryName == product.CategoryName)// check whether the chosen product belongs to the same category { for(j=0; j<this.selectedsArray[i].length && this.selectedsArray[i][j].ProductId != product.ProductId; j++);// same category - check whether the product is already selected if(j === this.selectedsArray[i].length)// if the product is not selected yet, add it { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } else// if it is already selected, remove it { this.selectedsArray[i].splice(j,1) console.log(this.selectedsArray) this.showAlert("המוצר הוסר") break } } } else // if no products from this product's category have been selected yet if(this.arrProducts[i][0].CategoryName == product.CategoryName) { 
this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } } this.currentItems = [] } async showAlert(message: string) { const alert = await this.alertController.create({ cssClass: 'my-custom-class', message: message, buttons: ['אישור'] }); await alert.present(); } // update follow up list saveList() { // this.allSelectedProducts=[] TODO: check whether products from the list get added for (let i = 0; i < this.selectedsArray.length; i++)// iterate over all categories to check which ones have selected items { if (this.selectedsArray[i] != null)// if items are selected in this category { Object(this.selectedsArray[i]).forEach(pr=> { if(!this.allSelectedProducts.find(item=> item == pr.ProductId
electedArray(res
identifier_name
products.page.ts
allSelectedProducts = []// contain all products are selected idAccount: Number newProducts:Products[]= new Array() isPageForUpdateFollowList: boolean categoryProduct: number; nameProduct: string; currentItems =new Array() map= new Map() newListId = [] addProductsToList:boolean typeListId: number; typeListName: string; selectedItemsLength: Number selectedArrayId: Number[] =[] allChecked: boolean = false isFromBuyList: boolean oneTimes=[] listId: number oneTime: ProductToList[]=[] constructor(private _location: Location, private productService: ProductsService, private followUpService: FollowUpService, private listService: ListsService, private router: Router, private route: ActivatedRoute, private alertController: AlertController) { this.route.params.subscribe(params => { this.isFromBuyList = JSON.parse(params['isFromBuyList']) this.listId = +params['listId'] console.log(this.isFromBuyList) this.isPageForUpdateFollowList = params['isForFollowList']; this.addProductsToList=params['addProducts'] this.typeListId = +params['typeListId'] this.typeListName = params['typeListName'] if(params['productsInList'] != undefined) { this.allSelectedProducts =(params['productsInList'].split(',')).map(x=>+x); } }); } ngOnInit() { this.idAccount =+ localStorage.getItem('accountId') console.log(this.idAccount) this.getAllProducts() if(this.isPageForUpdateFollowList) this.getSortedFolowList() if(this.addProductsToList) this.GetAllProductsByTypeId() if(this.isFromBuyList) this.selectedsArray = this.oneTimes } // get all products of account from DB getAllProducts() { this.productService.getAllProducts(this.idAccount).subscribe(res => { this.o = Array.of(res) this.selectedItemsLength = this.selectedsArray.length Object.keys(res).forEach(category => {this.arrKind.push(category)//arrKind if(this.selectedItemsLength == 0) this.selectedsArray.push([]) }); this.selectedItemsLength = this.selectedsArray.length if(this.allSelectedProducts.length>0) this.setSelectedArray(this.allSelectedProducts.map(p=>p)) Object.values(res).forEach(element => {this.arrProducts.push(element)});// arrProducts console.log(this.arrProducts) // console.log(" nnncj"+this.arrProducts.map(p=>p.ProductId)) }); } // if enter from followUp page, bring follow products from DB getSortedFolowList() { this.followUpService.getSortedFolowList(this.idAccount).subscribe(res=> { this.selectedsArray=res; console.log(this.selectedsArray)}) } // if enter from createList page, bring already selected products from create page getProductsFromCreateList() { this.setSelectedArray(this.allSelectedProducts.map(p=>p)) } GetAllProductsByTypeId() { this.listService.GetAllProductsByTypeId(this.typeListId).subscribe(res=> { this.setSelectedArray(res.map(p=>p.ProductId)) }) } setSelectedArray(res:number[]) { this.productService.getProductsByIdProduct(res).subscribe(products=>{ this.selectedsArray = [] console.log(products) console.log(Object.values(products)) for (let i = 0; i < this.arrKind.length; i++) { if(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) this.selectedsArray.push(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) else this.selectedsArray.push([]) } console.log(this.selectedsArray) }) } selectText(index) { let text=''; if(this.selectedsArray[index].length>0) text=Object(this.selectedsArray[index]).map(s=>s.ProductName).toString() return text; } // filter func for searchbar getItems(ev) { let val = ev.target.value; if (!val || !val.trim()) { this.currentItems = []; return; } this.query(val) } 
query(val?: any) { this.currentItems = [] if (!val) { return this.arrProducts; } return this.arrProducts.forEach(arr => { arr.filter((item) => { for (let key in val) { let field = item.ProductName[key]; if (!(typeof field == 'string' && field.indexOf(val[key]) >= 0)) { return item.ProductName } else if (!(field == val[key])) { return item.ProductName} } this.currentItems.push(item) return null; }); }); } // add product for this account async presentAlertPromptNewProduct() { var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'הכנס מוצר', inputs: [{ type:"textarea", placeholder: 'מוצר חדש', name:'textarea', }], buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'המשך', handler: (alertData) => { this.nameProduct = alertData.textarea this.presentAlertPromptCategory() } } ] }); await alert.present(); } async presentAlertPromptCategory() { let alertInputs=[]; this.arrKind.forEach(element => { alertInputs.push({name:element, type:'radio', value:element, label: element}) }); var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'בחר קטגוריה', inputs:alertInputs, buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'הוסף', handler: (alertData) => { this.categoryProduct = alertData; let p=new Products(); p.ProductName=this.nameProduct; console.log(this.o[0][this.categoryProduct]) p.CategoryId=this.o[0][this.categoryProduct][0].CategoryId this.newProducts.push(p) } } ] }); console.log(alert.inputs.length) await alert.present(); } addForSaveList(product: Products)// toggles the product chosen from the search box - adds it if missing, removes it if already selected { console.log(this.selectedsArray) for(let i=0; i<this.selectedsArray.length; i++) // iterate over all selected products (grouped by category; categories with no selected products yet are empty arrays) { var j =0; console.log(this.selectedsArray[i]) if(this.selectedsArray[i].length > 0)// if products from the current category are already selected { if(this.selectedsArray[i][0].CategoryName == product.CategoryName)// check whether the chosen product belongs to the same category { for(j=0; j<this.selectedsArray[i].length && this.selectedsArray[i][j].ProductId != product.ProductId; j++);// same category - check whether the product is already selected if(j === this.selectedsArray[i].length)// if the product is not selected yet, add it { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } else// if it is already selected, remove it { this.selectedsArray[i].splice(j,1) console.log(this.selectedsArray) this.showAlert("המוצר הוסר") break } } } else // if no products from this product's category have been selected yet if(this.arrProducts[i][0].CategoryName == product.CategoryName) { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } } this.currentItems = [] } async showAlert(message: string) { const alert = await this.alertController.create({ cssClass: 'my-custom-class', message: message, buttons: ['אישור'] }); await alert.present(); } // update
this.selectedsArray.length; i++)// iterate over all categories to check which ones have selected items { if (this.selectedsArray[i] != null)// if items are selected in this category { Object(this.selectedsArray[i]).forEach(pr=> { if(!this.allSelectedProducts.find(item=> item == pr.ProductId))
follow up list saveList() { // this.allSelectedProducts=[] TODO: check whether products from the list get added for (let i = 0; i <
conditional_block
products.page.ts
allSelectedProducts = []// contain all products are selected idAccount: Number newProducts:Products[]= new Array() isPageForUpdateFollowList: boolean categoryProduct: number; nameProduct: string; currentItems =new Array() map= new Map() newListId = [] addProductsToList:boolean typeListId: number; typeListName: string; selectedItemsLength: Number selectedArrayId: Number[] =[] allChecked: boolean = false isFromBuyList: boolean oneTimes=[] listId: number oneTime: ProductToList[]=[] constructor(private _location: Location, private productService: ProductsService, private followUpService: FollowUpService, private listService: ListsService, private router: Router, private route: ActivatedRoute, private alertController: AlertController) { this.route.params.subscribe(params => { this.isFromBuyList = JSON.parse(params['isFromBuyList']) this.listId = +params['listId'] console.log(this.isFromBuyList) this.isPageForUpdateFollowList = params['isForFollowList']; this.addProductsToList=params['addProducts'] this.typeListId = +params['typeListId'] this.typeListName = params['typeListName'] if(params['productsInList'] != undefined) { this.allSelectedProducts =(params['productsInList'].split(',')).map(x=>+x); } }); } ngOnInit() { this.idAccount =+ localStorage.getItem('accountId') console.log(this.idAccount) this.getAllProducts() if(this.isPageForUpdateFollowList) this.getSortedFolowList() if(this.addProductsToList) this.GetAllProductsByTypeId() if(this.isFromBuyList) this.selectedsArray = this.oneTimes } // get all products of account from DB getAllProducts() { this.productService.getAllProducts(this.idAccount).subscribe(res => { this.o = Array.of(res) this.selectedItemsLength = this.selectedsArray.length Object.keys(res).forEach(category => {this.arrKind.push(category)//arrKind if(this.selectedItemsLength == 0) this.selectedsArray.push([]) }); this.selectedItemsLength = this.selectedsArray.length if(this.allSelectedProducts.length>0) this.setSelectedArray(this.allSelectedProducts.map(p=>p)) Object.values(res).forEach(element => {this.arrProducts.push(element)});// arrProducts console.log(this.arrProducts) // console.log(" nnncj"+this.arrProducts.map(p=>p.ProductId)) }); } // if enter from followUp page, bring follow products from DB getSortedFolowList() { this.followUpService.getSortedFolowList(this.idAccount).subscribe(res=> { this.selectedsArray=res; console.log(this.selectedsArray)}) } // if enter from createList page, bring already selected products from create page getProductsFromCreateList() {
GetAllProductsByTypeId() { this.listService.GetAllProductsByTypeId(this.typeListId).subscribe(res=> { this.setSelectedArray(res.map(p=>p.ProductId)) }) } setSelectedArray(res:number[]) { this.productService.getProductsByIdProduct(res).subscribe(products=>{ this.selectedsArray = [] console.log(products) console.log(Object.values(products)) for (let i = 0; i < this.arrKind.length; i++) { if(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) this.selectedsArray.push(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) else this.selectedsArray.push([]) } console.log(this.selectedsArray) }) } selectText(index) { let text=''; if(this.selectedsArray[index].length>0) text=Object(this.selectedsArray[index]).map(s=>s.ProductName).toString() return text; } // filter func for searchbar getItems(ev) { let val = ev.target.value; if (!val || !val.trim()) { this.currentItems = []; return; } this.query(val) } query(val?: any) { this.currentItems = [] if (!val) { return this.arrProducts; } return this.arrProducts.forEach(arr => { arr.filter((item) => { for (let key in val) { let field = item.ProductName[key]; if (!(typeof field == 'string' && field.indexOf(val[key]) >= 0)) { return item.ProductName } else if (!(field == val[key])) { return item.ProductName} } this.currentItems.push(item) return null; }); }); } // add product for this account async presentAlertPromptNewProduct() { var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'הכנס מוצר', inputs: [{ type:"textarea", placeholder: 'מוצר חדש', name:'textarea', }], buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'המשך', handler: (alertData) => { this.nameProduct = alertData.textarea this.presentAlertPromptCategory() } } ] }); await alert.present(); } async presentAlertPromptCategory() { let alertInputs=[]; this.arrKind.forEach(element => { alertInputs.push({name:element, type:'radio', value:element, label: element}) }); var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'בחר קטגוריה', inputs:alertInputs, buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'הוסף', handler: (alertData) => { this.categoryProduct = alertData; let p=new Products(); p.ProductName=this.nameProduct; console.log(this.o[0][this.categoryProduct]) p.CategoryId=this.o[0][this.categoryProduct][0].CategoryId this.newProducts.push(p) } } ] }); console.log(alert.inputs.length) await alert.present(); } addForSaveList(product: Products)// toggles the product chosen from the search box - adds it if missing, removes it if already selected { console.log(this.selectedsArray) for(let i=0; i<this.selectedsArray.length; i++) // iterate over all selected products (grouped by category; categories with no selected products yet are empty arrays) { var j =0; console.log(this.selectedsArray[i]) if(this.selectedsArray[i].length > 0)// if products from the current category are already selected { if(this.selectedsArray[i][0].CategoryName == product.CategoryName)// check whether the chosen product belongs to the same category { for(j=0; j<this.selectedsArray[i].length && this.selectedsArray[i][j].ProductId != product.ProductId; j++);// same category - check whether the product is already selected if(j === this.selectedsArray[i].length)// if the product is not selected yet, add it { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } else// if it is already selected, remove it { this.selectedsArray[i].splice(j,1) 
console.log(this.selectedsArray) this.showAlert("המוצר הוסר") break } } } else // if no products from this product's category have been selected yet if(this.arrProducts[i][0].CategoryName == product.CategoryName) { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } } this.currentItems = [] } async showAlert(message: string) { const alert = await this.alertController.create({ cssClass: 'my-custom-class', message: message, buttons: ['אישור'] }); await alert.present(); } // update follow up list saveList() { // this.allSelectedProducts=[] TODO: check whether products from the list get added for (let i = 0; i < this.selectedsArray.length; i++)// iterate over all categories to check which ones have selected items { if (this.selectedsArray[i] != null)// if items are selected in this category { Object(this.selectedsArray[i]).forEach(pr=> { if(!this.allSelectedProducts.find(item=> item == pr.ProductId
this.setSelectedArray(this.allSelectedProducts.map(p=>p)) }
identifier_body
material.rs
of the object. albedo: Color, } impl Lambertian { /// Create a new diffuse material from a given intrinsic object color. pub fn new(albedo: Color) -> Self { Lambertian { albedo } } } impl Material<f64> for Lambertian { fn scatter(&self, _ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // Diffuse reflection: True Lambertian reflection. // We aim for a Lambertian distribution of the reflected rays, which has a distribution of // cos(phi) instead of cos³(phi) for random vectors inside the unit sphere. // To achieve this, we pick a random point on the surface of the unit sphere, which is done // by picking a random point inside the sphere and then normalizing that point. let random_unit_vec = rtweekend::random_vec_in_unit_sphere().normalized(); // Diffuse reflection: send out a new ray from the hit position point pointing towards a // random point on the surface of the sphere tangent to that hit point. // Possible problem: the recursion depth may be too deep, so we blow up the stack. Avoid // this by limiting the number of child rays. let scatter_direction = rec.normal + random_unit_vec; let scatter = Ray::new(rec.point, scatter_direction); Some((scatter, self.albedo)) } } /// Metal (specular) material. /// /// For smooth metal surfaces, light is not randomly scattered. Instead, the angle of the incident /// ray is equal to that of the specular outgoing ray. /// /// V N ^ ^ /// \ ^ / | /// \ | / | B /// v|/ | /// S ------------x------------ /// \ | /// \ | B /// v | /// /// The elements of the figure are: /// * S: Metal surface /// * V: Incident ray /// * N: Surface normal
/// /// In our current design, N is a unit vector, but the same does not have to be true for V. /// Furthermore, V points inwards, so the sign has to be changed. /// All this is encoded in the reflect() function. pub struct Metal { /// Color of the object. albedo: Color, /// Fuzziness of the specular reflections. fuzz: f64, } impl Metal { /// Create a new metallic material from a given intrinsic object color. /// /// * `albedo`: Intrinsic surface color. /// * `fuzz`: Fuzziness factor for specular reflection in the range [0.0, 1.0]. pub fn new(albedo: Color, fuzz: f64) -> Self { Metal { albedo, fuzz: rtweekend::clamp(fuzz, 0.0, 1.0), } } /// Returns the reflected ray. /// /// This basically just encodes the V + B + B term for the specular reflection. We flip the /// sign and simplify the term so it becomes V - 2B. pub fn reflect<T: Copy>(ray: &Vec3<T>, normal: &Vec3<T>) -> Vec3<T> where T: Add<Output = T> + Div<Output = T> + Mul<Output = T> + Mul<f64, Output = T> + Sub<Output = T> + From<f64> + Into<f64>, { let b = *normal * Vec3::dot(ray, normal); *ray - b * T::from(2.0) } } impl Material<f64> for Metal { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // specular reflection let direction = Metal::reflect(&ray.direction().normalized(), &rec.normal); // apply fuzzing let direction = direction + rtweekend::random_vec_in_unit_sphere() * self.fuzz; let scatter = Ray::new(rec.point, direction); if Vec3::dot(&scatter.direction(), &rec.normal) <= 0.0 { None } else { Some((scatter, self.albedo)) } } } /// Clear (dielectrics) material. /// /// Examples of dielectric materials are water, glass or diamond. /// When such a material is hit by a light ray, the ray is split into a reflected and a refracted /// (transmitted) ray. /// /// Refraction for dielectrics is described by Snell's law: /// /// η⋅sinθ = η′⋅sinθ′ /// /// where /// * θ/θ′: angles from the surface normal /// * η/η′: refractive indices (e.g. 1.0 for air, 1.3-1.7 for glass) /// /// R N /// \ ^ /// \ | /// η v| /// S ------------x------------ /// η′ |\ /// | \ /// | \ /// v v /// N´ R´ /// /// In the illustration above, R is the incident ray and N is the surface normal. θ is thus the /// angle between R and N, while θ′ is the angle between N´ and R´. In the illustration, the angles /// θ and θ′ are exactly the same, so the ray R would pass from air (η = 1.0) through air /// (η′ = 1.0). /// /// To calculate the angle θ′, we solve for sinθ′: /// /// sinθ′ = (η / η′)⋅sinθ /// /// We split R´ into two parts: one that is perpendicular to N´ and one that is parallel to N´: /// /// R´ = R′⊥ + R′∥ /// /// Solving for those parts yields: /// /// R′⊥ = (η / η′)⋅(R + cosθ⋅n) /// R′∥ = - sqrt(1 - |R′⊥|²)⋅n /// /// The next step is solving for cosθ. The dot product of two vectors can be expressed in terms of /// the cosine of the angle between them: /// /// a⋅b = |a||b| cosθ /// /// or, assuming unit vectors: /// /// a⋅b = cosθ /// /// Thus, we can rewrite R′⊥ as: /// /// R′⊥ = (η / η′)⋅(R + (-R⋅n)n) /// /// Sometimes, the refraction ratio η / η′ is too high (e.g. when a ray passes through glass and /// enters air), so a real solution to Snell's law does not exist. An example: /// /// sinθ′ = (η / η′)⋅sinθ /// /// given η = 1.5 (glass) and η´ = 1.0 (air): /// /// sinθ′ = (1.5 / 1.0)⋅sinθ /// /// Since sinθ′ can at maximum be 1.0, sinθ must at maximum be (1.0 / 1.5), otherwise the equation /// can no longer be satisfied. 
We can solve for sinθ using the following: /// /// sinθ = sqrt(1 - cos²θ) /// cosθ = R⋅n /// /// which yields: /// /// sinθ = sqrt(1 - (R⋅n)²) /// /// In case of sinθ > (1.0 / refraction ratio), we cannot refract and thus must reflect. This is /// called "total internal reflection". pub struct Dielectric { /// Refraction index. refraction: f64, } impl Dielectric { /// Create a new dielectric material from a given refraction index. /// /// * `refraction`: Refraction index. pub fn new(refraction: f64) -> Self { Dielectric { refraction } } /// Returns the refracted (transmitted) ray. /// /// Based on Snell's law. /// /// * `ray`: Incident ray. /// * `normal`: Surface normal (in eta direction). /// * `refraction_ratio`: Refractive ratio (η over η´). pub fn refract(ray: &Vec3<f64>, normal: &Vec3<f64>, refraction_ratio: f64) -> Vec3<f64> { // the part of the refracted ray which is perpendicular to R´ let mut cos_theta = Vec3::dot(&(-(*ray)), normal); if cos_theta > 1.0 { cos_theta = 1.0; } let perpendicular = (*ray + *normal * cos_theta) * refraction_ratio; // the part that is parallel to R´ let parallel = *normal * (-((1.0 - perpendicular.length_squared
/// /// Additionally, B is an auxiliary vector for illustration purposes. /// The reflected ray (in between N and B) has the same angle as the incident ray V with regard to /// the surface. It can be computed as V + B + B.
random_line_split
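The specular reflection formula described above, V - 2(V·N)N, can be checked numerically. A small standalone sketch using plain arrays instead of the crate's generic Vec3 type:

```rust
// 3D dot product over plain f64 arrays.
fn dot(a: [f64; 3], b: [f64; 3]) -> f64 {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}

// Specular reflection: V - 2(V·N)N, the simplified V + B + B term.
fn reflect(v: [f64; 3], n: [f64; 3]) -> [f64; 3] {
    let d = 2.0 * dot(v, n);
    [v[0] - d * n[0], v[1] - d * n[1], v[2] - d * n[2]]
}

fn main() {
    // A ray coming in at 45°, hitting a horizontal surface with normal (0, 1, 0):
    // the reflected ray keeps its horizontal component and flips the vertical one.
    let reflected = reflect([1.0, -1.0, 0.0], [0.0, 1.0, 0.0]);
    assert_eq!(reflected, [1.0, 1.0, 0.0]);
}
```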
material.rs
material. /// /// For smooth metal surfaces, light is not randomly scattered. Instead, the angle of the incident /// ray is equal to that of the specular outgoing ray. /// /// V N ^ ^ /// \ ^ / | /// \ | / | B /// v|/ | /// S ------------x------------ /// \ | /// \ | B /// v | /// /// The elements of the figure are: /// * S: Metal surface /// * V: Incident ray /// * N: Surface normal /// /// Additionally, B is an auxiliary vector for illustration purposes. /// The reflected ray (in between N and B) has the same angle as the incident ray V with regard to /// the surface. It can be computed as V + B + B. /// /// In our current design, N is a unit vector, but the same does not have to be true for V. /// Furthermore, V points inwards, so the sign has to be changed. /// All this is encoded in the reflect() function. pub struct Metal { /// Color of the object. albedo: Color, /// Fuzziness of the specular reflections. fuzz: f64, } impl Metal { /// Create a new metallic material from a given intrinsic object color. /// /// * `albedo`: Intrinsic surface color. /// * `fuzz`: Fuzziness factor for specular reflection in the range [0.0, 1.0]. pub fn new(albedo: Color, fuzz: f64) -> Self { Metal { albedo, fuzz: rtweekend::clamp(fuzz, 0.0, 1.0), } } /// Returns the reflected ray. /// /// This basically just encodes the V + B + B term for the specular reflection. We flip the /// sign and simplify the term so it becomes V - 2B. pub fn reflect<T: Copy>(ray: &Vec3<T>, normal: &Vec3<T>) -> Vec3<T> where T: Add<Output = T> + Div<Output = T> + Mul<Output = T> + Mul<f64, Output = T> + Sub<Output = T> + From<f64> + Into<f64>, { let b = *normal * Vec3::dot(ray, normal); *ray - b * T::from(2.0) } } impl Material<f64> for Metal { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // specular reflection let direction = Metal::reflect(&ray.direction().normalized(), &rec.normal); // apply fuzzing let direction = direction + rtweekend::random_vec_in_unit_sphere() * self.fuzz; let scatter = Ray::new(rec.point, direction); if Vec3::dot(&scatter.direction(), &rec.normal) <= 0.0 { None } else { Some((scatter, self.albedo)) } } } /// Clear (dielectrics) material. /// /// Examples of dielectric materials are water, glass or diamond. /// When such a material is hit by a light ray, the ray is split into a reflected and a refracted /// (transmitted) ray. /// /// Refraction for dielectrics is described by Snell's law: /// /// η⋅sinθ = η′⋅sinθ′ /// /// where /// * θ/θ′: angles from the surface normal /// * η/η′: refractive indices (e.g. 1.0 for air, 1.3-1.7 for glass) /// /// R N /// \ ^ /// \ | /// η v| /// S ------------x------------ /// η′ |\ /// | \ /// | \ /// v v /// N´ R´ /// /// In the illustration above, R is the incident ray and N is the surface normal. θ is thus the /// angle between R and N, while θ′ is the angle between N´ and R´. In the illustration, the angles /// θ and θ′ are exactly the same, so the ray R would pass from air (η = 1.0) through air /// (η′ = 1.0). /// /// To calculate the angle θ′, we solve for sinθ′: /// /// sinθ′ = (η / η′)⋅sinθ /// /// We split R´ into two parts: one that is perpendicular to N´ and one that is parallel to N´: /// /// R´ = R′⊥ + R′∥ /// /// Solving for those parts yields: /// /// R′⊥ = (η / η′)⋅(R + cosθ⋅n) /// R′∥ = - sqrt(1 - |R′⊥|²)⋅n /// /// The next step is solving for cosθ. 
The dot product of two vectors can be expressed in terms of /// the cosine of the angle between them: /// /// a⋅b = |a||b| cosθ /// /// or, assuming unit vectors: /// /// a⋅b = cosθ /// /// Thus, we can rewrite R′⊥ as: /// /// R′⊥ = (η / η′)⋅(R + (-R⋅n)n) /// /// Sometimes, the refraction ratio η / η′ is too high (e.g. when a ray passes through glass and /// enters air), so a real solution to Snell's law does not exist. An example: /// /// sinθ′ = (η / η′)⋅sinθ /// /// given η = 1.5 (glass) and η´ = 1.0 (air): /// /// sinθ′ = (1.5 / 1.0)⋅sinθ /// /// Since sinθ′ can at maximum be 1.0, sinθ must at maximum be (1.0 / 1.5), otherwise the equation /// can no longer be satisfied. We can solve for sinθ using the following: /// /// sinθ = sqrt(1 - cos²θ) /// cosθ = R⋅n /// /// which yields: /// /// sinθ = sqrt(1 - (R⋅n)²) /// /// In case of sinθ > (1.0 / refraction ratio), we cannot refract and thus must reflect. This is /// called "total internal reflection". pub struct Dielectric { /// Refraction index. refraction: f64, } impl Dielectric { /// Create a new metallic material from a given intrinsic object color. /// /// * `refraction`: Refraction index. pub fn new(refraction: f64) -> Self { Dielectric { refraction } } /// Returns the refracted (trasmitted) ray. /// /// Based on Snell's law. /// /// * `ray`: Incident ray. /// * `normal`: Surface normal (in eta direction). /// * `refraction_ratio`: Refractive ratio (η over η´). pub fn refract(ray: &Vec3<f64>, normal: &Vec3<f64>, refraction_ratio: f64) -> Vec3<f64> { // the part of the refracted ray which is perpendicular to R´ let mut cos_theta = Vec3::dot(&(-(*ray)), normal); if cos_theta > 1.0 { cos_theta = 1.0; } let perpendicular = (*ray + *normal * cos_theta) * refraction_ratio; // the part that is parallel to R´ let parallel = *normal * (-((1.0 - perpendicular.length_squared()).abs()).sqrt()); perpendicular + parallel } /// Returns the reflectance on a dielectric surface at a given angle. /// /// Based on the polynomial approximation by Christophe Schlick. /// /// * `angle` - Angle of incoming ray. /// * `refraction_ratio`: - Refractive ratio (η over η´). pub fn reflectance(angle: f64, refraction_ratio: f64) -> f64 { let r0 = (1.0 - refraction_ratio) / (1.0 + refraction_ratio); let r0 = r0 * r0; r0 + (1.0 - r0) * (1.0 - angle).powf(5.0) } } impl Material<f64> for Dielectric { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // assume the material where the ray originates from is air let eta = 1.0; let eta_prime = self.refraction; let refraction_ratio = if rec.front_face { eta / eta_prime } else { eta_prime }; // Total internal reflection: if // // (η / η′)⋅sinθ > 1.0 // // we must not refract (
and have to reflect) instead!
conditional_block
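The scatter() body this record completes boils down to a refract-or-reflect decision. Below is a hedged sketch of that decision (not the crate's actual code; reflect and refract are passed in as stand-ins for the functions documented above): compute cosθ = -R·n, derive sinθ, and fall back to specular reflection when (η / η′)⋅sinθ > 1, i.e. total internal reflection.

type Vec3 = [number, number, number];
const dot = (a: Vec3, b: Vec3): number => a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
const negate = (v: Vec3): Vec3 => [-v[0], -v[1], -v[2]];

// Decide between refraction and total internal reflection for a unit incident ray.
function scatterDirection(
  unitRay: Vec3,
  normal: Vec3,
  refractionRatio: number,
  reflect: (v: Vec3, n: Vec3) => Vec3,
  refract: (v: Vec3, n: Vec3, ratio: number) => Vec3,
): Vec3 {
  const cosTheta = Math.min(dot(negate(unitRay), normal), 1.0);
  const sinTheta = Math.sqrt(1.0 - cosTheta * cosTheta);
  if (refractionRatio * sinTheta > 1.0) {
    return reflect(unitRay, normal); // Snell's law has no real solution: must reflect
  }
  return refract(unitRay, normal, refractionRatio);
}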
material.rs
the object. albedo: Color, } impl Lambertian { /// Create a new diffuse material from a given intrinsic object color. pub fn new(albedo: Color) -> Self
} impl Material<f64> for Lambertian { fn scatter(&self, _ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // Diffuse reflection: True Lambertian reflection. // We aim for a Lambertian distribution of the reflected rays, which has a distribution of // cos(phi) instead of cos³(phi) for random vectors inside the unit sphere. // To achieve this, we pick a random point on the surface of the unit sphere, which is done // by picking a random point inside the sphere and then normalizing that point. let random_unit_vec = rtweekend::random_vec_in_unit_sphere().normalized(); // Diffuse reflection: send out a new ray from the hit position point pointing towards a // random point on the surface of the sphere tangent to that hit point. // Possible problem: the recursion depth may be too deep, so we blow up the stack. Avoid // this by limiting the number of child rays. let scatter_direction = rec.normal + random_unit_vec; let scatter = Ray::new(rec.point, scatter_direction); Some((scatter, self.albedo)) } } /// Metal (specular) material. /// /// For smooth metal surfaces, light is not randomly scattered. Instead, the angle of the incident /// ray is equal to that of the specular outgoing ray. /// /// V N ^ ^ /// \ ^ / | /// \ | / | B /// v|/ | /// S ------------x------------ /// \ | /// \ | B /// v | /// /// The elements of the figure are: /// * S: Metal surface /// * V: Incident ray /// * N: Surface normal /// /// Additionally, B is an additional vector for illustration purposes. /// The reflected ray (in between N and B) has the same angle as the incident ray V with regards to // the surface. It can be computed as V + B + B. /// /// In our current design, N is a unit vector, but the same does not have to be true for V. /// Furthermore, V points inwards, so the sign has to be changed. /// All this is encoded in the reflect() function. pub struct Metal { /// Color of the object. albedo: Color, /// Fuziness of the specular reflections. fuzz: f64, } impl Metal { /// Create a new metallic material from a given intrinsic object color. /// /// * `albedo`: Intrinsic surface color. /// * `fuzz`: Fuzziness factor for specular reflection in the range [0.0, 1.0]. pub fn new(albedo: Color, fuzz: f64) -> Self { Metal { albedo, fuzz: rtweekend::clamp(fuzz, 0.0, 1.0), } } /// Returns the reflected ray. /// /// This basically just encodes the V + B + B term for the specular reflection. We flip the /// sign and simplify the term so it becomes V - 2B. pub fn reflect<T: Copy>(ray: &Vec3<T>, normal: &Vec3<T>) -> Vec3<T> where T: Add<Output = T> + Div<Output = T> + Mul<Output = T> + Mul<f64, Output = T> + Sub<Output = T> + From<f64> + Into<f64>, { let b = *normal * Vec3::dot(ray, normal); *ray - b * T::from(2.0) } } impl Material<f64> for Metal { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // specular reflection let direction = Metal::reflect(&ray.direction().normalized(), &rec.normal); // apply fuzzing let direction = direction + rtweekend::random_vec_in_unit_sphere() * self.fuzz; let scatter = Ray::new(rec.point, direction); if Vec3::dot(&scatter.direction(), &rec.normal) <= 0.0 { None } else { Some((scatter, self.albedo)) } } } /// Clear (dielectrics) material. /// /// Examples of dielectric materials are water, glass or diamond. /// When such a material it hit by a light ray, the ray is split into a reflected and a refracted /// (transmitted) ray. 
/// /// Refraction for dielectrics is described by Snell's law: /// /// η⋅sinθ = η′⋅sinθ′ /// /// where /// * θ/θ′: angles from the surface normal /// * η/η′: refractive indices (e.g. 1.0 for air, 1.3-1.7 for glass) /// /// R N /// \ ^ /// \ | /// η v| /// S ------------x------------ /// η′ |\ /// | \ /// | \ /// v v /// N´ R´ /// /// In the illustration above, R is the incident ray and N is the surface normal. θ is thus the /// angle between R and N, while θ′ is the angle between N´ and R´. In the illustration, the angles /// θ and θ′ are exactly the same, so the ray R would pass from air (η = 1.0) through air /// (η′ = 1.0). /// /// To calculate the angle θ′, we solve for sinθ′: /// /// sinθ′ = (η / η′)⋅sinθ /// /// We split R´ into two parts: one that is perpendicular to N´ and one that is parallel to N´: /// /// R´ = R′⊥ + R′∥ /// /// Solving for those parts yields: /// /// R′⊥ = (η / η′)⋅(R + cosθ⋅n) /// R′∥ = - sqrt(1 - |R′⊥|²)⋅n /// /// The next step is solving for cosθ. The dot product of two vectors can be expressed in terms of /// the cosine of the angle between them: /// /// a⋅b = |a||b| cosθ /// /// or, assuming unit vectors: /// /// a⋅b = cosθ /// /// Thus, we can rewrite R′⊥ as: /// /// R′⊥ = (η / η′)⋅(R + (-R⋅n)n) /// /// Sometimes, the refraction ratio η / η′ is too high (e.g. when a ray passes through glass and /// enters air), so a real solution to Snell's law does not exist. An example: /// /// sinθ′ = (η / η′)⋅sinθ /// /// given η = 1.5 (glass) and η´ = 1.0 (air): /// /// sinθ′ = (1.5 / 1.0)⋅sinθ /// /// Since sinθ′ can at maximum be 1.0, sinθ must at maximum be (1.0 / 1.5), otherwise the equation /// can no longer be satisfied. We can solve for sinθ using the following: /// /// sinθ = sqrt(1 - cos²θ) /// cosθ = R⋅n /// /// which yields: /// /// sinθ = sqrt(1 - (R⋅n)²) /// /// In case of sinθ > (1.0 / refraction ratio), we cannot refract and thus must reflect. This is /// called "total internal reflection". pub struct Dielectric { /// Refraction index. refraction: f64, } impl Dielectric { /// Create a new metallic material from a given intrinsic object color. /// /// * `refraction`: Refraction index. pub fn new(refraction: f64) -> Self { Dielectric { refraction } } /// Returns the refracted (trasmitted) ray. /// /// Based on Snell's law. /// /// * `ray`: Incident ray. /// * `normal`: Surface normal (in eta direction). /// * `refraction_ratio`: Refractive ratio (η over η´). pub fn refract(ray: &Vec3<f64>, normal: &Vec3<f64>, refraction_ratio: f64) -> Vec3<f64> { // the part of the refracted ray which is perpendicular to R´ let mut cos_theta = Vec3::dot(&(-(*ray)), normal); if cos_theta > 1.0 { cos_theta = 1.0; } let perpendicular = (*ray + *normal * cos_theta) * refraction_ratio; // the part that is parallel to R´ let parallel = *normal * (-((1.0 - perpendicular
{ Lambertian { albedo } }
identifier_body
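The Lambertian scatter above relies on random_vec_in_unit_sphere().normalized() to pick a uniform point on the surface of the unit sphere. A minimal sketch of that sampling step, assuming simple rejection sampling is acceptable (illustrative code, not from the crate):

// Rejection-sample a point inside the unit sphere, then normalize it onto the
// surface; adding this to the hit normal yields the cos(phi) Lambertian distribution.
type Vec3 = [number, number, number];

function randomUnitVector(): Vec3 {
  for (;;) {
    const p: Vec3 = [Math.random() * 2 - 1, Math.random() * 2 - 1, Math.random() * 2 - 1];
    const len2 = p[0] * p[0] + p[1] * p[1] + p[2] * p[2];
    if (len2 > 1e-12 && len2 <= 1) { // inside the sphere and not degenerate
      const len = Math.sqrt(len2);
      return [p[0] / len, p[1] / len, p[2] / len];
    }
  }
}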
material.rs
the object. albedo: Color, } impl Lambertian { /// Create a new diffuse material from a given intrinsic object color. pub fn new(albedo: Color) -> Self { Lambertian { albedo } } } impl Material<f64> for Lambertian { fn scatter(&self, _ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // Diffuse reflection: True Lambertian reflection. // We aim for a Lambertian distribution of the reflected rays, which has a distribution of // cos(phi) instead of cos³(phi) for random vectors inside the unit sphere. // To achieve this, we pick a random point on the surface of the unit sphere, which is done // by picking a random point inside the sphere and then normalizing that point. let random_unit_vec = rtweekend::random_vec_in_unit_sphere().normalized(); // Diffuse reflection: send out a new ray from the hit position point pointing towards a // random point on the surface of the sphere tangent to that hit point. // Possible problem: the recursion depth may be too deep, so we blow up the stack. Avoid // this by limiting the number of child rays. let scatter_direction = rec.normal + random_unit_vec; let scatter = Ray::new(rec.point, scatter_direction); Some((scatter, self.albedo)) } } /// Metal (specular) material. /// /// For smooth metal surfaces, light is not randomly scattered. Instead, the angle of the incident /// ray is equal to that of the specular outgoing ray. /// /// V N ^ ^ /// \ ^ / | /// \ | / | B /// v|/ | /// S ------------x------------ /// \ | /// \ | B /// v | /// /// The elements of the figure are: /// * S: Metal surface /// * V: Incident ray /// * N: Surface normal /// /// Additionally, B is an additional vector for illustration purposes. /// The reflected ray (in between N and B) has the same angle as the incident ray V with regards to // the surface. It can be computed as V + B + B. /// /// In our current design, N is a unit vector, but the same does not have to be true for V. /// Furthermore, V points inwards, so the sign has to be changed. /// All this is encoded in the reflect() function. pub struct M
{ /// Color of the object. albedo: Color, /// Fuziness of the specular reflections. fuzz: f64, } impl Metal { /// Create a new metallic material from a given intrinsic object color. /// /// * `albedo`: Intrinsic surface color. /// * `fuzz`: Fuzziness factor for specular reflection in the range [0.0, 1.0]. pub fn new(albedo: Color, fuzz: f64) -> Self { Metal { albedo, fuzz: rtweekend::clamp(fuzz, 0.0, 1.0), } } /// Returns the reflected ray. /// /// This basically just encodes the V + B + B term for the specular reflection. We flip the /// sign and simplify the term so it becomes V - 2B. pub fn reflect<T: Copy>(ray: &Vec3<T>, normal: &Vec3<T>) -> Vec3<T> where T: Add<Output = T> + Div<Output = T> + Mul<Output = T> + Mul<f64, Output = T> + Sub<Output = T> + From<f64> + Into<f64>, { let b = *normal * Vec3::dot(ray, normal); *ray - b * T::from(2.0) } } impl Material<f64> for Metal { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // specular reflection let direction = Metal::reflect(&ray.direction().normalized(), &rec.normal); // apply fuzzing let direction = direction + rtweekend::random_vec_in_unit_sphere() * self.fuzz; let scatter = Ray::new(rec.point, direction); if Vec3::dot(&scatter.direction(), &rec.normal) <= 0.0 { None } else { Some((scatter, self.albedo)) } } } /// Clear (dielectrics) material. /// /// Examples of dielectric materials are water, glass or diamond. /// When such a material it hit by a light ray, the ray is split into a reflected and a refracted /// (transmitted) ray. /// /// Refraction for dielectrics is described by Snell's law: /// /// η⋅sinθ = η′⋅sinθ′ /// /// where /// * θ/θ′: angles from the surface normal /// * η/η′: refractive indices (e.g. 1.0 for air, 1.3-1.7 for glass) /// /// R N /// \ ^ /// \ | /// η v| /// S ------------x------------ /// η′ |\ /// | \ /// | \ /// v v /// N´ R´ /// /// In the illustration above, R is the incident ray and N is the surface normal. θ is thus the /// angle between R and N, while θ′ is the angle between N´ and R´. In the illustration, the angles /// θ and θ′ are exactly the same, so the ray R would pass from air (η = 1.0) through air /// (η′ = 1.0). /// /// To calculate the angle θ′, we solve for sinθ′: /// /// sinθ′ = (η / η′)⋅sinθ /// /// We split R´ into two parts: one that is perpendicular to N´ and one that is parallel to N´: /// /// R´ = R′⊥ + R′∥ /// /// Solving for those parts yields: /// /// R′⊥ = (η / η′)⋅(R + cosθ⋅n) /// R′∥ = - sqrt(1 - |R′⊥|²)⋅n /// /// The next step is solving for cosθ. The dot product of two vectors can be expressed in terms of /// the cosine of the angle between them: /// /// a⋅b = |a||b| cosθ /// /// or, assuming unit vectors: /// /// a⋅b = cosθ /// /// Thus, we can rewrite R′⊥ as: /// /// R′⊥ = (η / η′)⋅(R + (-R⋅n)n) /// /// Sometimes, the refraction ratio η / η′ is too high (e.g. when a ray passes through glass and /// enters air), so a real solution to Snell's law does not exist. An example: /// /// sinθ′ = (η / η′)⋅sinθ /// /// given η = 1.5 (glass) and η´ = 1.0 (air): /// /// sinθ′ = (1.5 / 1.0)⋅sinθ /// /// Since sinθ′ can at maximum be 1.0, sinθ must at maximum be (1.0 / 1.5), otherwise the equation /// can no longer be satisfied. We can solve for sinθ using the following: /// /// sinθ = sqrt(1 - cos²θ) /// cosθ = R⋅n /// /// which yields: /// /// sinθ = sqrt(1 - (R⋅n)²) /// /// In case of sinθ > (1.0 / refraction ratio), we cannot refract and thus must reflect. This is /// called "total internal reflection". 
pub struct Dielectric { /// Refraction index. refraction: f64, } impl Dielectric { /// Create a new dielectric material from a given refraction index. /// /// * `refraction`: Refraction index. pub fn new(refraction: f64) -> Self { Dielectric { refraction } } /// Returns the refracted (transmitted) ray. /// /// Based on Snell's law. /// /// * `ray`: Incident ray. /// * `normal`: Surface normal (in eta direction). /// * `refraction_ratio`: Refractive ratio (η over η´). pub fn refract(ray: &Vec3<f64>, normal: &Vec3<f64>, refraction_ratio: f64) -> Vec3<f64> { // the part of the refracted ray which is perpendicular to R´ let mut cos_theta = Vec3::dot(&(-(*ray)), normal); if cos_theta > 1.0 { cos_theta = 1.0; } let perpendicular = (*ray + *normal * cos_theta) * refraction_ratio; // the part that is parallel to R´ let parallel = *normal * (-((1.0 - perpendicular
etal
identifier_name
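Plugging numbers into the Schlick approximation above makes its behaviour concrete (note the `angle` parameter is really cosθ of the incident angle). Illustrative sketch, not the crate's code: for glass seen from air (refraction ratio 1.5), r0 = ((1 - 1.5)/(1 + 1.5))² = 0.04, so head-on rays reflect about 4% while grazing rays approach 100%.

// Schlick's polynomial approximation of Fresnel reflectance, as described above.
function reflectance(cosTheta: number, refractionRatio: number): number {
  let r0 = (1 - refractionRatio) / (1 + refractionRatio);
  r0 = r0 * r0;
  return r0 + (1 - r0) * Math.pow(1 - cosTheta, 5);
}

console.log(reflectance(1.0, 1.5)); // ≈ 0.04 (head-on)
console.log(reflectance(0.0, 1.5)); // 1.0 (grazing)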
singleVisit.component.ts
; private accentureOfficeHeight; private mapZoomLevel = 3; private googleMapDesktopAPIKey = 'AIzaSyD5yFBB69RwOsb6sVRsQEapd9ynwCuoBYo'; private googleMapInfoWindow; constructor( private googleMaps: GoogleMaps, private selectvisit: SelectVisitServices, private headerService: HeaderService, private _translate: TranslateService, private _router: Router) { if(window.cordova){ this.markerIconBasePath = 'www/resources/images/singlevisit/'; this.visitLocationWidth = 17; this.visitLocationHeight = 19; this.accentureOfficeWidth = 11; this.accentureOfficeHeight = 15; }else{ this.markerIconBasePath = './resources/images/singlevisit/'; this.visitLocationWidth = 27; this.visitLocationHeight = 29; this.accentureOfficeWidth = 21; this.accentureOfficeHeight = 15; } this.officeLocationMarkerUrl = this.markerIconBasePath+'AccentureOffice.png'; this.visitLocationMarkerUrl = [ this.markerIconBasePath+'pin_1.png', this.markerIconBasePath+'pin_2.png', this.markerIconBasePath+'pin_3.png', this.markerIconBasePath+'pin_4.png', this.markerIconBasePath+'pin_5.png', this.markerIconBasePath+'pin_6.png', this.markerIconBasePath+'pin_0.png']; } ngOnInit(){ let obj = { title:this._translate.instant('visitLocation'), isHomeBtn: false, isBackBtn: true, isMenuBtn: true }; this.headerService.setHeaderObject(obj); this.visitStartDate = this.selectvisit.selectedVisitObj.VisitStartDate; this.visitEndDate = this.selectvisit.selectedVisitObj.VisitEndDate; this.defaultShowInfoWindow = false; if(this.headerService.isSideMenuClick){ this.displayFooterSection = false; } } // Load map only after view is initialized ngAfterViewInit() { this.loadMap(); } loadMap() { this.selectvisit.getVisitLocationDetails(this.selectvisit.selectedVisitObj.VisitID) .subscribe(locations => this.successLocationCallback(locations), error => this.errorMessage = <any>error); } private successLocationCallback(locations: any[]){ // create a new map by passing HTMLElement let element: HTMLElement = document.getElementById('flatMapContainerPhone'); if(window.cordova){ this.mapInstance = this.googleMaps.create(element); // listen to MAP_READY event // You must wait for this event to fire before adding something to the map or modifying it in anyway this.mapInstance.one(GoogleMapsEvent.MAP_READY).then( () => { console.log('Map is ready!'); this.fetchMapCoordinatesAndDisplay(locations); } ); }else{ let me = this; if( !this.isApiLoaded() ){ this.loadApi(); this._apiLoadingPromise.then( () => { me.__onGoogleLoaded(locations); }); }else{ me.__onGoogleLoaded(locations); } } }; private fetchMapCoordinatesAndDisplay(locations){ this.headerService.setGoogleMapInstance(this.mapInstance); // Now you can add elements to the map like the marker this.filterVisitAndOfficeLocations(locations|| []); this.addVisitLocationMarker(); this.addAccentureLocationMarker(); this.zoomToCurrentLocation(); } private _loadGoogleMapAPIScript(){ let script = (<any>document).createElement('script'); script.async = true; script.defer = true; script.src = `https://maps.googleapis.com/maps/api/js?callback=__onGoogleLoaded&key=${this.googleMapDesktopAPIKey}`; script.type = 'text/javascript'; (<any>document).getElementsByTagName('head')[0].appendChild(script); } isApiLoaded(): boolean { return (<any>window).google ? 
true : false; } loadApi(): void{ if(!this._apiLoadingPromise){ this._apiLoadingPromise = new Promise( (resolve) => { (<any>window)['__onGoogleLoaded'] = (ev) => { console.log('google maps api loaded'); resolve('google maps api loaded'); } this._loadGoogleMapAPIScript(); }); } } public __onGoogleLoaded(locations){ let mapProp = { center: new google.maps.LatLng(51.508742,-0.120850), zoom: this.mapZoomLevel, tilt: 30 }; let element: HTMLElement = document.getElementById('flatMapContainerPhone'); this.mapInstance = new google.maps.Map(element, mapProp); this.fetchMapCoordinatesAndDisplay(locations); } private fetchPositionLatLong(latitude, longtitude){ let position; if(window.cordova){ position = new LatLng(latitude, longtitude); }else{ position = new google.maps.LatLng(latitude, longtitude); } return position; } private filterVisitAndOfficeLocations(clientvisitLocations: any[]): void { let me = this; //visit Location let clientLocationGrouped = _.groupBy(clientvisitLocations[0], function(locationObj: any){ return locationObj.CityName; }); _.forEach(clientLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); me.displayVisitLocation.push(locationObj); }); //Accenture Offices let visitLocationGrouped = _.groupBy(clientvisitLocations[1], function(locationObj: any){ return locationObj.CityName; }); let accentureOffices = []; _.forEach(visitLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); accentureOffices.push(locationObj); }); accentureOffices.map(function(locationObj, key){ let filteredData = _.filter(me.displayVisitLocation, function(o) { return o.CityName === locationObj.CityName; }); if(!filteredData.length){ me.displayAccentureOfficeLocation.push(locationObj); } }); } private addVisitLocationMarker() : void{ let me = this, counter = 0; me.displayVisitLocation.map(function(locationObj, key){ if(!counter){ me.selectedMapCityName = locationObj.CityName + ' '+ moment(locationObj.StartDate).format('Do MMM YYYY'); } // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longtitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: (counter<6) ? me.visitLocationMarkerUrl[counter] : me.visitLocationMarkerUrl[6], detail: locationObj, width: me.visitLocationWidth, height: me.visitLocationHeight }); counter++; }); } private addAccentureLocationMarker(): void{ let me = this; _.forEach(me.displayAccentureOfficeLocation, function(locationObj, key){ // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: me.officeLocationMarkerUrl, width: me.accentureOfficeWidth, height: me.accentureOfficeHeight }); }); }; private zoomToCurrentLocation(){ if(window.cordova){ //for mobile this.mapInstance.getMyLocation().then(res => { console.log('Give it to me' + res.latLng); let position = { target: res.latLng, zoom: this.mapZoomLevel, tilt: 30 }; if(this.mapInstance){ this.mapInstance.animateCamera(position); } }); }else{ //for desktop // Try HTML5 geolocation. 
let me = this; if (navigator.geolocation) { navigator.geolocation.getCurrentPosition(function(position) { let pos = new google.maps.LatLng(position.coords.latitude, position.coords.longitude); if(me.mapInstance){ me.mapInstance.setCenter(pos); } }, function() { //user denied permission }); } } } private addMapMarker(markerObj: ISingleVisitAddMarkerParams){ let me = this; if(this.mapInstance){ if(window.cordova){ //for mobile // create new marker let markerOptions: MarkerOptions = { position: markerObj.position, title: markerObj.title, icon: { url: markerObj.icon, size: { width: markerObj.width, height: markerObj.height } } }; this.mapInstance.addMarker(markerOptions) .then((marker: Marker) => { if(typeof markerObj.detail !== 'undefined'){ marker.set('custominfo', markerObj.detail); } if(!me.defaultShowInfoWindow)
{ marker.showInfoWindow(); me.defaultShowInfoWindow = true; }
conditional_block
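The loadApi()/_loadGoogleMapAPIScript() pair above implements a common lazy-load pattern: register a global ready callback, inject the script tag once, and cache the promise so concurrent callers share a single load. A condensed sketch of the same idea (illustrative names, not the component's verbatim code):

// Lazily inject a third-party script once and await its global ready callback.
let apiLoading: Promise<void> | undefined;

function loadMapsApi(apiKey: string): Promise<void> {
  if (!apiLoading) {
    apiLoading = new Promise<void>((resolve) => {
      (window as any).__onGoogleLoaded = () => resolve();
      const script = document.createElement('script');
      script.async = true;
      script.defer = true;
      script.src = `https://maps.googleapis.com/maps/api/js?callback=__onGoogleLoaded&key=${apiKey}`;
      document.head.appendChild(script);
    });
  }
  return apiLoading;
}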
singleVisit.component.ts
MarkerUrl: Array<string>; public visitStartDate: string; public visitEndDate: string; public selectedMapCityName; private displayVisitLocation: any = []; private displayAccentureOfficeLocation: any = []; private defaultShowInfoWindow; private _apiLoadingPromise: Promise<any>; private visitLocationWidth; private visitLocationHeight; private accentureOfficeWidth; private accentureOfficeHeight; private mapZoomLevel = 3; private googleMapDesktopAPIKey = 'AIzaSyD5yFBB69RwOsb6sVRsQEapd9ynwCuoBYo'; private googleMapInfoWindow; constructor( private googleMaps: GoogleMaps, private selectvisit: SelectVisitServices, private headerService: HeaderService, private _translate: TranslateService, private _router: Router) { if(window.cordova){ this.markerIconBasePath = 'www/resources/images/singlevisit/'; this.visitLocationWidth = 17; this.visitLocationHeight = 19; this.accentureOfficeWidth = 11; this.accentureOfficeHeight = 15; }else{ this.markerIconBasePath = './resources/images/singlevisit/'; this.visitLocationWidth = 27; this.visitLocationHeight = 29; this.accentureOfficeWidth = 21; this.accentureOfficeHeight = 15; } this.officeLocationMarkerUrl = this.markerIconBasePath+'AccentureOffice.png'; this.visitLocationMarkerUrl = [ this.markerIconBasePath+'pin_1.png', this.markerIconBasePath+'pin_2.png', this.markerIconBasePath+'pin_3.png', this.markerIconBasePath+'pin_4.png', this.markerIconBasePath+'pin_5.png', this.markerIconBasePath+'pin_6.png', this.markerIconBasePath+'pin_0.png']; } ngOnInit(){ let obj = { title:this._translate.instant('visitLocation'), isHomeBtn: false, isBackBtn: true, isMenuBtn: true }; this.headerService.setHeaderObject(obj); this.visitStartDate = this.selectvisit.selectedVisitObj.VisitStartDate; this.visitEndDate = this.selectvisit.selectedVisitObj.VisitEndDate; this.defaultShowInfoWindow = false; if(this.headerService.isSideMenuClick){ this.displayFooterSection = false; } } // Load map only after view is initialized ngAfterViewInit() { this.loadMap(); } loadMap() { this.selectvisit.getVisitLocationDetails(this.selectvisit.selectedVisitObj.VisitID) .subscribe(locations => this.successLocationCallback(locations), error => this.errorMessage = <any>error); } private successLocationCallback(locations: any[]){ // create a new map by passing HTMLElement let element: HTMLElement = document.getElementById('flatMapContainerPhone'); if(window.cordova){ this.mapInstance = this.googleMaps.create(element); // listen to MAP_READY event // You must wait for this event to fire before adding something to the map or modifying it in anyway this.mapInstance.one(GoogleMapsEvent.MAP_READY).then( () => { console.log('Map is ready!'); this.fetchMapCoordinatesAndDisplay(locations); } ); }else{ let me = this; if( !this.isApiLoaded() ){ this.loadApi(); this._apiLoadingPromise.then( () => { me.__onGoogleLoaded(locations); }); }else{ me.__onGoogleLoaded(locations); } } }; private fetchMapCoordinatesAndDisplay(locations){ this.headerService.setGoogleMapInstance(this.mapInstance); // Now you can add elements to the map like the marker this.filterVisitAndOfficeLocations(locations|| []); this.addVisitLocationMarker(); this.addAccentureLocationMarker(); this.zoomToCurrentLocation(); } private _loadGoogleMapAPIScript(){ let script = (<any>document).createElement('script'); script.async = true; script.defer = true; script.src = `https://maps.googleapis.com/maps/api/js?callback=__onGoogleLoaded&key=${this.googleMapDesktopAPIKey}`; script.type = 'text/javascript'; 
(<any>document).getElementsByTagName('head')[0].appendChild(script); } isApiLoaded(): boolean { return (<any>window).google ? true : false; } loadApi(): void{ if(!this._apiLoadingPromise){ this._apiLoadingPromise = new Promise( (resolve) => { (<any>window)['__onGoogleLoaded'] = (ev) => { console.log('google maps api loaded'); resolve('google maps api loaded'); } this._loadGoogleMapAPIScript(); }); } } public __onGoogleLoaded(locations){ let mapProp = { center: new google.maps.LatLng(51.508742,-0.120850), zoom: this.mapZoomLevel, tilt: 30 }; let element: HTMLElement = document.getElementById('flatMapContainerPhone'); this.mapInstance = new google.maps.Map(element, mapProp); this.fetchMapCoordinatesAndDisplay(locations); } private fetchPositionLatLong(latitude, longtitude){ let position; if(window.cordova){ position = new LatLng(latitude, longtitude); }else{ position = new google.maps.LatLng(latitude, longtitude); } return position; } private filterVisitAndOfficeLocations(clientvisitLocations: any[]): void { let me = this; //visit Location let clientLocationGrouped = _.groupBy(clientvisitLocations[0], function(locationObj: any){ return locationObj.CityName; }); _.forEach(clientLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); me.displayVisitLocation.push(locationObj); }); //Accenture Offices
let visitLocationGrouped = _.groupBy(clientvisitLocations[1], function(locationObj: any){ return locationObj.CityName; }); let accentureOffices = []; _.forEach(visitLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); accentureOffices.push(locationObj); }); accentureOffices.map(function(locationObj, key){ let filteredData = _.filter(me.displayVisitLocation, function(o) { return o.CityName === locationObj.CityName; }); if(!filteredData.length){ me.displayAccentureOfficeLocation.push(locationObj); } }); } private addVisitLocationMarker() : void{ let me = this, counter = 0; me.displayVisitLocation.map(function(locationObj, key){ if(!counter){ me.selectedMapCityName = locationObj.CityName + ' '+ moment(locationObj.StartDate).format('Do MMM YYYY'); } // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longtitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: (counter<6) ? me.visitLocationMarkerUrl[counter] : me.visitLocationMarkerUrl[6], detail: locationObj, width: me.visitLocationWidth, height: me.visitLocationHeight }); counter++; }); } private addAccentureLocationMarker(): void{ let me = this; _.forEach(me.displayAccentureOfficeLocation, function(locationObj, key){ // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: me.officeLocationMarkerUrl, width: me.accentureOfficeWidth, height: me.accentureOfficeHeight }); }); }; private zoomToCurrentLocation(){ if(window.cordova){ //for mobile this.mapInstance.getMyLocation().then(res => { console.log('Give it to me' + res.latLng); let position = { target: res.latLng, zoom: this.mapZoomLevel, tilt: 30 }; if(this.mapInstance){ this.mapInstance.animateCamera(position); } }); }else{ //for desktop // Try HTML5 geolocation. let me = this; if (navigator.geolocation) { navigator.geolocation.getCurrentPosition(function(position) { let pos = new google.maps.LatLng(position.coords.latitude, position.coords.longitude); if(me.mapInstance){ me.mapInstance.setCenter(pos); } }, function() { //user denied permission }); } } } private addMapMarker(markerObj: ISingleVisitAddMarkerParams){ let me = this; if(this.mapInstance){ if(window.cordova){ //for mobile // create new marker let markerOptions: MarkerOptions = { position: markerObj.position, title: markerObj.title, icon: { url: markerObj.icon, size: { width: markerObj.width, height:
random_line_split
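filterVisitAndOfficeLocations() above groups each list by CityName, keeps the first entry per city, and drops office locations whose city already has a visit marker. Without lodash the same dedup can be sketched with a Map (hypothetical Location shape; the component itself mixes the spellings Longtitude and Longitude):

// Keep one location per city, preferring visit locations over office locations.
interface Location { CityName: string; Latitude: number; Longitude: number; }

function firstPerCity(locations: Location[]): Location[] {
  const byCity = new Map<string, Location>();
  for (const loc of locations) {
    if (!byCity.has(loc.CityName)) byCity.set(loc.CityName, loc);
  }
  return [...byCity.values()];
}

function mergeMarkers(visits: Location[], offices: Location[]) {
  const visitMarkers = firstPerCity(visits);
  const visitCities = new Set(visitMarkers.map(v => v.CityName));
  const officeMarkers = firstPerCity(offices).filter(o => !visitCities.has(o.CityName));
  return { visits: visitMarkers, offices: officeMarkers };
}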
singleVisit.component.ts
Url: Array<string>; public visitStartDate: string; public visitEndDate: string; public selectedMapCityName; private displayVisitLocation: any = []; private displayAccentureOfficeLocation: any = []; private defaultShowInfoWindow; private _apiLoadingPromise: Promise<any>; private visitLocationWidth; private visitLocationHeight; private accentureOfficeWidth; private accentureOfficeHeight; private mapZoomLevel = 3; private googleMapDesktopAPIKey = 'AIzaSyD5yFBB69RwOsb6sVRsQEapd9ynwCuoBYo'; private googleMapInfoWindow; constructor( private googleMaps: GoogleMaps, private selectvisit: SelectVisitServices, private headerService: HeaderService, private _translate: TranslateService, private _router: Router) { if(window.cordova){ this.markerIconBasePath = 'www/resources/images/singlevisit/'; this.visitLocationWidth = 17; this.visitLocationHeight = 19; this.accentureOfficeWidth = 11; this.accentureOfficeHeight = 15; }else{ this.markerIconBasePath = './resources/images/singlevisit/'; this.visitLocationWidth = 27; this.visitLocationHeight = 29; this.accentureOfficeWidth = 21; this.accentureOfficeHeight = 15; } this.officeLocationMarkerUrl = this.markerIconBasePath+'AccentureOffice.png'; this.visitLocationMarkerUrl = [ this.markerIconBasePath+'pin_1.png', this.markerIconBasePath+'pin_2.png', this.markerIconBasePath+'pin_3.png', this.markerIconBasePath+'pin_4.png', this.markerIconBasePath+'pin_5.png', this.markerIconBasePath+'pin_6.png', this.markerIconBasePath+'pin_0.png']; } ngOnInit(){ let obj = { title:this._translate.instant('visitLocation'), isHomeBtn: false, isBackBtn: true, isMenuBtn: true }; this.headerService.setHeaderObject(obj); this.visitStartDate = this.selectvisit.selectedVisitObj.VisitStartDate; this.visitEndDate = this.selectvisit.selectedVisitObj.VisitEndDate; this.defaultShowInfoWindow = false; if(this.headerService.isSideMenuClick){ this.displayFooterSection = false; } } // Load map only after view is initialized ngAfterViewInit()
loadMap() { this.selectvisit.getVisitLocationDetails(this.selectvisit.selectedVisitObj.VisitID) .subscribe(locations => this.successLocationCallback(locations), error => this.errorMessage = <any>error); } private successLocationCallback(locations: any[]){ // create a new map by passing HTMLElement let element: HTMLElement = document.getElementById('flatMapContainerPhone'); if(window.cordova){ this.mapInstance = this.googleMaps.create(element); // listen to MAP_READY event // You must wait for this event to fire before adding something to the map or modifying it in anyway this.mapInstance.one(GoogleMapsEvent.MAP_READY).then( () => { console.log('Map is ready!'); this.fetchMapCoordinatesAndDisplay(locations); } ); }else{ let me = this; if( !this.isApiLoaded() ){ this.loadApi(); this._apiLoadingPromise.then( () => { me.__onGoogleLoaded(locations); }); }else{ me.__onGoogleLoaded(locations); } } }; private fetchMapCoordinatesAndDisplay(locations){ this.headerService.setGoogleMapInstance(this.mapInstance); // Now you can add elements to the map like the marker this.filterVisitAndOfficeLocations(locations|| []); this.addVisitLocationMarker(); this.addAccentureLocationMarker(); this.zoomToCurrentLocation(); } private _loadGoogleMapAPIScript(){ let script = (<any>document).createElement('script'); script.async = true; script.defer = true; script.src = `https://maps.googleapis.com/maps/api/js?callback=__onGoogleLoaded&key=${this.googleMapDesktopAPIKey}`; script.type = 'text/javascript'; (<any>document).getElementsByTagName('head')[0].appendChild(script); } isApiLoaded(): boolean { return (<any>window).google ? true : false; } loadApi(): void{ if(!this._apiLoadingPromise){ this._apiLoadingPromise = new Promise( (resolve) => { (<any>window)['__onGoogleLoaded'] = (ev) => { console.log('google maps api loaded'); resolve('google maps api loaded'); } this._loadGoogleMapAPIScript(); }); } } public __onGoogleLoaded(locations){ let mapProp = { center: new google.maps.LatLng(51.508742,-0.120850), zoom: this.mapZoomLevel, tilt: 30 }; let element: HTMLElement = document.getElementById('flatMapContainerPhone'); this.mapInstance = new google.maps.Map(element, mapProp); this.fetchMapCoordinatesAndDisplay(locations); } private fetchPositionLatLong(latitude, longtitude){ let position; if(window.cordova){ position = new LatLng(latitude, longtitude); }else{ position = new google.maps.LatLng(latitude, longtitude); } return position; } private filterVisitAndOfficeLocations(clientvisitLocations: any[]): void { let me = this; //visit Location let clientLocationGrouped = _.groupBy(clientvisitLocations[0], function(locationObj: any){ return locationObj.CityName; }); _.forEach(clientLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); me.displayVisitLocation.push(locationObj); }); //Accenture Offices let visitLocationGrouped = _.groupBy(clientvisitLocations[1], function(locationObj: any){ return locationObj.CityName; }); let accentureOffices = []; _.forEach(visitLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); accentureOffices.push(locationObj); }); accentureOffices.map(function(locationObj, key){ let filteredData = _.filter(me.displayVisitLocation, function(o) { return o.CityName === locationObj.CityName; }); if(!filteredData.length){ me.displayAccentureOfficeLocation.push(locationObj); } }); } private addVisitLocationMarker() : void{ let me = this, counter = 0; 
me.displayVisitLocation.map(function(locationObj, key){ if(!counter){ me.selectedMapCityName = locationObj.CityName + ' '+ moment(locationObj.StartDate).format('Do MMM YYYY'); } // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longtitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: (counter<6) ? me.visitLocationMarkerUrl[counter] : me.visitLocationMarkerUrl[6], detail: locationObj, width: me.visitLocationWidth, height: me.visitLocationHeight }); counter++; }); } private addAccentureLocationMarker(): void{ let me = this; _.forEach(me.displayAccentureOfficeLocation, function(locationObj, key){ // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: me.officeLocationMarkerUrl, width: me.accentureOfficeWidth, height: me.accentureOfficeHeight }); }); }; private zoomToCurrentLocation(){ if(window.cordova){ //for mobile this.mapInstance.getMyLocation().then(res => { console.log('Give it to me' + res.latLng); let position = { target: res.latLng, zoom: this.mapZoomLevel, tilt: 30 }; if(this.mapInstance){ this.mapInstance.animateCamera(position); } }); }else{ //for desktop // Try HTML5 geolocation. let me = this; if (navigator.geolocation) { navigator.geolocation.getCurrentPosition(function(position) { let pos = new google.maps.LatLng(position.coords.latitude, position.coords.longitude); if(me.mapInstance){ me.mapInstance.setCenter(pos); } }, function() { //user denied permission }); } } } private addMapMarker(markerObj: ISingleVisitAddMarkerParams){ let me = this; if(this.mapInstance){ if(window.cordova){ //for mobile // create new marker let markerOptions: MarkerOptions = { position: markerObj.position, title: markerObj.title, icon: { url: markerObj.icon, size: { width: markerObj.width,
{ this.loadMap(); }
identifier_body
singleVisit.component.ts
Url: Array<string>; public visitStartDate: string; public visitEndDate: string; public selectedMapCityName; private displayVisitLocation: any = []; private displayAccentureOfficeLocation: any = []; private defaultShowInfoWindow; private _apiLoadingPromise: Promise<any>; private visitLocationWidth; private visitLocationHeight; private accentureOfficeWidth; private accentureOfficeHeight; private mapZoomLevel = 3; private googleMapDesktopAPIKey = 'AIzaSyD5yFBB69RwOsb6sVRsQEapd9ynwCuoBYo'; private googleMapInfoWindow; constructor( private googleMaps: GoogleMaps, private selectvisit: SelectVisitServices, private headerService: HeaderService, private _translate: TranslateService, private _router: Router) { if(window.cordova){ this.markerIconBasePath = 'www/resources/images/singlevisit/'; this.visitLocationWidth = 17; this.visitLocationHeight = 19; this.accentureOfficeWidth = 11; this.accentureOfficeHeight = 15; }else{ this.markerIconBasePath = './resources/images/singlevisit/'; this.visitLocationWidth = 27; this.visitLocationHeight = 29; this.accentureOfficeWidth = 21; this.accentureOfficeHeight = 15; } this.officeLocationMarkerUrl = this.markerIconBasePath+'AccentureOffice.png'; this.visitLocationMarkerUrl = [ this.markerIconBasePath+'pin_1.png', this.markerIconBasePath+'pin_2.png', this.markerIconBasePath+'pin_3.png', this.markerIconBasePath+'pin_4.png', this.markerIconBasePath+'pin_5.png', this.markerIconBasePath+'pin_6.png', this.markerIconBasePath+'pin_0.png']; } ngOnInit(){ let obj = { title:this._translate.instant('visitLocation'), isHomeBtn: false, isBackBtn: true, isMenuBtn: true }; this.headerService.setHeaderObject(obj); this.visitStartDate = this.selectvisit.selectedVisitObj.VisitStartDate; this.visitEndDate = this.selectvisit.selectedVisitObj.VisitEndDate; this.defaultShowInfoWindow = false; if(this.headerService.isSideMenuClick){ this.displayFooterSection = false; } } // Load map only after view is initialized ngAfterViewInit() { this.loadMap(); } loadMap() { this.selectvisit.getVisitLocationDetails(this.selectvisit.selectedVisitObj.VisitID) .subscribe(locations => this.successLocationCallback(locations), error => this.errorMessage = <any>error); } private successLocationCallback(locations: any[]){ // create a new map by passing HTMLElement let element: HTMLElement = document.getElementById('flatMapContainerPhone'); if(window.cordova){ this.mapInstance = this.googleMaps.create(element); // listen to MAP_READY event // You must wait for this event to fire before adding something to the map or modifying it in anyway this.mapInstance.one(GoogleMapsEvent.MAP_READY).then( () => { console.log('Map is ready!'); this.fetchMapCoordinatesAndDisplay(locations); } ); }else{ let me = this; if( !this.isApiLoaded() ){ this.loadApi(); this._apiLoadingPromise.then( () => { me.__onGoogleLoaded(locations); }); }else{ me.__onGoogleLoaded(locations); } } }; private fetchMapCoordinatesAndDisplay(locations){ this.headerService.setGoogleMapInstance(this.mapInstance); // Now you can add elements to the map like the marker this.filterVisitAndOfficeLocations(locations|| []); this.addVisitLocationMarker(); this.addAccentureLocationMarker(); this.zoomToCurrentLocation(); } private _loadGoogleMapAPIScript(){ let script = (<any>document).createElement('script'); script.async = true; script.defer = true; script.src = `https://maps.googleapis.com/maps/api/js?callback=__onGoogleLoaded&key=${this.googleMapDesktopAPIKey}`; script.type = 'text/javascript'; 
(<any>document).getElementsByTagName('head')[0].appendChild(script); } isApiLoaded(): boolean { return (<any>window).google ? true : false; } loadApi(): void{ if(!this._apiLoadingPromise){ this._apiLoadingPromise = new Promise( (resolve) => { (<any>window)['__onGoogleLoaded'] = (ev) => { console.log('google maps api loaded'); resolve('google maps api loaded'); } this._loadGoogleMapAPIScript(); }); } } public __onGoogleLoaded(locations){ let mapProp = { center: new google.maps.LatLng(51.508742,-0.120850), zoom: this.mapZoomLevel, tilt: 30 }; let element: HTMLElement = document.getElementById('flatMapContainerPhone'); this.mapInstance = new google.maps.Map(element, mapProp); this.fetchMapCoordinatesAndDisplay(locations); } private fetchPositionLatLong(latitude, longtitude){ let position; if(window.cordova){ position = new LatLng(latitude, longtitude); }else{ position = new google.maps.LatLng(latitude, longtitude); } return position; } private filterVisitAndOfficeLocations(clientvisitLocations: any[]): void { let me = this; //visit Location let clientLocationGrouped = _.groupBy(clientvisitLocations[0], function(locationObj: any){ return locationObj.CityName; }); _.forEach(clientLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); me.displayVisitLocation.push(locationObj); }); //Accenture Offices let visitLocationGrouped = _.groupBy(clientvisitLocations[1], function(locationObj: any){ return locationObj.CityName; }); let accentureOffices = []; _.forEach(visitLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); accentureOffices.push(locationObj); }); accentureOffices.map(function(locationObj, key){ let filteredData = _.filter(me.displayVisitLocation, function(o) { return o.CityName === locationObj.CityName; }); if(!filteredData.length){ me.displayAccentureOfficeLocation.push(locationObj); } }); } private
() : void{ let me = this, counter = 0; me.displayVisitLocation.map(function(locationObj, key){ if(!counter){ me.selectedMapCityName = locationObj.CityName + ' '+ moment(locationObj.StartDate).format('Do MMM YYYY'); } // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longtitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: (counter<6) ? me.visitLocationMarkerUrl[counter] : me.visitLocationMarkerUrl[6], detail: locationObj, width: me.visitLocationWidth, height: me.visitLocationHeight }); counter++; }); } private addAccentureLocationMarker(): void{ let me = this; _.forEach(me.displayAccentureOfficeLocation, function(locationObj, key){ // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: me.officeLocationMarkerUrl, width: me.accentureOfficeWidth, height: me.accentureOfficeHeight }); }); }; private zoomToCurrentLocation(){ if(window.cordova){ //for mobile this.mapInstance.getMyLocation().then(res => { console.log('Give it to me' + res.latLng); let position = { target: res.latLng, zoom: this.mapZoomLevel, tilt: 30 }; if(this.mapInstance){ this.mapInstance.animateCamera(position); } }); }else{ //for desktop // Try HTML5 geolocation. let me = this; if (navigator.geolocation) { navigator.geolocation.getCurrentPosition(function(position) { let pos = new google.maps.LatLng(position.coords.latitude, position.coords.longitude); if(me.mapInstance){ me.mapInstance.setCenter(pos); } }, function() { //user denied permission }); } } } private addMapMarker(markerObj: ISingleVisitAddMarkerParams){ let me = this; if(this.mapInstance){ if(window.cordova){ //for mobile // create new marker let markerOptions: MarkerOptions = { position: markerObj.position, title: markerObj.title, icon: { url: markerObj.icon, size: { width: markerObj.width, height
addVisitLocationMarker
identifier_name
main.js
// // arrs[3][7] = 2; // squArr = [[1][6],[1][7],[2][7],[3][7]]; // break; // case 3: // // |-字形 // // arrs[1][6] = 2; // // arrs[2][6] = 2; // // arrs[3][6] = 2; // // arrs[2][7] = 2; // squArr = [[1][6],[2][6],[3][6],[2][7]]; // break; // case 4: // // z字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][7] = 2; // // arrs[2][8] = 2; // squArr = [[1][6],[1][7],[2][7],[2][8]]; // break; // // } switch(index){ case 0: //田字形 //arrs[1][6]=2; //arrs[1][7]=2; //arrs[2][6]=2; //arrs[2][7]=2; squArr = [[1,6],[1,7],[2,6],[2,7]]; Chgcount = 0; break; case 1: //一字型 // for(var i=5;i<=8;i++){ // arrs[1][i]=2; // } squArr = [[1,5],[1,6],[1,7],[1,8]]; Chgcount = 0; break; case 2: //7字形 // arrs[1][6]=2; // arrs[1][7]=2; // arrs[2][7]=2; // arrs[3][7]=2; squArr = [[1,6],[1,7],[2,7],[3,7]]; Chgcount = 0; break; case 3: // |-型 // arrs[1][6]=2; // arrs[2][6]=2; // arrs[3][6]=2; // arrs[2][7]=2; squArr = [[1,6],[2,6],[3,6],[2,7]]; Chgcount = 0; break; case 4: //z字形 // arrs[1][6]=2; // arrs[1][7]=2; // arrs[2][7]=2; // arrs[2][8]=2; squArr = [[1,6],[1,7],[2,7],[2,8]]; Chgcount = 0; break; } } //给块赋值的方法 function block() { for (var i = 0; i < squArr.length; i++) { arrs[squArr[i][0]][squArr[i][1]] = 2; } //重绘 draw(); } //隐藏原来的块 function hidden() { //遍历squArr //把原来的块隐藏 for (var i = 0; i < squArr.length; i++) { //原本的块隐藏 // console.log(squArr[i][0]+","+squArr[i][1]) arrs[squArr[i][0]][squArr[i][1]] = 0; } } //移动给对应坐标 function move(direction) { for (var i = 0; i < squArr.length; i++) { if (direction == "down") { target[i] = [squArr[i][0] + 1, squArr[i][1]]; } else if (direction == "left") { target[i] = [squArr[i][0], squArr[i][1] - 1]; } else if (direction == "right") { target[i] = [squAr
移动 能移动则把移动完数组的值赋给squarr if(canMove()){ squArr = target; //绘出来 block(); } } } } //拿到显示分数的盒子 var scoreBox = document.getElementById("score"); //显示的分数值 var score = 0; //消除的方法 function clear(){ //遍历整个数组 for(var i=22;i>=1;i--){ var isClear = true; for(var j=12;j>=1;j--){ if(arrs[i][j] != 2){ isClear = false; break; } } if(isClear){ score += 10; //显示分数 scoreBox.innerText = score; //当分数大于100 难道增加 if(score>=50){ datatime = 200; } for(var k=i;k>1;k--){ for(var j=12;j>=1;j--){ arrs[k][j] = arrs[k-1][j]; draw(); } } i++; } // if(isClear == true){ // for(var j=0;j<arrs[i].length;j++){ // arrs[i][j] = 0; // draw(); // } // // } } //draw(); } //形状变化的方法 //定义一个变量来记录变化的次数 var Chgcount; function change(){ //根据块的坐标发生改变的 squArr来发生改变 target来保存改变完的坐标 //隐藏原来的坐标 hidden(); //改变位置 根据随机数生成的图形来变化 switch(index){ case 0: //田字形 不需要改变 //target = squArr; break; case 1: //一字形 第一个点是不会改变的 根据第一点来变化 //squArr = [[1,5],[1,6],[1,7],[1,8]]; // target[0][0] = squArr[0][0]+0; // target[0][1] = squArr[0][1]-0; // target[1][0] = squArr[1][0]+1; // target[1][1] = squArr[1][1]-1; // target[2][0] = squArr[2][0]+2; // target[2][1] = squArr[2][1]-2; // target[3][0] = squArr[3][0]+3; // target[3][1] = squArr[3][1]-3; if(Chgcount%2){ //当为真的时候 值为1的时候变横条 否则变竖条 //隐藏开始的位置 hidden(); //给变化后的位置赋值 for(var i=0;i<4;i++){ target[i][0] =squArr[0][0]; target[i][1] =squArr[0][1]+i; } }else{ //隐藏开始的位置 hidden(); //给变化后的位置赋值 for(var i=0;i<4;i++){ target[i][0] =squArr[0][0]+i; target[i][1] =squArr[0][1]; } } break; case 2: //7字形 有四种形态 //squArr = [[1,6],[1,7],[2,7],[3,7]]; switch(Chgcount%4){ case 0: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]+1; target[1][0] = squArr[2][0]; target[1][1] = squArr[2][1]+1; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]; target[3][1] = squArr[2][1]-1; break; case 1: hidden(); target[0][0] = squArr[2][0]+1; target[0][1] = squArr[2][1]+1;
r[i][0], squArr[i][1] + 1]; } else if (direction == "up") { // shape change: write the rotated coordinates into the movement array change(); // check whether it can
identifier_body
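move()/canMove() above follow a compute-then-commit pattern: build the candidate coordinates in `target`, verify that no cell lands on a wall (1) or a settled block (2), and only then copy `target` into `squArr`. A compact sketch of that pattern (illustrative types; it assumes the moving piece has already been erased from the grid, cf. hidden()):

// Compute a moved copy of the piece; commit it only if every target cell is free.
type Cell = [number, number];

function tryMove(grid: number[][], piece: Cell[], dRow: number, dCol: number): Cell[] | null {
  const target: Cell[] = piece.map(([r, c]) => [r + dRow, c + dCol]);
  // 1 marks the wall, 2 marks settled blocks; anything else is free.
  const blocked = target.some(([r, c]) => grid[r][c] === 1 || grid[r][c] === 2);
  return blocked ? null : target;
}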
main.js
var j = 0; j < 14; j++) { //当是第一行或者是最后一行或者是第一列或者是最后一列 为墙赋值为1 if (i == 0 || i == 23 || j == 0 || j == 13) { arrs[i][j] = 1; } } } } //先拿到所有的单元格 数组 var tds = document.getElementsByTagName("td"); //控制难度的时间 datatime = 500; //随机生成颜色的方法 function randomColor(){ var r,g,b; r=Math.floor(Math.random()*166+90); g=Math.floor(Math.random()*166+90); b=Math.floor(Math.random()*166+90); return "rgba("+r+","+g+","+b+",1)" } //绘图的方法 function draw() { //先遍历二维数组 for (var i = 0; i < arrs.length; i++) { for (var j = 0; j < arrs[i].length; j++) { //当二维数组里面的值是1 为墙 if (arrs[i][j] == 1) { //设置墙的背景为红色 js tds[i * 14 + j].style.background = "blue"; //jQuery中的$("td")是拿到的第一个td元素 //$("td")[i * 14 + j].css("background", "red"); 不起作用 拿到的不是数组 //当值为0 为空白 设置为白色 } else if (arrs[i][j] == 0) { tds[i * 14 + j].style.background = "none"; //$("td")[i * 14 + j].css("background", "white"); //当值为2 设置为蓝色 } else if (arrs[i][j] == 2) { // tds[i * 14 + j].style.background = randomColor(); tds[i * 14 + j].style.background = "red"; //$("td")[i * 14 + j].css("background", "blue"); } } } } //初始的块 var squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //定义一个二维数组用来保存随机生成的块的坐标 移动的块 var target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //随机数变量 var index; // console.log(squArr) //随机生成图形的方法 需要根据你传过来的值来生成不一样的图形 function randomBlock() { //初始化初值 squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //生成图形 Math.floor向下取整 Math.random生成随机数0-1不包含1 index = Math.floor(Math.random() * 5); //根据随机生成的数进行判断 // switch(index){ // case 0: // // 田字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][6] = 2; // // arrs[2][7] = 2; // squArr = [[1,6],[1,7],[2,6],[2,7]]; // // for (var i = 0; i < squArr.length; i++) { // // arrs[squArr[i][0]][squArr[i][1]] = 2; // // } // break; // case 1: // // 一字形 // // arrs[1][5] = 2; // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[1][8] = 2; // // for (var i = 5; i <= 8; i++) { // // arrs[1][i] = 2; // // } // squArr = [[1][5],[1][6],[1][7],[1][8]]; // break; // case 2: // // T字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][7] = 2; // // arrs[3][7] = 2; // squArr = [[1][6],[1][7],[2][7],[3][7]]; // break; // case 3: // // |-字形 // // arrs[1][6] = 2; // // arrs[2][6] = 2; // // arrs[3][6] = 2; // // arrs[2][7] = 2; // squArr = [[1][6],[2][6],[3][6],[2][7]]; // break; // case 4: // // z字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][7] = 2; // // arrs[2][8] = 2; // squArr = [[1][6],[1][7],[2][7],[2][8]]; // break; // // } switch(index){ case 0: //田字形 //arrs[1][6]=2; //arrs[1][7]=2; //arrs[2][6]=2; //arrs[2][7]=2; squArr = [[1,6],[1,7],[2,6],[2,7]]; Chgcount = 0; break; case 1: //一字型 // for(var i=5;i<=8;i++){ // arrs[1][i]=2; // } squArr = [[1,5],[1,6],[1,7],[1,8]]; Chgcount = 0; break; case 2: //7字形 // arrs[1][6]=2; // arrs[1][7]=2; // arrs[2][7]=2; // arrs[3][7]=2; squArr = [[1,6],[1,7],[2,7],[3,7]]; Chgcount = 0; break; case 3: // |-型 // arrs[1][6]=2; // arrs[2][6]=2; // arrs[3][6]=2; // arrs[2][7]=2; squArr = [[1,6],[2,6],[3,6],[2,7]]; Chgcount = 0; break; case 4: //z字形 // arrs[1][6]=2; // arrs[1][7]=2; // arrs[2][7]=2; // arrs[2][8]=2; squArr = [[1,6],[1,7],[2,7],[2,8]]; Chgcount = 0; break; } } //给块赋值的方法 function block() { for (var i = 0; i < squArr.length; i++) { arrs[squArr[i][0]][squArr[i][1]] = 2; } //重绘 draw(); } //隐藏原来的块 function hidden() { //遍历squArr //把原来的块隐藏 for (var i = 0; i < squArr.length; i++) { //原本的块隐藏 // console.log(squArr[i][0]+","+squArr[i][1]) arrs[squArr[i][0]][squArr[i][1]] = 0; } } //移动给对应坐标 function move(direction) { for (var i = 0; i < squArr.length; i++) { if (direction == "down") { 
target[i] = [squArr[i][0] + 1, squArr[i][1]]; } else if (direction == "left") { target[i] = [squArr[i][0], squArr[i][1] - 1]; } else if (direction == "right") { target[i] = [squArr[i][0], squArr[i][1
or (
identifier_name
main.js
][0]+1; target[3][1] = squArr[2][1]; break; } break; case 3: // |-字形 //squArr = [[1,6],[2,6],[3,6],[2,7]]; switch(Chgcount%4){ case 0: hidden(); target[0][0] = squArr[1][0]; target[0][1] = squArr[1][1]+1; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]; target[2][1] = squArr[1][1]-1; target[3][0] = squArr[1][0]+1; target[3][1] = squArr[1][1]; break; case 1: hidden(); target[0][0] = squArr[1][0]-1; target[0][1] = squArr[1][1]; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]+1; target[2][1] = squArr[1][1]; target[3][0] = squArr[1][0]; target[3][1] = squArr[1][1]-1; break; case 2: hidden(); target[0][0] = squArr[1][0]; target[0][1] = squArr[1][1]+1; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]; target[2][1] = squArr[1][1]-1; target[3][0] = squArr[1][0]-1; target[3][1] = squArr[1][1]; break; case 3: hidden(); target[0][0] = squArr[1][0]-1; target[0][1] = squArr[1][1]; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]+1; target[2][1] = squArr[1][1]; target[3][0] = squArr[1][0]; target[3][1] = squArr[1][1]+1; break; } break; case 4: // z字形 //squArr = [[1,6],[1,7],[2,7],[2,8]]; switch(Chgcount%2){ case 0: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]+1; target[1][0] = squArr[2][0]; target[1][1] = squArr[2][1]+1; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]+1; target[3][1] = squArr[2][1]; break; case 1: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]-1; target[1][0] = squArr[2][0]-1; target[1][1] = squArr[2][1]; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]; target[3][1] = squArr[2][1]+1; break; } break; } } //自动往下掉的方法 function autoDown() { //把原本的坐标还原 target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; hidden(); move("down"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } else { //多画一次 block(); clear(); //判断是否gameover var isGameOver = false; for(var i=0;i<target.length;i++){ if(target[i][0] == 1 || target[i][0] == 2){ isGameOver = true; break; } } if(isGameOver){ alert("Game over"); if(confirm("是否继续游戏?")){ isBegin = false; isGameOver = false; //清除定时器 clearInterval(timer); //初始化 init(); //随机生成块 randomBlock(); //给块的坐标位置赋值 // console.log(squArr) block(); //重新绘图 draw(); timer = setInterval("autoDown()", datatime); // isBegin = true; // } else { // alert("当前游戏已经开始"); // } } } randomBlock(); } //赋值 block(); //遍历二维数组 // for (var i = 0; i < arrs.length; i++) { // for (var j = 0; j < arrs[i].length; j++) { // } // } } function canMove() { var isCanMove = false; //先判断底下是否有东西 遍历移动的坐标 查看移动的坐标上是否值为1或2 for (var i = 0; i < 4; i++) { if (arrs[target[i][0]][target[i][1]] != 1 && arrs[target[i][0]][target[i][1]] != 2) { // squArr = [[-1, -1],[-1, -1],[-1, -1],[-1, -1]]; // //如果可以移动 那么我的坐标就变成了移动完的坐标 // squArr = target; isCanMove = true; } else { return false; } // if (tds[target[i][0] * 14 + target[i][1]].style.background != "blue" && tds.style.background != "red") { // squArr = [[-1, -1],[-1, -1],[-1, -1],[-1, -1]]; // //如果可以移动 那么我的坐标就变成了移动完的坐标 // squArr = target; } return isCanMove; } //调用绘图方法 draw(); //当点击开始的时候执行 $("#action").click(function() { if (isBegin == false) { console.log(arrs) //随机生成块 randomBlock(); //给块的坐标位置赋值 block(); //重新绘图 draw(); timer = setInterval("autoDown()", datatime); isBegin = true; } else { alert("当前游戏已经开始"); } }) var count = 0; //暂停按钮 $("#pause").click(function() { // 
clearInterval(timer); // hidden(); // isBegin = false; if (count%2 == 0) { $("#pause").text("Resume"); clearInterval(timer); } else { timer = setInterval("autoDown()",datatime); $("#pause").text("Pause"); } count++; }) // whether the game has started var isBegin = false; // timer handle var timer; // keyboard drives movement and rotation // keydown event window.onkeydown = function(event) { // get the keyboard event object event = window.event || event; target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; // find out which key was pressed // via its ASCII code: event.keyCode //alert(event.keyCode); switch (event.keyCode) { // Enter starts the game case 13: if (isBegin == false) { // randomly generate a block randomBlock(); // assign values to the block's coordinates console.log(squArr)
            block();        // assign its coordinates
            draw();         // redraw
            timer = setInterval(autoDown, datatime);
            isBegin = true;
        } else {
            alert("The game has already started");
        }
        break;
    // left arrow
    case 37: conso
conditional_block
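The four-way switches above encode each orientation as explicit offsets from a pivot cell (squArr[1] for the |- shape, squArr[2] for the Z-shape), selected by Chgcount modulo the orientation count. The same idea can be written as data instead of branches; a minimal Python sketch with the offsets transcribed from the |- shape's four cases (all names here are illustrative, not from main.js):

T_SHAPE = [  # (row, col) offsets from the pivot, one list per orientation
    [(0, 1), (0, 0), (0, -1), (1, 0)],
    [(-1, 0), (0, 0), (1, 0), (0, -1)],
    [(0, 1), (0, 0), (0, -1), (-1, 0)],
    [(-1, 0), (0, 0), (1, 0), (0, 1)],
]

def rotated_cells(pivot, shape, chg_count):
    """Return the four absolute cells for the current orientation."""
    py, px = pivot
    offsets = shape[chg_count % len(shape)]
    return [(py + dy, px + dx) for dy, dx in offsets]

print(rotated_cells((2, 6), T_SHAPE, 1))  # [(1, 6), (2, 6), (3, 6), (2, 5)]

With this layout, adding a shape means adding a table entry rather than another switch arm.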
main.js
] = squArr[2][1]-2;
    // target[3][0] = squArr[3][0]+3; target[3][1] = squArr[3][1]-3;
    if (Chgcount % 2) {
        // odd count: rotate to a horizontal bar, otherwise to a vertical bar
        hidden();  // hide the current position
        // assign the rotated positions
        for (var i = 0; i < 4; i++) {
            target[i][0] = squArr[0][0];
            target[i][1] = squArr[0][1] + i;
        }
    } else {
        hidden();
        for (var i = 0; i < 4; i++) {
            target[i][0] = squArr[0][0] + i;
            target[i][1] = squArr[0][1];
        }
    }
    break;
case 2: // L-shape (7), four orientations
    //squArr = [[1,6],[1,7],[2,7],[3,7]];
    switch (Chgcount % 4) {
    case 0:
        hidden();
        target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]+1;
        target[1][0] = squArr[2][0];   target[1][1] = squArr[2][1]+1;
        target[2][0] = squArr[2][0];   target[2][1] = squArr[2][1];
        target[3][0] = squArr[2][0];   target[3][1] = squArr[2][1]-1;
        break;
    case 1:
        hidden();
        target[0][0] = squArr[2][0]+1; target[0][1] = squArr[2][1]+1;
        target[1][0] = squArr[2][0]+1; target[1][1] = squArr[2][1];
        target[2][0] = squArr[2][0];   target[2][1] = squArr[2][1];
        target[3][0] = squArr[2][0]-1; target[3][1] = squArr[2][1];
        break;
    case 2:
        hidden();
        target[0][0] = squArr[2][0]+1; target[0][1] = squArr[2][1]-1;
        target[1][0] = squArr[2][0];   target[1][1] = squArr[2][1]-1;
        target[2][0] = squArr[2][0];   target[2][1] = squArr[2][1];
        target[3][0] = squArr[2][0];   target[3][1] = squArr[2][1]+1;
        break;
    case 3:
        hidden();
        target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]-1;
        target[1][0] = squArr[2][0]-1; target[1][1] = squArr[2][1];
        target[2][0] = squArr[2][0];   target[2][1] = squArr[2][1];
        target[3][0] = squArr[2][0]+1; target[3][1] = squArr[2][1];
        break;
    }
    break;
case 3: // T-shape (|-), four orientations
    //squArr = [[1,6],[2,6],[3,6],[2,7]];
    switch (Chgcount % 4) {
    case 0:
        hidden();
        target[0][0] = squArr[1][0];   target[0][1] = squArr[1][1]+1;
        target[1][0] = squArr[1][0];   target[1][1] = squArr[1][1];
        target[2][0] = squArr[1][0];   target[2][1] = squArr[1][1]-1;
        target[3][0] = squArr[1][0]+1; target[3][1] = squArr[1][1];
        break;
    case 1:
        hidden();
        target[0][0] = squArr[1][0]-1; target[0][1] = squArr[1][1];
        target[1][0] = squArr[1][0];   target[1][1] = squArr[1][1];
        target[2][0] = squArr[1][0]+1; target[2][1] = squArr[1][1];
        target[3][0] = squArr[1][0];   target[3][1] = squArr[1][1]-1;
        break;
    case 2:
        hidden();
        target[0][0] = squArr[1][0];   target[0][1] = squArr[1][1]+1;
        target[1][0] = squArr[1][0];   target[1][1] = squArr[1][1];
        target[2][0] = squArr[1][0];   target[2][1] = squArr[1][1]-1;
        target[3][0] = squArr[1][0]-1; target[3][1] = squArr[1][1];
        break;
    case 3:
        hidden();
        target[0][0] = squArr[1][0]-1; target[0][1] = squArr[1][1];
        target[1][0] = squArr[1][0];   target[1][1] = squArr[1][1];
        target[2][0] = squArr[1][0]+1; target[2][1] = squArr[1][1];
        target[3][0] = squArr[1][0];   target[3][1] = squArr[1][1]+1;
        break;
    }
    break;
case 4: // Z-shape, two orientations
    //squArr = [[1,6],[1,7],[2,7],[2,8]];
    switch (Chgcount % 2) {
    case 0:
        hidden();
        target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]+1;
        target[1][0] = squArr[2][0];   target[1][1] = squArr[2][1]+1;
        target[2][0] = squArr[2][0];   target[2][1] = squArr[2][1];
        target[3][0] = squArr[2][0]+1; target[3][1] = squArr[2][1];
        break;
    case 1:
        hidden();
        target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]-1;
        target[1][0] = squArr[2][0]-1; target[1][1] = squArr[2][1];
        target[2][0] = squArr[2][0];   target[2][1] = squArr[2][1];
        target[3][0] = squArr[2][0];   target[3][1] = squArr[2][1]+1;
        break;
    }
    break;
}
}

// drop the piece one row automatically
function autoDown() {
    // reset the scratch coordinates
    target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]];
    hidden();
    move("down");
    if (canMove()) {
        // the move is legal: the piece's coordinates become the moved ones
        squArr = target;
    } else {
        // the piece has landed: draw it once more and clear full rows
        block();
        clear();
        // game over if the landed piece reaches the top rows
        var isGameOver =
false;
        for (var i = 0; i < target.length; i++) {
            if (target[i][0] == 1 || target[i][0] == 2) {
                isGameOver = true;
                break;
            }
        }
        if (isGameOver) {
            alert("Game over");
            if (confirm("Play again?")) {
                isBegin = false;
                isGameOver = false;
                clearInterval(timer);  // stop the timer
                init();                // reset the board
                randomBlock();         // generate a random piece
                block();  // assign the piece's coordinates
                draw();   // redraw
random_line_split
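canMove() above rejects a move as soon as any target cell holds a 1 or 2 in arrs. A small Python sketch of the same collision test, with an explicit bounds check added (the meaning of 1 as a settled block and 2 as a frame cell is an assumption read off main.js, not stated in it):

def can_move(grid, cells):
    """A move is legal only if every destination cell is inside the grid and empty."""
    rows, cols = len(grid), len(grid[0])
    for y, x in cells:
        if not (0 <= y < rows and 0 <= x < cols):
            return False
        if grid[y][x] in (1, 2):  # settled block or frame
            return False
    return True

grid = [[0] * 14 for _ in range(20)]
grid[19][5] = 1  # one settled block
print(can_move(grid, [(18, 5), (18, 6)]))  # True
print(can_move(grid, [(19, 5), (19, 6)]))  # False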
model.go
Ind *Ind `xml:"w:ind,omitempty"`
	Jc  *Jc  `xml:"w:jc,omitempty"`
	RPr *RPr `xml:"w:rPr,omitempty"`
}

//Ind first-line settings: indentation and line height
type Ind struct {
	XMLName        xml.Name `xml:"w:ind"`
	FirstLineChars int64    `xml:"w:firstLineChars,attr"` // first-line indent in characters; 100 is one character
	LeftChars      int64    `xml:"w:leftChars,attr"`      // left indent in characters; 100 is one character
	RightChars     int64    `xml:"w:rightChars,attr"`     // right indent in characters; 100 is one character
	// FirstLine int64 `xml:"w:firstLine,attr"`
}

// <w:ind w:firstLineChars="200" w:firstLine="420" />

//R run
type R struct {
	XMLName xml.Name `xml:"w:r"`
	RPr     *RPr     `xml:"w:rPr,omitempty"`     // run properties
	T       *T       `xml:"w:t,omitempty"`       // text
	Drawing *Drawing `xml:"w:drawing,omitempty"` // picture
}

//Rfunc run sect
func (r *R) Rfunc() {
}

//SnapToGrid snap to the document grid
type SnapToGrid struct {
	XMLName xml.Name `xml:"w:snapToGrid"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//RPr run (text) properties
type RPr struct {
	XMLName xm
e `xml:"w:rPr"`
	RFonts *RFonts `xml:"w:rFonts,omitempty"`
	B      *Bold   `xml:"w:b,omitempty"`
	// BCs string `xml:"w:bCs,omitempty"`
	Color *Color `xml:"w:color"`
	Sz    *Sz    `xml:"w:sz"`
	SzCs  *SzCs  `xml:"w:szCs"`
}

//Bold bold text
type Bold struct {
	XMLName xml.Name `xml:"w:b"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//RFonts <w:rFonts w:ascii="微软雅黑" w:eastAsia="微软雅黑" w:hAnsi="微软雅黑"/>
type RFonts struct {
	XMLName  xml.Name `xml:"w:rFonts"`
	ASCII    string   `xml:"w:ascii,attr,omitempty"`
	EastAsia string   `xml:"w:eastAsia,attr,omitempty"`
	HAnsi    string   `xml:"w:hAnsi,attr,omitempty"`
}

//Color <w:color w:val="3A3838"/>
type Color struct {
	XMLName xml.Name `xml:"w:color"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//Sz font size in half-points: 14pt is stored as 28, twice the size shown in Word <w:sz w:val="56"/>
type Sz struct {
	XMLName xml.Name `xml:"w:sz"`
	Val     int64    `xml:"w:val,attr,omitempty"`
}

//SzCs complex-script font size <w:szCs w:val="56"/>
type SzCs struct {
	XMLName xml.Name `xml:"w:szCs"`
	Val     int64    `xml:"w:val,attr,omitempty"`
}

// Spacing line spacing
// <w:spacing w:before="360" w:after="120" w:line="480" w:lineRule="auto" w:beforeAutospacing="0" w:afterAutospacing="0"/>
// http://officeopenxml.com/WPspacing.php
// Values are in twentieths of a point. A normal single-spaced paragraph has a w:line value of 240, or 12 points.
// To specify units in hundredths of a line, use attributes 'afterLines'/'beforeLines'.
// The space between adjacent paragraphs will be the greater of the 'line' spacing of each paragraph, the spacing
// after the first paragraph, and the spacing before the second paragraph. So if the first paragraph specifies 240
// after and the second 80 before, and they are both single-spaced ('line' value of 240), then the space between
// the paragraphs will be 240.
// lineRule specifies how the spacing given in the line attribute is calculated.
// Note: if lineRule is atLeast or exactly, the line value is interpreted in twentieths of a point; if lineRule is auto, it is interpreted in 240ths of a line.
type Spacing struct {
	XMLName           xml.Name `xml:"w:spacing"`
	Before            int64    `xml:"w:before,attr,omitempty"`
	After             int64    `xml:"w:after,attr,omitempty"`
	Line              int64    `xml:"w:line,attr,omitempty"`
	LineRule          LineRule `xml:"w:lineRule,attr,omitempty"`
	BeforeAutospacing int64    `xml:"w:beforeAutospacing"`
	AfterAutospacing  int64    `xml:"w:afterAutospacing"`
}

//Jc paragraph alignment <w:jc w:val="left"/>
type Jc struct {
	XMLName xml.Name `xml:"w:jc"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//T text
type T struct {
	XMLName xml.Name `xml:"w:t"`
	Space   string   `xml:"xml:space,attr,omitempty"` // "preserve"
	// Space string `xml:"w:space,attr,omitempty"`
	Text string `xml:",chardata"`
}

//Drawing drawing container
type Drawing struct {
	XMLName xml.Name `xml:"w:drawing"`
	Inline  *Inline  `xml:"wp:inline,omitempty"` // inline picture
	Anchor  *Anchor  `xml:"wp:anchor,omitempty"` // anchored shape
}

//Inline frame of an inline drawing
type Inline struct {
	XMLName           xml.Name           `xml:"wp:inline"`
	DistT             int64              `xml:"distT,attr"`
	DistB             int64              `xml:"distB,attr"`
	DistL             int64              `xml:"distL,attr"`
	DistR             int64              `xml:"distR,attr"`
	Extent            *Extent            `xml:"wp:extent"`
	EffectExtent      *EffectExtent      `xml:"wp:effectExtent"`
	DocPr             *DocPr             `xml:"wp:docPr"`
	CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"`
	Graphic           *Graphic           `xml:"a:graphic"`
}

//Extent drawing extent
type Extent struct {
	XMLName xml.Name `xml:"wp:extent"`
	CX      int64    `xml:"cx,attr"`
	CY      int64    `xml:"cy,attr"`
}

//EffectExtent effective drawing extent
type EffectExtent struct {
	XMLName xml.Name `xml:"wp:effectExtent"`
	L       int64    `xml:"l,attr"` // left margin
	T       int64    `xml:"t,attr"` // top margin
	R       int64    `xml:"r,attr"` // right margin
	B       int64    `xml:"b,attr"` // bottom margin
}

//WrapNone no text wrapping
type WrapNone struct {
	XMLName xml.Name `xml:"wp:wrapNone"`
}

//DocPr document properties; only needs to be unique, otherwise apparently unused
type DocPr struct {
	XMLName xml.Name `xml:"wp:docPr"`
	ID      int64    `xml:"id,attr"`
	Name    string   `xml:"name,attr"`
}

//CNvGraphicFramePr graphic frame properties
type CNvGraphicFramePr struct {
	XMLName           xml.Name           `xml:"wp:cNvGraphicFramePr"`
	GraphicFrameLocks *GraphicFrameLocks `xml:"a:graphicFrameLocks"`
}

//GraphicFrameLocks graphic frame locks
type GraphicFrameLocks struct {
	XMLName        xml.Name `xml:"a:graphicFrameLocks"`
	A              string   `xml:"xmlns:a,attr"` // "http://schemas.openxmlformats.org/drawingml/2006/main"
	NoChangeAspect int64    `xml:"noChangeAspect,attr"`
}

//Graphic graphic
type Graphic struct {
	XMLName     xml.Name     `xml:"a:graphic"`
	A           string       `xml:"xmlns:a,attr"` // "http://schemas.openxmlformats.org/drawingml/2006/main"
	GraphicData *GraphicData `xml:"a:graphicData"`
}

//GraphicData graphic payload
type GraphicData struct {
	XMLName xml.Name `xml:"a:graphicData"`
	// uri = "http://schemas.openxmlformats.org/drawingml/2006/picture" for inline pictures
	// uri = "http://schemas.microsoft.com/office/word/2010/wordprocessingShape" for shapes
	URI string `xml:"uri,
l.Nam
identifier_name
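For reference, a hedged sketch of the markup the RPr struct above marshals to. This uses Python's ElementTree purely for illustration, passing the literal "w:"-prefixed names instead of real namespace handling; the project itself serializes the Go structs with encoding/xml, and the helper name and defaults here are assumptions:

import xml.etree.ElementTree as ET

def run_properties(font="微软雅黑", color="3A3838", half_points=28, bold=True):
    rpr = ET.Element("w:rPr")
    ET.SubElement(rpr, "w:rFonts", {"w:ascii": font, "w:eastAsia": font, "w:hAnsi": font})
    if bold:
        ET.SubElement(rpr, "w:b")
    ET.SubElement(rpr, "w:color", {"w:val": color})
    ET.SubElement(rpr, "w:sz", {"w:val": str(half_points)})    # 28 half-points = 14pt
    ET.SubElement(rpr, "w:szCs", {"w:val": str(half_points)})
    return ET.tostring(rpr, encoding="unicode")

print(run_properties())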
model.go
Ind *Ind `xml:"w:ind,omitempty"`
	Jc  *Jc  `xml:"w:jc,omitempty"`
	RPr *RPr `xml:"w:rPr,omitempty"`
}

//Ind first-line settings: indentation and line height
type Ind struct {
	XMLName        xml.Name `xml:"w:ind"`
	FirstLineChars int64    `xml:"w:firstLineChars,attr"` // first-line indent in characters; 100 is one character
	LeftChars      int64    `xml:"w:leftChars,attr"`      // left indent in characters; 100 is one character
	RightChars     int64    `xml:"w:rightChars,attr"`     // right indent in characters; 100 is one character
	// FirstLine int64 `xml:"w:firstLine,attr"`
}

// <w:ind w:firstLineChars="200" w:firstLine="420" />

//R run
type R struct {
	XMLName xml.Name `xml:"w:r"`
	RPr     *RPr     `xml:"w:rPr,omitempty"`     // run properties
	T       *T       `xml:"w:t,omitempty"`       // text
	Drawing *Drawing `xml:"w:drawing,omitempty"` // picture
}

//Rfunc run sect
func (r *R) Rfunc() {
}

//SnapToGrid snap to the document grid
type SnapToGrid struct {
	XMLName xml.Name `xml:"w:snapToGrid"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//RPr run (text) properties
type RPr struct {
	XMLName xml.Name `
:"w:rPr"` RFonts *RFonts `xml:"w:rFonts,omitempty"` B *Bold `xml:"w:b,omitempty"` // BCs string `xml:"w:bCs,omitempty"` Color *Color `xml:"w:color"` Sz *Sz `xml:"w:sz"` SzCs *SzCs `xml:"w:szCs"` } //Bold 加粗 type Bold struct { XMLName xml.Name `xml:"w:b"` Val string `xml:"w:val,attr,omitempty"` } //RFonts <w:rFonts w:ascii="微软雅黑" w:eastAsia="微软雅黑" w:hAnsi="微软雅黑"/> type RFonts struct { XMLName xml.Name `xml:"w:rFonts"` ASCII string `xml:"w:ascii,attr,omitempty"` EastAsia string `xml:"w:eastAsia,attr,omitempty"` HAnsi string `xml:"w:hAnsi,attr,omitempty"` } //Color <w:color w:val="3A3838"/> type Color struct { XMLName xml.Name `xml:"w:color"` Val string `xml:"w:val,attr,omitempty"` } //Sz 字号大小,如:14号字为28,Word上的字号乘以2 <w:sz w:val="56"/> type Sz struct { XMLName xml.Name `xml:"w:sz"` Val int64 `xml:"w:val,attr,omitempty"` } //SzCs 未知 <w:szCs w:val="56"/> type SzCs struct { XMLName xml.Name `xml:"w:szCs"` Val int64 `xml:"w:val,attr,omitempty"` } // Spacing 行间距 // <w:spacing w:before="360" w:after="120" w:line="480" w:lineRule="auto" w:beforeAutospacing="0" w:afterAutospacing="0"/> // http://officeopenxml.com/WPspacing.php // Values are in twentieths of a point. A normal single-spaced paragaph has a w:line value of 240, or 12 points. // To specify units in hundreths of a line, use attributes 'afterLines'/'beforeLines'. // The space between adjacent paragraphs will be the greater of the 'line' spacing of each paragraph, the spacing // after the first paragraph, and the spacing before the second paragraph. So if the first paragraph specifies 240 // after and the second 80 before, and they are both single-spaced ('line' value of 240), then the space between // the paragraphs will be 240. // Specifies how the spacing between lines as specified in the line attribute is calculated. // Note: If the value of the lineRule attribute is atLeast or exactly, then the value of the line attribute is interpreted as 240th of a point. If the value of lineRule is auto, then the value of line is interpreted as 240th of a line. 
type Spacing struct {
	XMLName           xml.Name `xml:"w:spacing"`
	Before            int64    `xml:"w:before,attr,omitempty"`
	After             int64    `xml:"w:after,attr,omitempty"`
	Line              int64    `xml:"w:line,attr,omitempty"`
	LineRule          LineRule `xml:"w:lineRule,attr,omitempty"`
	BeforeAutospacing int64    `xml:"w:beforeAutospacing"`
	AfterAutospacing  int64    `xml:"w:afterAutospacing"`
}

//Jc paragraph alignment <w:jc w:val="left"/>
type Jc struct {
	XMLName xml.Name `xml:"w:jc"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//T text
type T struct {
	XMLName xml.Name `xml:"w:t"`
	Space   string   `xml:"xml:space,attr,omitempty"` // "preserve"
	// Space string `xml:"w:space,attr,omitempty"`
	Text string `xml:",chardata"`
}

//Drawing drawing container
type Drawing struct {
	XMLName xml.Name `xml:"w:drawing"`
	Inline  *Inline  `xml:"wp:inline,omitempty"` // inline picture
	Anchor  *Anchor  `xml:"wp:anchor,omitempty"` // anchored shape
}

//Inline frame of an inline drawing
type Inline struct {
	XMLName           xml.Name           `xml:"wp:inline"`
	DistT             int64              `xml:"distT,attr"`
	DistB             int64              `xml:"distB,attr"`
	DistL             int64              `xml:"distL,attr"`
	DistR             int64              `xml:"distR,attr"`
	Extent            *Extent            `xml:"wp:extent"`
	EffectExtent      *EffectExtent      `xml:"wp:effectExtent"`
	DocPr             *DocPr             `xml:"wp:docPr"`
	CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"`
	Graphic           *Graphic           `xml:"a:graphic"`
}

//Extent drawing extent
type Extent struct {
	XMLName xml.Name `xml:"wp:extent"`
	CX      int64    `xml:"cx,attr"`
	CY      int64    `xml:"cy,attr"`
}

//EffectExtent effective drawing extent
type EffectExtent struct {
	XMLName xml.Name `xml:"wp:effectExtent"`
	L       int64    `xml:"l,attr"` // left margin
	T       int64    `xml:"t,attr"` // top margin
	R       int64    `xml:"r,attr"` // right margin
	B       int64    `xml:"b,attr"` // bottom margin
}

//WrapNone no text wrapping
type WrapNone struct {
	XMLName xml.Name `xml:"wp:wrapNone"`
}

//DocPr document properties; only needs to be unique, otherwise apparently unused
type DocPr struct {
	XMLName xml.Name `xml:"wp:docPr"`
	ID      int64    `xml:"id,attr"`
	Name    string   `xml:"name,attr"`
}

//CNvGraphicFramePr graphic frame properties
type CNvGraphicFramePr struct {
	XMLName           xml.Name           `xml:"wp:cNvGraphicFramePr"`
	GraphicFrameLocks *GraphicFrameLocks `xml:"a:graphicFrameLocks"`
}

//GraphicFrameLocks graphic frame locks
type GraphicFrameLocks struct {
	XMLName        xml.Name `xml:"a:graphicFrameLocks"`
	A              string   `xml:"xmlns:a,attr"` // "http://schemas.openxmlformats.org/drawingml/2006/main"
	NoChangeAspect int64    `xml:"noChangeAspect,attr"`
}

//Graphic graphic
type Graphic struct {
	XMLName     xml.Name     `xml:"a:graphic"`
	A           string       `xml:"xmlns:a,attr"` // "http://schemas.openxmlformats.org/drawingml/2006/main"
	GraphicData *GraphicData `xml:"a:graphicData"`
}

//GraphicData graphic payload
type GraphicData struct {
	XMLName xml.Name `xml:"a:graphicData"`
	// uri = "http://schemas.openxmlformats.org/drawingml/2006/picture" for inline pictures
	// uri = "http://schemas.microsoft.com/office/word/2010/wordprocessingShape" for shapes
	URI string `xml:"uri,attr"`
xml
identifier_body
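The Sz comment above encodes Word's half-point rule: the stored w:sz value is twice the point size shown in Word (14pt is stored as 28). A tiny illustrative helper, not project code:

def to_w_sz(points):
    """Word stores w:sz/w:szCs in half-points."""
    return int(points * 2)

assert to_w_sz(14) == 28
assert to_w_sz(10.5) == 21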
model.go
Ind *Ind `xml:"w:ind,omitempty"`
	Jc  *Jc  `xml:"w:jc,omitempty"`
	RPr *RPr `xml:"w:rPr,omitempty"`
}

//Ind first-line settings: indentation and line height
type Ind struct {
	XMLName        xml.Name `xml:"w:ind"`
	FirstLineChars int64    `xml:"w:firstLineChars,attr"` // first-line indent in characters; 100 is one character
	LeftChars      int64    `xml:"w:leftChars,attr"`      // left indent in characters; 100 is one character
	RightChars     int64    `xml:"w:rightChars,attr"`     // right indent in characters; 100 is one character
	// FirstLine int64 `xml:"w:firstLine,attr"`
}

// <w:ind w:firstLineChars="200" w:firstLine="420" />

//R run
type R struct {
	XMLName xml.Name `xml:"w:r"`
	RPr     *RPr     `xml:"w:rPr,omitempty"`     // run properties
	T       *T       `xml:"w:t,omitempty"`       // text
	Drawing *Drawing `xml:"w:drawing,omitempty"` // picture
}

//Rfunc run sect
func (r *R) Rfunc() {
}

//SnapToGrid snap to the document grid
type SnapToGrid struct {
	XMLName xml.Name `xml:"w:snapToGrid"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//RPr run (text) properties
type RPr struct {
	XMLName xml.Name `xml:"w:rPr"`
	RFonts  *RFonts  `xml:"w:rFonts,omitempty"`
	B       *Bold    `xml:"w:b,omitempty"`
	// BCs string `xml:"w:bCs,omitempty"`
	Color *Color `xml:"w:color"`
	Sz    *Sz    `xml:"w:sz"`
	SzCs  *SzCs  `xml:"w:szCs"`
}

//Bold bold text
type Bold struct {
	XMLName xml.Name `xml:"w:b"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//RFonts <w:rFonts w:ascii="微软雅黑" w:eastAsia="微软雅黑" w:hAnsi="微软雅黑"/>
type RFonts struct {
	XMLName  xml.Name `xml:"w:rFonts"`
	ASCII    string   `xml:"w:ascii,attr,omitempty"`
	EastAsia string   `xml:"w:eastAsia,attr,omitempty"`
	HAnsi    string   `xml:"w:hAnsi,attr,omitempty"`
}

//Color <w:color w:val="3A3838"/>
type Color struct {
	XMLName xml.Name `xml:"w:color"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//Sz font size in half-points: 14pt is stored as 28, twice the size shown in Word <w:sz w:val="56"/>
type Sz struct {
	XMLName xml.Name `xml:"w:sz"`
	Val     int64    `xml:"w:val,attr,omitempty"`
}

//SzCs complex-script font size <w:szCs w:val="56"/>
type SzCs struct {
	XMLName xml.Name `xml:"w:szCs"`
	Val     int64    `xml:"w:val,attr,omitempty"`
}

// Spacing line spacing
// <w:spacing w:before="360" w:after="120" w:line="480" w:lineRule="auto" w:beforeAutospacing="0" w:afterAutospacing="0"/>
// http://officeopenxml.com/WPspacing.php
// Values are in twentieths of a point. A normal single-spaced paragraph has a w:line value of 240, or 12 points.
// To specify units in hundredths of a line, use attributes 'afterLines'/'beforeLines'.
// The space between adjacent paragraphs will be the greater of the 'line' spacing of each paragraph, the spacing
// after the first paragraph, and the spacing before the second paragraph. So if the first paragraph specifies 240
// after and the second 80 before, and they are both single-spaced ('line' value of 240), then the space between
// the paragraphs will be 240.
// lineRule specifies how the spacing given in the line attribute is calculated.
// Note: if lineRule is atLeast or exactly, the line value is interpreted in twentieths of a point; if lineRule is auto, it is interpreted in 240ths of a line.
type Spacing struct {
	XMLName           xml.Name `xml:"w:spacing"`
	Before            int64    `xml:"w:before,attr,omitempty"`
	After             int64    `xml:"w:after,attr,omitempty"`
	Line              int64    `xml:"w:line,attr,omitempty"`
	LineRule          LineRule `xml:"w:lineRule,attr,omitempty"`
	BeforeAutospacing int64    `xml:"w:beforeAutospacing"`
	AfterAutospacing  int64    `xml:"w:afterAutospacing"`
}
	XMLName xml.Name `xml:"w:jc"`
	Val     string   `xml:"w:val,attr,omitempty"`
}

//T text
type T struct {
	XMLName xml.Name `xml:"w:t"`
	Space   string   `xml:"xml:space,attr,omitempty"` // "preserve"
	// Space string `xml:"w:space,attr,omitempty"`
	Text string `xml:",chardata"`
}

//Drawing drawing container
type Drawing struct {
	XMLName xml.Name `xml:"w:drawing"`
	Inline  *Inline  `xml:"wp:inline,omitempty"` // inline picture
	Anchor  *Anchor  `xml:"wp:anchor,omitempty"` // anchored shape
}

//Inline frame of an inline drawing
type Inline struct {
	XMLName           xml.Name           `xml:"wp:inline"`
	DistT             int64              `xml:"distT,attr"`
	DistB             int64              `xml:"distB,attr"`
	DistL             int64              `xml:"distL,attr"`
	DistR             int64              `xml:"distR,attr"`
	Extent            *Extent            `xml:"wp:extent"`
	EffectExtent      *EffectExtent      `xml:"wp:effectExtent"`
	DocPr             *DocPr             `xml:"wp:docPr"`
	CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"`
	Graphic           *Graphic           `xml:"a:graphic"`
}

//Extent drawing extent
type Extent struct {
	XMLName xml.Name `xml:"wp:extent"`
	CX      int64    `xml:"cx,attr"`
	CY      int64    `xml:"cy,attr"`
}

//EffectExtent effective drawing extent
type EffectExtent struct {
	XMLName xml.Name `xml:"wp:effectExtent"`
	L       int64    `xml:"l,attr"` // left margin
	T       int64    `xml:"t,attr"` // top margin
	R       int64    `xml:"r,attr"` // right margin
	B       int64    `xml:"b,attr"` // bottom margin
}

//WrapNone no text wrapping
type WrapNone struct {
	XMLName xml.Name `xml:"wp:wrapNone"`
}

//DocPr document properties; only needs to be unique, otherwise apparently unused
type DocPr struct {
	XMLName xml.Name `xml:"wp:docPr"`
	ID      int64    `xml:"id,attr"`
	Name    string   `xml:"name,attr"`
}

//CNvGraphicFramePr graphic frame properties
type CNvGraphicFramePr struct {
	XMLName           xml.Name           `xml:"wp:cNvGraphicFramePr"`
	GraphicFrameLocks *GraphicFrameLocks `xml:"a:graphicFrameLocks"`
}

//GraphicFrameLocks graphic frame locks
type GraphicFrameLocks struct {
	XMLName        xml.Name `xml:"a:graphicFrameLocks"`
	A              string   `xml:"xmlns:a,attr"` // "http://schemas.openxmlformats.org/drawingml/2006/main"
	NoChangeAspect int64    `xml:"noChangeAspect,attr"`
}

//Graphic graphic
type Graphic struct {
	XMLName     xml.Name     `xml:"a:graphic"`
	A           string       `xml:"xmlns:a,attr"` // "http://schemas.openxmlformats.org/drawingml/2006/main"
	GraphicData *GraphicData `xml:"a:graphicData"`
}

//GraphicData graphic payload
type GraphicData struct {
	XMLName xml.Name `xml:"a:graphicData"`
	// uri = "http://schemas.openxmlformats.org/drawingml/2006/picture" for inline pictures
	// uri = "http://schemas.microsoft.com/office/word/2010/wordprocessingShape" for shapes
	URI string `xml:"uri,attr"`
//Jc paragraph alignment <w:jc w:val="left"/>
type Jc struct {
random_line_split
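The Spacing comment block quotes the officeopenxml.com rules: with lineRule atLeast or exactly, w:line is in twentieths of a point (240 = 12pt), and with lineRule auto it is in 240ths of a line (480 = double spacing, as in the sample element). A sketch of both conversions, with assumed function names:

def w_line_exact(points):
    """w:line value for an exact line height given in points (twentieths of a point)."""
    return int(points * 20)

def w_line_auto(lines):
    """w:line value for a multiple of single line spacing (240ths of a line)."""
    return int(lines * 240)

assert w_line_exact(12) == 240  # a 12pt single-spaced line
assert w_line_auto(2) == 480    # double spacing, as in the sample above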
minesweeper.py
               'X', 'Y', 'Z']

    def __init__(self, height, width, mines):
        """initializes the Minesweeper instance with a width, height, and
        the number of mines. Sets up a default game table, generates random
        mine locations and updates another table for the solution."""
        self.x = int(width)
        self.y = int(height)
        self.table_state = [['-' for i in xrange(0, self.x)]
                            for j in xrange(0, self.y)]
        self.mine_locations = self.generate_mines(int(mines))
        self.final_table = self.generate_answer()

    @staticmethod
    def print_table(table, exploded_at=[-1, -1]):
        """prints the table, regardless of whether it's a game state table
        or the answer table."""
        # derive the dimensions from the table itself instead of relying on
        # the module-level width/height globals
        height = len(table)
        width = len(table[0])
        # color codes just to look pretty
        NORMAL = '\33[10m'
        BLUE_START = '\33[104m'
        RED_START = '\33[31m'
        PURPLE_START = '\33[35m'
        GREEN_START = '\33[92m'
        ORANGE_START = '\33[93m'
        END = '\033[0m'
        s = ' %s' % BLUE_START
        # print number headers along x-axis
        for i in range(0, width):
            s += " %s" % i
            if i < 10:
                s += " " * 2
            else:
                s += " "
        s += "%s\n" % END
        # print letters for y-axis, + the relevant values in each coordinate
        # depending on table.
        for y in range(0, height):
            s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END)
            for x in range(0, width):
                value = table[y][x]
                if value == "0":
                    s += "%s%s%s" % (NORMAL, value, END)
                elif value == "1":
                    s += "%s%s%s" % (GREEN_START, value, END)
                elif value == "2":
                    s += "%s%s%s" % (ORANGE_START, value, END)
                elif value == "3":
                    s += "%s%s%s" % (RED_START, value, END)
                elif value in ("4", "5", "6", "7", "8"):
                    s += "%s%s%s" % (PURPLE_START, value, END)
                # special
                elif value == "-":
                    s += "%s%s%s" % (NORMAL, value, END)
                elif value == Minesweeper.BOMB:
                    if y == exploded_at[0] and x == exploded_at[1]:
                        # Make the bomb at the casualty site explode!
                        s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END)
                    else:
                        # show normal bomb
                        s += "%s%s%s" % (RED_START, value, END)
                elif value == Minesweeper.FLAG:
                    s += "%s%s%s" % (RED_START, value, END)
                s += " " * 3
            s += "\n"
        # use tabbing to space them nicely
        print s.expandtabs(3)

    def generate_mines(self, number):
        """generate a list of viable coordinates for mines, and randomly
        choose them."""
        mine_locations = []
        available_places = [[j, i] for i in xrange(0, self.x)
                            for j in xrange(0, self.y)]
        while number > 0:
            # the chosen coordinate for a mine is appended into the list and
            # is removed from the list of choices to prevent duplicates.
        return mine_locations

    def generate_answer(self):
        ft = deepcopy(self.table_state)
        for x in range(0, self.x):
            for y in range(0, self.y):
                # get the number or mine with neighbours
                ft[y][x] = self.get_neighbour(y, x)
        return ft

    def get_neighbour(self, y, x):
        """populate answer table with numbers and mines"""
        if [y, x] in self.mine_locations:
            return Minesweeper.BOMB
        count = 0
        # (x-1, y-1), (x, y-1), (x+1, y-1),
        # (x-1, y),   (x, y),   (x+1, y),
        # (x-1, y+1), (x, y+1), (x+1, y+1)
        for xe in range(x - 1, x + 2):
            for ye in range(y - 1, y + 2):
                if [ye, xe] in self.mine_locations:
                    count += 1
        return str(count)

    def flags_nearby(self, y, x):
        """gets number of flags nearby"""
        count = 0
        l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
             for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            if xe >= self.x or ye >= self.y:
                continue
            if self.table_state[ye][xe] == Minesweeper.FLAG:
                count += 1
        return str(count)

    def special_open_neighbours(self, y, x):
        """Open neighbours if the flag number matches the count."""
        if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x):
            l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
                 for ye in range(y - 1, y + 2) if ye >= 0]
            for ye, xe in l:
                if xe >= self.x or ye >= self.y:
                    # do not open out of bounds
                    continue
                # if it is a bomb but not flagged
                if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:
                    self.show_answer_board([ye, xe])
                    print "KABOOM!"
                    return Minesweeper.IS_A_BOMB
            self.open_neighbours(y, x)
            self.print_table(self.table_state)
            return Minesweeper.NOT_A_BOMB

    def open_neighbours(self, y, x):
        """Open neighbours if the current coordinates are 0 and neighbours
        are untouched. Recursively opens if the neighbours are also 0."""
        if [y, x] in self.mine_locations:
            return [y, x]
        # generate neighbours with positive indexes
        l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
             for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            # if the indexes are out of the game table, skip
            if xe >= self.x or ye >= self.y:
                continue
            # if the current coordinates are still untouched, update their values
            if self.table_state[ye][xe] == '-':
                self.table_state[ye][xe] = self.final_table[ye][xe]
            # if the coordinate has a value of 0, recursively open its neighbours.
            if self.final_table[ye][xe] == '0':
                self.open_neighbours(ye, xe)

    def check_status(self):
        count = 0
        flag_count = 0
        for i in [item for sublist in self.table_state for item in sublist]:
            if i == '-':
                count += 1
            if i == Minesweeper.FLAG:
                count += 1
                flag_count += 1
        print "%d tiles remaining. (%d flagged)" % (count - flag_count, flag_count)
        return count == len(self.mine_locations)

    def flag(self, y, x):
        """set a flag at the desired coordinates."""
        if self.table_state[y][x] == '-':
            self.table_state[y][x] = Minesweeper.FLAG
        Minesweeper.print_table(self.table_state)

    def tease_user(self, y, x):
        """come here when the coordinates do not have a bomb.
        update the table_state with the selected coordinate."""
        self.table_state[y][x] = self.final_table[y][x]
        # if the opened tile is a 0, open its neighbours too
        if self.table_state[y][x] == '0':
            self.open_neighbours(y, x)
        self.print_table(self.table_state)

    def show_answer_board(self, coords):
        """prints the answer table with print_table."""
        Minesweeper.print_table(self.final_table, coords)

    def open_tile(self, y, x):
        """opens a tile at the respective coordinates on the table_state list."""
        # Find the letter index and convert into a y-coordinate.
        # Check if it is a mine
        if [y, x] in self.mine_locations:
            # explode
            self.show_answer_board([y, x])
            choice = random.choice(available_places)
            available_places.remove(choice)
            mine_locations.append(choice)
            number -= 1
conditional_block
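The choose/remove loop above guarantees distinct mine coordinates. random.sample gives the same uniqueness guarantee in a single call; a sketch (the function name and parameters are illustrative, not from the file):

import random

def pick_mines(height, width, mines):
    # draw `mines` distinct cells in one call
    cells = [[y, x] for x in range(width) for y in range(height)]
    return random.sample(cells, mines)

print(pick_mines(5, 5, 3))  # e.g. [[2, 4], [0, 1], [3, 3]]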
minesweeper.py
               'X', 'Y', 'Z']

    def __init__(self, height, width, mines):
        """initializes the Minesweeper instance with a width, height, and
        the number of mines. Sets up a default game table, generates random
        mine locations and updates another table for the solution."""
        self.x = int(width)
        self.y = int(height)
        self.table_state = [['-' for i in xrange(0, self.x)]
                            for j in xrange(0, self.y)]
        self.mine_locations = self.generate_mines(int(mines))
        self.final_table = self.generate_answer()

    @staticmethod
    def print_table(table, exploded_at=[-1, -1]):
        """prints the table, regardless of whether it's a game state table
        or the answer table."""
        # derive the dimensions from the table itself instead of relying on
        # the module-level width/height globals
        height = len(table)
        width = len(table[0])
        # color codes just to look pretty
        NORMAL = '\33[10m'
        BLUE_START = '\33[104m'
        RED_START = '\33[31m'
        PURPLE_START = '\33[35m'
        GREEN_START = '\33[92m'
        ORANGE_START = '\33[93m'
        END = '\033[0m'
        s = ' %s' % BLUE_START
        # print number headers along x-axis
        for i in range(0, width):
            s += " %s" % i
            if i < 10:
                s += " " * 2
            else:
                s += " "
        s += "%s\n" % END
        # print letters for y-axis, + the relevant values in each coordinate
        # depending on table.
        for y in range(0, height):
            s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END)
            for x in range(0, width):
                value = table[y][x]
                if value == "0":
                    s += "%s%s%s" % (NORMAL, value, END)
                elif value == "1":
                    s += "%s%s%s" % (GREEN_START, value, END)
                elif value == "2":
                    s += "%s%s%s" % (ORANGE_START, value, END)
                elif value == "3":
                    s += "%s%s%s" % (RED_START, value, END)
                elif value in ("4", "5", "6", "7", "8"):
                    s += "%s%s%s" % (PURPLE_START, value, END)
                # special
                elif value == "-":
                    s += "%s%s%s" % (NORMAL, value, END)
                elif value == Minesweeper.BOMB:
                    if y == exploded_at[0] and x == exploded_at[1]:
                        # Make the bomb at the casualty site explode!
                        s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END)
                    else:
                        # show normal bomb
                        s += "%s%s%s" % (RED_START, value, END)
                elif value == Minesweeper.FLAG:
                    s += "%s%s%s" % (RED_START, value, END)
                s += " " * 3
            s += "\n"
        # use tabbing to space them nicely
        print s.expandtabs(3)

    def generate_mines(self, number):
        """generate a list of viable coordinates for mines, and randomly
        choose them."""
        mine_locations = []
        available_places = [[j, i] for i in xrange(0, self.x)
                            for j in xrange(0, self.y)]
        while number > 0:
            # the chosen coordinate for a mine is appended into the list and
            # is removed from the list of choices to prevent duplicates.
            choice = random.choice(available_places)
            available_places.remove(choice)
            mine_locations.append(choice)
            number -= 1
        return mine_locations

    def generate_answer(self):
        ft = deepcopy(self.table_state)
        for x in range(0, self.x):
            for y in range(0, self.y):
                # get the number or mine with neighbours
                ft[y][x] = self.get_neighbour(y, x)
        return ft

    def get_neighbour(self, y, x):
        """populate answer table with numbers and mines"""
        if [y, x] in self.mine_locations:
            return Minesweeper.BOMB
        count = 0
        # (x-1, y-1), (x, y-1), (x+1, y-1),
        # (x-1, y),   (x, y),   (x+1, y),
        # (x-1, y+1), (x, y+1), (x+1, y+1)
        for xe in range(x - 1, x + 2):
            for ye in range(y - 1, y + 2):
                if [ye, xe] in self.mine_locations:
                    count += 1
        return str(count)

    def flags_nearby(self, y, x):
        """gets number of flags nearby"""
        count = 0
        l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
             for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            if xe >= self.x or ye >= self.y:
                continue
            if self.table_state[ye][xe] == Minesweeper.FLAG:
                count += 1
        return str(count)

    def special_open_neighbours(self, y, x):
        """Open neighbours if the flag number matches the count."""
        if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x):
            l = [[ye, xe] for xe in range(
                if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:
                    self.show_answer_board([ye, xe])
                    print "KABOOM!"
                    return Minesweeper.IS_A_BOMB
            self.open_neighbours(y, x)
            self.print_table(self.table_state)
            return Minesweeper.NOT_A_BOMB

    def open_neighbours(self, y, x):
        """Open neighbours if the current coordinates are 0 and neighbours
        are untouched. Recursively opens if the neighbours are also 0."""
        if [y, x] in self.mine_locations:
            return [y, x]
        # generate neighbours with positive indexes
        l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
             for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            # if the indexes are out of the game table, skip
            if xe >= self.x or ye >= self.y:
                continue
            # if the current coordinates are still untouched, update their values
            if self.table_state[ye][xe] == '-':
                self.table_state[ye][xe] = self.final_table[ye][xe]
            # if the coordinate has a value of 0, recursively open its neighbours.
            if self.final_table[ye][xe] == '0':
                self.open_neighbours(ye, xe)

    def check_status(self):
        count = 0
        flag_count = 0
        for i in [item for sublist in self.table_state for item in sublist]:
            if i == '-':
                count += 1
            if i == Minesweeper.FLAG:
                count += 1
                flag_count += 1
        print "%d tiles remaining. (%d flagged)" % (count - flag_count, flag_count)
        return count == len(self.mine_locations)

    def flag(self, y, x):
        """set a flag at the desired coordinates."""
        if self.table_state[y][x] == '-':
            self.table_state[y][x] = Minesweeper.FLAG
        Minesweeper.print_table(self.table_state)

    def tease_user(self, y, x):
        """come here when the coordinates do not have a bomb.
        update the table_state with the selected coordinate."""
        self.table_state[y][x] = self.final_table[y][x]
        # if the opened tile is a 0, open its neighbours too
        if self.table_state[y][x] == '0':
            self.open_neighbours(y, x)
        self.print_table(self.table_state)

    def show_answer_board(self, coords):
        """prints the answer table with print_table."""
        Minesweeper.print_table(self.final_table, coords)

    def open_tile(self, y, x):
        """opens a tile at the respective coordinates on the table_state list."""
        # Find the letter index and convert into a y-coordinate.
        # Check if it is a mine
        if [y, x] in self.mine_locations:
            # explode
            self.show_answer_board([y, x])
                x - 1, x + 2) if xe >= 0
                 for ye in range(y - 1, y + 2) if ye >= 0]
            for ye, xe in l:
                if xe >= self.x or ye >= self.y:
                    # do not open out of bounds
                    continue
                # if it is a bomb but not flagged
random_line_split
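The same neighbour list comprehension is rebuilt in flags_nearby, special_open_neighbours and open_neighbours, each followed by the same out-of-bounds continue. A reusable helper with identical bounds rules, sketched with assumed names:

def neighbours(y, x, height, width):
    """All in-bounds cells in the 3x3 box around (y, x), including (y, x)."""
    return [[ye, xe]
            for xe in range(x - 1, x + 2) if 0 <= xe < width
            for ye in range(y - 1, y + 2) if 0 <= ye < height]

print(neighbours(0, 0, 9, 9))  # corner cell: [[0, 0], [1, 0], [0, 1], [1, 1]]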
minesweeper.py
'X', 'Y', 'Z'] def
(self, height, width, mines):
        """initializes the Minesweeper instance with a width, height, and
        the number of mines. Sets up a default game table, generates random
        mine locations and updates another table for the solution."""
        self.x = int(width)
        self.y = int(height)
        self.table_state = [['-' for i in xrange(0, self.x)]
                            for j in xrange(0, self.y)]
        self.mine_locations = self.generate_mines(int(mines))
        self.final_table = self.generate_answer()

    @staticmethod
    def print_table(table, exploded_at=[-1, -1]):
        """prints the table, regardless of whether it's a game state table
        or the answer table."""
        # derive the dimensions from the table itself instead of relying on
        # the module-level width/height globals
        height = len(table)
        width = len(table[0])
        # color codes just to look pretty
        NORMAL = '\33[10m'
        BLUE_START = '\33[104m'
        RED_START = '\33[31m'
        PURPLE_START = '\33[35m'
        GREEN_START = '\33[92m'
        ORANGE_START = '\33[93m'
        END = '\033[0m'
        s = ' %s' % BLUE_START
        # print number headers along x-axis
        for i in range(0, width):
            s += " %s" % i
            if i < 10:
                s += " " * 2
            else:
                s += " "
        s += "%s\n" % END
        # print letters for y-axis, + the relevant values in each coordinate
        # depending on table.
        for y in range(0, height):
            s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END)
            for x in range(0, width):
                value = table[y][x]
                if value == "0":
                    s += "%s%s%s" % (NORMAL, value, END)
                elif value == "1":
                    s += "%s%s%s" % (GREEN_START, value, END)
                elif value == "2":
                    s += "%s%s%s" % (ORANGE_START, value, END)
                elif value == "3":
                    s += "%s%s%s" % (RED_START, value, END)
                elif value in ("4", "5", "6", "7", "8"):
                    s += "%s%s%s" % (PURPLE_START, value, END)
                # special
                elif value == "-":
                    s += "%s%s%s" % (NORMAL, value, END)
                elif value == Minesweeper.BOMB:
                    if y == exploded_at[0] and x == exploded_at[1]:
                        # Make the bomb at the casualty site explode!
                        s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END)
                    else:
                        # show normal bomb
                        s += "%s%s%s" % (RED_START, value, END)
                elif value == Minesweeper.FLAG:
                    s += "%s%s%s" % (RED_START, value, END)
                s += " " * 3
            s += "\n"
        # use tabbing to space them nicely
        print s.expandtabs(3)

    def generate_mines(self, number):
        """generate a list of viable coordinates for mines, and randomly
        choose them."""
        mine_locations = []
        available_places = [[j, i] for i in xrange(0, self.x)
                            for j in xrange(0, self.y)]
        while number > 0:
            # the chosen coordinate for a mine is appended into the list and
            # is removed from the list of choices to prevent duplicates.
            choice = random.choice(available_places)
            available_places.remove(choice)
            mine_locations.append(choice)
            number -= 1
        return mine_locations

    def generate_answer(self):
        ft = deepcopy(self.table_state)
        for x in range(0, self.x):
            for y in range(0, self.y):
                # get the number or mine with neighbours
                ft[y][x] = self.get_neighbour(y, x)
        return ft

    def get_neighbour(self, y, x):
        """populate answer table with numbers and mines"""
        if [y, x] in self.mine_locations:
            return Minesweeper.BOMB
        count = 0
        # (x-1, y-1), (x, y-1), (x+1, y-1),
        # (x-1, y),   (x, y),   (x+1, y),
        # (x-1, y+1), (x, y+1), (x+1, y+1)
        for xe in range(x - 1, x + 2):
            for ye in range(y - 1, y + 2):
                if [ye, xe] in self.mine_locations:
                    count += 1
        return str(count)

    def flags_nearby(self, y, x):
        """gets number of flags nearby"""
        count = 0
        l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
             for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            if xe >= self.x or ye >= self.y:
                continue
            if self.table_state[ye][xe] == Minesweeper.FLAG:
                count += 1
        return str(count)

    def special_open_neighbours(self, y, x):
        """Open neighbours if the flag number matches the count."""
        if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x):
            l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
                 for ye in range(y - 1, y + 2) if ye >= 0]
            for ye, xe in l:
                if xe >= self.x or ye >= self.y:
                    # do not open out of bounds
                    continue
                # if it is a bomb but not flagged
                if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:
                    self.show_answer_board([ye, xe])
                    print "KABOOM!"
                    return Minesweeper.IS_A_BOMB
            self.open_neighbours(y, x)
            self.print_table(self.table_state)
            return Minesweeper.NOT_A_BOMB

    def open_neighbours(self, y, x):
        """Open neighbours if the current coordinates are 0 and neighbours
        are untouched. Recursively opens if the neighbours are also 0."""
        if [y, x] in self.mine_locations:
            return [y, x]
        # generate neighbours with positive indexes
        l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
             for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            # if the indexes are out of the game table, skip
            if xe >= self.x or ye >= self.y:
                continue
            # if the current coordinates are still untouched, update their values
            if self.table_state[ye][xe] == '-':
                self.table_state[ye][xe] = self.final_table[ye][xe]
            # if the coordinate has a value of 0, recursively open its neighbours.
            if self.final_table[ye][xe] == '0':
                self.open_neighbours(ye, xe)

    def check_status(self):
        count = 0
        flag_count = 0
        for i in [item for sublist in self.table_state for item in sublist]:
            if i == '-':
                count += 1
            if i == Minesweeper.FLAG:
                count += 1
                flag_count += 1
        print "%d tiles remaining. (%d flagged)" % (count - flag_count, flag_count)
        return count == len(self.mine_locations)

    def flag(self, y, x):
        """set a flag at the desired coordinates."""
        if self.table_state[y][x] == '-':
            self.table_state[y][x] = Minesweeper.FLAG
        Minesweeper.print_table(self.table_state)

    def tease_user(self, y, x):
        """come here when the coordinates do not have a bomb.
        update the table_state with the selected coordinate."""
        self.table_state[y][x] = self.final_table[y][x]
        # if the opened tile is a 0, open its neighbours too
        if self.table_state[y][x] == '0':
            self.open_neighbours(y, x)
        self.print_table(self.table_state)

    def show_answer_board(self, coords):
        """prints the answer table with print_table."""
        Minesweeper.print_table(self.final_table, coords)

    def open_tile(self, y, x):
        """opens a tile at the respective coordinates on the table_state list."""
        # Find the letter index and convert into a y-coordinate.
        # Check if it is a mine
        if [y, x] in self.mine_locations:
            # explode
            self.show_answer_board([y, x
__init__
identifier_name
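open_neighbours in the records above recurses once per revealed zero tile, which can hit Python's recursion limit on a large empty board. An equivalent iterative flood fill, sketched with a plain list as a stack (function and variable names are illustrative; the caller is expected to start on a revealed '0' tile, as tease_user does):

def flood_open(state, answer, y, x):
    stack = [(y, x)]
    while stack:
        cy, cx = stack.pop()
        # reveal every in-bounds cell in the 3x3 box around (cy, cx)
        for ye in range(max(cy - 1, 0), min(cy + 2, len(answer))):
            for xe in range(max(cx - 1, 0), min(cx + 2, len(answer[0]))):
                if state[ye][xe] == '-':
                    state[ye][xe] = answer[ye][xe]
                    if answer[ye][xe] == '0':
                        stack.append((ye, xe))  # keep expanding through zeros

state = [['-'] * 3 for _ in range(3)]
answer = [['0', '0', '0'], ['0', '1', '1'], ['0', '1', '*']]
flood_open(state, answer, 0, 0)
print(state)  # the connected 0-region and its numbered border are revealed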
minesweeper.py
               'X', 'Y', 'Z']

    def __init__(self, height, width, mines):
        """initializes the Minesweeper instance with a width, height, and
        the number of mines. Sets up a default game table, generates random
        mine locations and updates another table for the solution."""
        self.x = int(width)
        self.y = int(height)
        self.table_state = [['-' for i in xrange(0, self.x)]
                            for j in xrange(0, self.y)]
        self.mine_locations = self.generate_mines(int(mines))
        self.final_table = self.generate_answer()

    @staticmethod
    def print_table(table, exploded_at=[-1, -1]):
        """prints the table, regardless of whether it's a game state table
        or the answer table."""
        # derive the dimensions from the table itself instead of relying on
        # the module-level width/height globals
        height = len(table)
        width = len(table[0])
        # color codes just to look pretty
        NORMAL = '\33[10m'
        BLUE_START = '\33[104m'
        RED_START = '\33[31m'
        PURPLE_START = '\33[35m'
        GREEN_START = '\33[92m'
        ORANGE_START = '\33[93m'
        END = '\033[0m'
        s = ' %s' % BLUE_START
        # print number headers along x-axis
        for i in range(0, width):
            s += " %s" % i
            if i < 10:
                s += " " * 2
            else:
                s += " "
        s += "%s\n" % END
        # print letters for y-axis, + the relevant values in each coordinate
        # depending on table.
        for y in range(0, height):
            s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END)
            for x in range(0, width):
                value = table[y][x]
                if value == "0":
                    s += "%s%s%s" % (NORMAL, value, END)
                elif value == "1":
                    s += "%s%s%s" % (GREEN_START, value, END)
                elif value == "2":
                    s += "%s%s%s" % (ORANGE_START, value, END)
                elif value == "3":
                    s += "%s%s%s" % (RED_START, value, END)
                elif value in ("4", "5", "6", "7", "8"):
                    s += "%s%s%s" % (PURPLE_START, value, END)
                # special
                elif value == "-":
                    s += "%s%s%s" % (NORMAL, value, END)
                elif value == Minesweeper.BOMB:
                    if y == exploded_at[0] and x == exploded_at[1]:
                        # Make the bomb at the casualty site explode!
                        s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END)
                    else:
                        # show normal bomb
                        s += "%s%s%s" % (RED_START, value, END)
                elif value == Minesweeper.FLAG:
                    s += "%s%s%s" % (RED_START, value, END)
                s += " " * 3
            s += "\n"
        # use tabbing to space them nicely
        print s.expandtabs(3)

    def generate_mines(self, number):
        """generate a list of viable coordinates for mines, and randomly
        choose them."""
        mine_locations = []
        available_places = [[j, i] for i in xrange(0, self.x)
                            for j in xrange(0, self.y)]
        while number > 0:
            # the chosen coordinate for a mine is appended into the list and
            # is removed from the list of choices to prevent duplicates.
            choice = random.choice(available_places)
            available_places.remove(choice)
            mine_locations.append(choice)
            number -= 1
        return mine_locations

    def generate_answer(self):
        ft = deepcopy(self.table_state)
        for x in range(0, self.x):
            for y in range(0, self.y):
                # get the number or mine with neighbours
                ft[y][x] = self.get_neighbour(y, x)
        return ft

    def get_neighbour(self, y, x):
    def flags_nearby(self, y, x):
        """gets number of flags nearby"""
        count = 0
        l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
             for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            if xe >= self.x or ye >= self.y:
                continue
            if self.table_state[ye][xe] == Minesweeper.FLAG:
                count += 1
        return str(count)

    def special_open_neighbours(self, y, x):
        """Open neighbours if the flag number matches the count."""
        if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x):
            l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
                 for ye in range(y - 1, y + 2) if ye >= 0]
            for ye, xe in l:
                if xe >= self.x or ye >= self.y:
                    # do not open out of bounds
                    continue
                # if it is a bomb but not flagged
                if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:
                    self.show_answer_board([ye, xe])
                    print "KABOOM!"
                    return Minesweeper.IS_A_BOMB
            self.open_neighbours(y, x)
            self.print_table(self.table_state)
            return Minesweeper.NOT_A_BOMB

    def open_neighbours(self, y, x):
        """Open neighbours if the current coordinates are 0 and neighbours
        are untouched. Recursively opens if the neighbours are also 0."""
        if [y, x] in self.mine_locations:
            return [y, x]
        # generate neighbours with positive indexes
        l = [[ye, xe] for xe in range(x - 1, x + 2) if xe >= 0
             for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            # if the indexes are out of the game table, skip
            if xe >= self.x or ye >= self.y:
                continue
            # if the current coordinates are still untouched, update their values
            if self.table_state[ye][xe] == '-':
                self.table_state[ye][xe] = self.final_table[ye][xe]
            # if the coordinate has a value of 0, recursively open its neighbours.
            if self.final_table[ye][xe] == '0':
                self.open_neighbours(ye, xe)

    def check_status(self):
        count = 0
        flag_count = 0
        for i in [item for sublist in self.table_state for item in sublist]:
            if i == '-':
                count += 1
            if i == Minesweeper.FLAG:
                count += 1
                flag_count += 1
        print "%d tiles remaining. (%d flagged)" % (count - flag_count, flag_count)
        return count == len(self.mine_locations)

    def flag(self, y, x):
        """set a flag at the desired coordinates."""
        if self.table_state[y][x] == '-':
            self.table_state[y][x] = Minesweeper.FLAG
        Minesweeper.print_table(self.table_state)

    def tease_user(self, y, x):
        """come here when the coordinates do not have a bomb.
        update the table_state with the selected coordinate."""
        self.table_state[y][x] = self.final_table[y][x]
        # if the opened tile is a 0, open its neighbours too
        if self.table_state[y][x] == '0':
            self.open_neighbours(y, x)
        self.print_table(self.table_state)

    def show_answer_board(self, coords):
        """prints the answer table with print_table."""
        Minesweeper.print_table(self.final_table, coords)

    def open_tile(self, y, x):
        """opens a tile at the respective coordinates on the table_state list."""
        # Find the letter index and convert into a y-coordinate.
        # Check if it is a mine
        if [y, x] in self.mine_locations:
            # explode
            self.show_answer_board([y, x
"""populate answer table with numbers and mines""" if [y, x] in self.mine_locations: return Minesweeper.BOMB count = 0 # (x-1, y-1), (x, y-1), (x+1, y-1), # (x-1, y), (x, y), (x+1, y), # (x-1, y+1), (x, y+1), (x+1, y+1) for xe in range(x - 1, x + 2): for ye in range(y - 1, y + 2): if [ye, xe] in self.mine_locations: count += 1 return str(count)
identifier_body
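get_neighbour above runs a list membership test against mine_locations for each of the nine box cells, which is linear in the number of mines. Keeping the mines in a set of tuples makes each test O(1); a sketch with assumed names:

def count_adjacent(mines, y, x):
    mine_set = {tuple(m) for m in mines}
    return sum((ye, xe) in mine_set
               for xe in range(x - 1, x + 2)
               for ye in range(y - 1, y + 2))

print(count_adjacent([[0, 0], [1, 1]], 0, 1))  # 2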
IBRAHIM_OLADOKUN.py
AndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90',
    'NumberRealEstateLoansOrLines': 'PropLines',
    'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089',
    'NumberOfDependents': 'Deps'})
# train and test take the same column map, so define it once
col_map = {'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target',
           'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines',
           'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059',
           'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome',
           'NumberOfOpenCreditLinesAndLoans': 'OpenCredit',
           'NumberOfTimes90DaysLate': 'Late90',
           'NumberRealEstateLoansOrLines': 'PropLines',
           'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089',
           'NumberOfDependents': 'Deps'}
train = train.rename(columns=col_map)
test = test.rename(columns=col_map)

# # Exploring variables

# In[25]:

# Correlation matrix
g = sns.heatmap(train.corr(), annot=False, fmt=".2f", cmap="coolwarm")

# This shows that the Target correlates most strongly with age, previous late
# payments, and the number of dependents

# # Exploring UnsecLines

# In[27]:

dataset.UnsecLines.describe()

# In[28]:

dataset.UnsecLines = pd.qcut(dataset.UnsecLines.values, 5).codes

# In[29]:

# Exploring UnsecLines feature vs Target
g = sns.factorplot(x="UnsecLines", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# This shows an almost exponential relationship between this variable and our target

# # Exploring Age

# In[31]:

# Exploring age distribution vs Target
g = sns.FacetGrid(dataset, col='Target')
g = g.map(sns.distplot, "age")

# In[32]:

dataset.age = pd.qcut(dataset.age.values, 5).codes

# In[33]:

# Exploring age feature vs Target
g = sns.factorplot(x="age", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# Age has an inverse relationship to default risk

# # Exploring Late3059

# In[35]:

# Exploring Late3059 feature vs Target
g = sns.factorplot(x="Late3059", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# In[36]:

for i in range(len(dataset)):
    if dataset.Late3059[i] >= 6:
        dataset.Late3059[i] = 6

# In[38]:

# Exploring Late3059 feature vs Target after capping
g = sns.factorplot(x="Late3059", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# Due to the very high standard deviations I decided to group customers who
# have 6 or more late payments together.
# This boosted the predictive capacity and reduced the variance of Late3059.

# # Exploring DebtRatio

# In[40]:

# Exploring DebtRatio distribution vs Target
g = sns.FacetGrid(dataset, col='Target')
g = g.map(sns.distplot, "DebtRatio")

# In[41]:

dataset.DebtRatio = pd.qcut(dataset.DebtRatio.values, 5).codes

# In[42]:

# Exploring DebtRatio feature quantiles vs Target
g = sns.factorplot(x="DebtRatio", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# In[43]:

dataset.MonthlyIncome.isnull().sum()

# In[44]:

g = sns.heatmap(dataset[["MonthlyIncome", "Unknown", "UnsecLines", "OpenCredit", "PropLines"]].corr(), cmap="BrBG", annot=True)

# In[45]:

g = sns.heatmap(dataset[["MonthlyIncome", "age", "DebtRatio", "Deps", "Target"]].corr(), cmap="BrBG", annot=True)

# In[46]:

g = sns.heatmap(dataset[["MonthlyIncome", "Late3059", "Late6089", "Late90"]].corr(), cmap="BrBG", annot=True)

# MonthlyIncome has no strong correlation with any other variable, so the NaN
# values cannot be accurately estimated. Thus, I will fill the NaN with the
# median value

# In[47]:

dataset.MonthlyIncome.median()

# In[48]:

# Fill MonthlyIncome NaN values with the median
dataset.MonthlyIncome = dataset.MonthlyIncome.fillna(dataset.MonthlyIncome.median())

# In[49]:

dataset.MonthlyIncome = pd.qcut(dataset.MonthlyIncome.values, 5).codes

# In[50]:

# Exploring MonthlyIncome feature quantiles vs Target
g = sns.factorplot(x="MonthlyIncome", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# # Exploring OpenCredit

# In[51]:

dataset.OpenCredit.describe()

# In[52]:

dataset.OpenCredit = pd.qcut(dataset.OpenCredit.values, 5).codes

# In[53]:

# Exploring OpenCredit feature quantiles vs Target
g = sns.factorplot(x="OpenCredit", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# # Exploring Late90

# In[54]:

dataset.Late90.describe()

# In[55]:

# Exploring Late90 feature quantiles vs Target
g = sns.factorplot(x="Late90", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# In[56]:

for i in range(len(dataset)):
    if dataset.Late90[i] >= 5:
        dataset.Late90[i] = 5

# In[57]:

# Exploring Late90 feature quantiles vs Target after capping
g = sns.factorplot(x="Late90", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# # Exploring PropLines

# In[58]:

dataset.PropLines.describe()

# In[59]:

# Exploring PropLines feature quantiles vs Target
g = sns.factorplot(x="PropLines", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# In[60]:

for i in range(len(dataset)):
    if dataset.PropLines[i] >= 6:
        dataset.PropLines[i] = 6

# In[61]:

# Exploring PropLines feature quantiles vs Target after capping
g = sns.factorplot(x="PropLines", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# # Exploring Late6089

# In[62]:

# Exploring Late6089 feature quantiles vs Target
g = sns.factorplot(x="Late6089", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# In[63]:

for i in range(len(dataset)):
    if dataset.Late6089[i] >= 3:
        dataset.Late6089[i] = 3
conditional_block
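The capping loops above (Late3059 at 6, Late90 at 5, PropLines at 6, Late6089 at 3) iterate over the DataFrame row by row. pandas expresses the same cap as one vectorized call; a sketch:

import pandas as pd

s = pd.Series([0, 1, 2, 5, 9])
print(s.clip(upper=3).tolist())  # [0, 1, 2, 3, 3], the same effect as the loop with a cap of 3

So, for example, dataset.Late6089 = dataset.Late6089.clip(upper=3) would replace the whole loop.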
IBRAHIM_OLADOKUN.py
suspected that other variables contain errors (Age)

# In[11]:

test.isnull().sum()

# The test data also contains several NaN values

# # Target distribution

# In[13]:

ax = sns.countplot(x=train.SeriousDlqin2yrs, palette="Set3")
sns.set(font_scale=1.5)
ax.set_ylim(top=150000)
ax.set_xlabel('Financial difficulty in 2 years')
ax.set_ylabel('Frequency')
fig = plt.gcf()
fig.set_size_inches(10, 5)
ax.set_ylim(top=160000)
plt.show()

# The target distribution in the plot above is heavily imbalanced: the two
# classes occur at very different frequencies

# # Detecting outliers

# In[14]:

def detect_outliers(df, n, features):
    outlier_indices = []
    # iterate over features (columns)
    for col in features:
        # 1st quartile (25%)
        # Interquartile range (IQR)
        IQR = Q3 - Q1
        # outlier step
        outlier_step = 1.5 * IQR
        # Determine a list of indices of outliers for feature col
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        # append the found outlier indices for col to the list of outlier indices
        outlier_indices.extend(outlier_list_col)
    # select observations containing more than n outlier features
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list(k for k, v in outlier_indices.items() if v > n)
    return multiple_outliers

# detect outliers from the numerical features present in the dataset
Outliers_to_drop = detect_outliers(train, 2, ["RevolvingUtilizationOfUnsecuredLines", "age",
                                              "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio",
                                              "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans",
                                              "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines",
                                              "NumberOfTime60-89DaysPastDueNotWorse", "Unnamed: 0",
                                              "NumberOfDependents"])

# In[17]:

train.loc[Outliers_to_drop]

# 3527 outliers were detected in the training set, which represents 2.53% of
# our training data. I will drop these outliers

# In[18]:

train = train.drop(Outliers_to_drop, axis=0).reset_index(drop=True)

# # Merging datasets

# In[21]:

train_len = len(train)
dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)

# In[22]:

dataset.shape

# In[24]:

# dataset, train and test take the same column map, so define it once
col_map = {'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target',
           'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines',
           'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059',
           'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome',
           'NumberOfOpenCreditLinesAndLoans': 'OpenCredit',
           'NumberOfTimes90DaysLate': 'Late90',
           'NumberRealEstateLoansOrLines': 'PropLines',
           'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089',
           'NumberOfDependents': 'Deps'}
dataset = dataset.rename(columns=col_map)
train = train.rename(columns=col_map)
test = test.rename(columns=col_map)

# # Exploring variables

# In[25]:

# Correlation matrix
g = sns.heatmap(train.corr(), annot=False, fmt=".2f", cmap="coolwarm")

# This shows that the Target correlates most strongly with age, previous late
# payments, and the number of dependents

# # Exploring UnsecLines

# In[27]:

dataset.UnsecLines.describe()

# In[28]:

dataset.UnsecLines = pd.qcut(dataset.UnsecLines.values, 5).codes

# In[29]:

# Exploring UnsecLines feature vs Target
g = sns.factorplot(x="UnsecLines", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# This shows an almost exponential relationship between this variable and our target

# # Exploring Age

# In[31]:

# Exploring age distribution vs Target
g =
sns.FacetGrid(dataset, col='Target')
g = g.map(sns.distplot, "age")

# In[32]:

dataset.age = pd.qcut(dataset.age.values, 5).codes

# In[33]:

# Exploring age feature vs Target
g = sns.factorplot(x="age", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# Age has an inverse relationship to default risk

# # Exploring Late3059

# In[35]:

# Exploring Late3059 feature vs Target
g = sns.factorplot(x="Late3059", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# In[36]:

for i in range(len(dataset)):
    if dataset.Late3059[i] >= 6:
        dataset.Late3059[i] = 6

# In[38]:

# Exploring Late3059 feature vs Target after capping
g = sns.factorplot(x="Late3059", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# Due to the very high standard deviations I decided to group customers who
# have 6 or more late payments together. This boosted the predictive capacity
# and reduced the variance of Late3059

# # Exploring DebtRatio

# In[40]:

# Exploring DebtRatio distribution vs Target
g = sns.FacetGrid(dataset, col='Target')
g = g.map(sns.distplot, "DebtRatio")

# In[41]:

dataset.DebtRatio = pd.qcut(dataset.DebtRatio.values, 5).codes

# In[42]:

# Exploring DebtRatio feature quantiles vs Target
g = sns.factorplot(x="DebtRatio", y="Target", data=dataset, kind="bar",
                   size=6, palette="muted")
g.despine(left=True)
g = g.set_ylabels("Target probability")

# In[43]:

dataset.MonthlyIncome.isnull().sum()

# In[44]:

g = sns.heatmap(dataset[["MonthlyIncome", "Unknown", "UnsecLines", "OpenCredit", "PropLines"]].corr(), cmap="BrBG", annot=True)

# In[45]:

g = sns.heatmap(dataset[["MonthlyIncome", "age", "DebtRatio", "Deps", "Target"]].corr(), cmap="BrBG", annot=True)

# In[46]:

g = sns.heatmap(dataset[["MonthlyIncome", "Late3059", "Late6089", "Late90"]].corr(), cmap="BrBG", annot=True)

# MonthlyIncome has no strong correlation with any other variable so the NaN values cannot be
Q1 = np.percentile(df[col], 25)
# 3rd quartile (75%)
Q3 = np.percentile(df[col], 75)
random_line_split
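The IBRAHIM_OLADOKUN.py records above and below apply Tukey's IQR rule: a value is flagged when it lies more than 1.5 * IQR outside the quartiles, and only rows flagged in more than n features are dropped. A minimal, self-contained sketch of that rule — the toy data and function name are illustrative, not from the source:

    from collections import Counter

    import numpy as np
    import pandas as pd

    def iqr_outliers(df, n, features):
        flagged = []
        for col in features:
            # Quartiles and Tukey's 1.5 * IQR fences for this column
            q1, q3 = np.percentile(df[col].dropna(), [25, 75])
            step = 1.5 * (q3 - q1)
            flagged.extend(df[(df[col] < q1 - step) | (df[col] > q3 + step)].index)
        # Keep only rows that are outliers in more than n features
        counts = Counter(flagged)
        return [idx for idx, c in counts.items() if c > n]

    # Row 4 is extreme in both columns, so it is the only multiple outlier
    toy = pd.DataFrame({"a": [1, 2, 3, 2, 100], "b": [5, 6, 5, 6, 900]})
    print(iqr_outliers(toy, 1, ["a", "b"]))  # [4]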
IBRAHIM_OLADOKUN.py
that other variables contain errors (Age) # In[11]: test.isnull().sum() # The test data also contains several NaN values # # Target distribution # In[13]: ax = sns.countplot(x = train.SeriousDlqin2yrs ,palette="Set3") sns.set(font_scale=1.5) ax.set_xlabel('Financial difficulty in 2 years') ax.set_ylabel('Frequency') fig = plt.gcf() fig.set_size_inches(10,5) ax.set_ylim(top=160000) plt.show() # The plot shows that the target variable is highly imbalanced: customers who experienced financial difficulty are a small minority of the training set # # Detecting outliers # In[14]: def detect_outliers(df,n,features):
# select observations containing more than 2 outliers outlier_indices = Counter(outlier_indices) multiple_outliers = list( k for k, v in outlier_indices.items() if v > n ) return multiple_outliers # detect outliers from Age, SibSp , Parch and Fare # These are the numerical features present in the dataset Outliers_to_drop = detect_outliers(train,2,["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "Unnamed: 0", "NumberOfDependents"]) # In[17]: train.loc[Outliers_to_drop] # 3527 outliers were detected in the training set, which represents 2.53% of our training data.I will drop these outliers # In[18]: train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True) # # Merging datasets # In[21]: train_len = len(train) dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True) # In[22]: dataset.shape # In[24]: dataset = dataset.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) train = train.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) test = test.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) # # Exploring variables # In[25]: # Correlation matrix g = sns.heatmap(train.corr(),annot=False, fmt = ".2f", cmap = "coolwarm") # This shows that the Target has the highest correlation with age, previous late payments, and the number of dependants # # Exploring UnsecLines # In[27]: dataset.UnsecLines.describe() # In[28]: dataset.UnsecLines = pd.qcut(dataset.UnsecLines.values, 5).codes # In[29]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="UnsecLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # This shows that there is an almost exponential relationship between this variable and our target # # Exploring Age # In[31]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "age") # In[32]: dataset.age = pd.qcut(dataset.age.values, 5).codes # In[33]: # Exploring age feature vs Target g = sns.factorplot(x="age",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # The age has an inverse 
relationship to default risk # # Exploring Late3059 # In[35]: # Explore UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[36]: for i in range(len(dataset)): if dataset.Late3059[i] >= 6: dataset.Late3059[i] = 6 # In[38]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # Due to very high standard deviations i decided to group customers who have 6 or more late payments together. Which shows that this has boosted the predictive capacity and reduced the variance of Late3059 # # Exploring DebtRatio # In[40]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "DebtRatio") # In[41]: dataset.DebtRatio = pd.qcut(dataset.DebtRatio.values, 5).codes # In[42]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="DebtRatio",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[43]: dataset.MonthlyIncome.isnull().sum() # In[44]: g = sns.heatmap(dataset[["MonthlyIncome","Unknown","UnsecLines","OpenCredit","PropLines"]].corr(),cmap="BrBG",annot=True) # In[45]: g = sns.heatmap(dataset[["MonthlyIncome","age","DebtRatio","Deps","Target"]].corr(),cmap="BrBG",annot=True) # In[46]: g = sns.heatmap(dataset[["MonthlyIncome","Late3059","Late6089","Late90"]].corr(),cmap="BrBG",annot=True) # MonthlyIncome has no strong correlation with any other variable so the NaN values cannot
outlier_indices = []
# iterate over features (columns)
for col in features:
    # 1st quartile (25%)
    Q1 = np.percentile(df[col], 25)
    # 3rd quartile (75%)
    Q3 = np.percentile(df[col], 75)
    # Interquartile range (IQR)
    IQR = Q3 - Q1
    # outlier step
    outlier_step = 1.5 * IQR
    # Determine a list of indices of outliers for feature col
    outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
    # append the found outlier indices for col to the list of outlier indices
    outlier_indices.extend(outlier_list_col)
identifier_body
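Several cells in these records discretize a skewed continuous feature with pd.qcut(values, 5).codes: five equal-frequency bins whose integer codes 0-4 then serve as an ordinal feature. A toy reproduction of that step on synthetic data (not the author's):

    import numpy as np
    import pandas as pd

    # Five equal-frequency bins; .codes turns the Categorical into ints 0-4
    rng = np.random.default_rng(0)
    income = pd.Series(rng.lognormal(mean=8, sigma=1, size=1000))
    binned = pd.qcut(income.values, 5).codes

    print(binned[:10])                                    # labels in [0, 4]
    print(pd.Series(binned).value_counts().sort_index())  # ~200 rows per bin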
IBRAHIM_OLADOKUN.py
that other variables contain errors (Age) # In[11]: test.isnull().sum() # The test data also contains several NaN values # # Target distribution # In[13]: ax = sns.countplot(x = train.SeriousDlqin2yrs ,palette="Set3") sns.set(font_scale=1.5) ax.set_xlabel('Financial difficulty in 2 years') ax.set_ylabel('Frequency') fig = plt.gcf() fig.set_size_inches(10,5) ax.set_ylim(top=160000) plt.show() # The plot shows that the target variable is highly imbalanced: customers who experienced financial difficulty are a small minority of the training set # # Detecting outliers # In[14]: def
(df,n,features): outlier_indices = [] # iterate over features(columns) for col in features: # 1st quartile (25%) Q1 = np.percentile(df[col], 25) # 3rd quartile (75%) Q3 = np.percentile(df[col],75) # Interquartile range (IQR) IQR = Q3 - Q1 # outlier step outlier_step = 1.5 * IQR # Determine a list of indices of outliers for feature col outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step )].index # append the found outlier indices for col to the list of outlier indices outlier_indices.extend(outlier_list_col) # select observations containing more than 2 outliers outlier_indices = Counter(outlier_indices) multiple_outliers = list( k for k, v in outlier_indices.items() if v > n ) return multiple_outliers # detect outliers from Age, SibSp , Parch and Fare # These are the numerical features present in the dataset Outliers_to_drop = detect_outliers(train,2,["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "Unnamed: 0", "NumberOfDependents"]) # In[17]: train.loc[Outliers_to_drop] # 3527 outliers were detected in the training set, which represents 2.53% of our training data.I will drop these outliers # In[18]: train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True) # # Merging datasets # In[21]: train_len = len(train) dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True) # In[22]: dataset.shape # In[24]: dataset = dataset.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) train = train.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) test = test.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) # # Exploring variables # In[25]: # Correlation matrix g = sns.heatmap(train.corr(),annot=False, fmt = ".2f", cmap = "coolwarm") # This shows that the Target has the highest correlation with age, previous late payments, and the number of dependants # # Exploring UnsecLines # In[27]: dataset.UnsecLines.describe() # In[28]: dataset.UnsecLines = pd.qcut(dataset.UnsecLines.values, 5).codes # In[29]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="UnsecLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = 
g.set_ylabels("Target probability") # This shows that there is an almost exponential relationship between this variable and our target # # Exploring Age # In[31]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "age") # In[32]: dataset.age = pd.qcut(dataset.age.values, 5).codes # In[33]: # Exploring age feature vs Target g = sns.factorplot(x="age",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # The age has an inverse relationship to default risk # # Exploring Late3059 # In[35]: # Explore UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[36]: for i in range(len(dataset)): if dataset.Late3059[i] >= 6: dataset.Late3059[i] = 6 # In[38]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # Due to very high standard deviations i decided to group customers who have 6 or more late payments together. Which shows that this has boosted the predictive capacity and reduced the variance of Late3059 # # Exploring DebtRatio # In[40]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "DebtRatio") # In[41]: dataset.DebtRatio = pd.qcut(dataset.DebtRatio.values, 5).codes # In[42]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="DebtRatio",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[43]: dataset.MonthlyIncome.isnull().sum() # In[44]: g = sns.heatmap(dataset[["MonthlyIncome","Unknown","UnsecLines","OpenCredit","PropLines"]].corr(),cmap="BrBG",annot=True) # In[45]: g = sns.heatmap(dataset[["MonthlyIncome","age","DebtRatio","Deps","Target"]].corr(),cmap="BrBG",annot=True) # In[46]: g = sns.heatmap(dataset[["MonthlyIncome","Late3059","Late6089","Late90"]].corr(),cmap="BrBG",annot=True) # MonthlyIncome has no strong correlation with any other variable so the NaN values cannot
detect_outliers
identifier_name
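The Late3059 cell in these records caps "times past due" counts at 6 with a row-by-row loop, so the sparse tail of extreme counts collapses into one lower-variance bucket. pandas' clip does the same thing vectorized — a sketch, not the notebook's code:

    import pandas as pd

    late = pd.Series([0, 1, 2, 7, 13, 98])

    # Cap counts at 6: everything >= 6 collapses into a single bucket
    capped = late.clip(upper=6)
    print(capped.tolist())  # [0, 1, 2, 6, 6, 6]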
putio-ftp-connector.py
): c = self.buffer self.buffer = '' return c def read(self, size=65536): if self.req == None: self.req = urllib2.Request(self.download_url) if self.seekpos: self.req.headers['Range'] = 'bytes=%s-' % (self.seekpos) if self.read_size > self.total_size: return self.read_size = self.read_size + self.read_bytes + 1 if not self.fd: self.fd = urllib2.urlopen(self.req) return self.fd.read(1024) def seek(self, frombytes, **kwargs): self.seekpos = frombytes return # .... idfinder = PathToId() idfinder.load_items() api = None class HttpFS(ftpserver.AbstractedFS): def __init__(self): self.root = None self.cwd = '/' self.rnfr = None self.dirlistcache = {} self.idfinder = idfinder def open(self, filename, mode): print "filename: ", filename if filename in self.dirlistcache: apifile = self.dirlistcache[filename] else: if filename == os.path.sep: # items = operations.api.get_items() # this is not a file its a directory # raise OSError(1, 'This is a directory') raise IOError(1, 'This is a directory') else: id = idfinder.find_item_by_path(filename) print "file id:", filename, id apifile = operations.api.get_items(id=id)[0] if apifile.is_dir: raise IOError(1, 'This is a directory') # return HttpFD(apifile, None, filename, mode) def chdir(self, path): self.cwd = path.decode('utf-8').encode('utf-8') def mkdir(self, path): dirs = os.path.split(path) apifile = self._getitem(dirs[0]) if not apifile: #this is root operations.api.create_folder(name = dirs[1], parent_id = 0) else: apifile.create_folder(name=dirs[1]) self.remove_from_cache(path) self.remove_from_cache(dirs[0]) def listdir(self, path): ret = [] try: item = self._getitem(path) except: return [] if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return [] for i in items: ret.append(i.name) return ret def remove_from_cache(self, path): if path in self.dirlistcache: del self.dirlistcache[path] idfinder.invalidate_items_cache_by_path(path) def rmdir(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def remove(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def rename(self, src, dst): apifile = self._getitem(src) if not apifile: raise OSError(2, 'No such file or directory') srcs = os.path.split(src) dsts = os.path.split(dst) if srcs[0] != dsts[0]: # this is a move operation.. 
if dsts[0] == os.path.sep: apifile.move_item(target=0) return destination = self._getitem(dsts[0]); if not destination: raise OSError(2, 'No such file or directory') apifile.move_item(target=destination.id) return apifile.rename_item(dsts[1]) self.remove_from_cache(src) self.remove_from_cache(srcs[0]) self.remove_from_cache(dst) self.remove_from_cache(dsts[0]) def isfile(self, path): return not self.isdir(path) def islink(self, path): return False def isdir(self, path): if path == os.path.sep: return True apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') if apifile.is_dir: return True else: return False def getsize(self, path): apifile = self._getitem(path) if not apifile: raise OSError(1, 'No such file or directory') print "filesize :", apifile.size return long(apifile.size) #return self.stat(path).st_size def getmtime(self, path): return self.stat(path).st_mtime def realpath(self, path): return path def lexists(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') return apifile def _getitem(self, filename): if filename in self.dirlistcache: apifile = self.dirlistcache[filename] print 'found........', apifile.id, apifile.name else: if filename == os.path.sep: # items = operations.api.get_items() return False else: id = idfinder.find_item_by_path(filename) print "file id:", id apifile = operations.api.get_items(id=id)[0] self.dirlistcache[filename] = apifile return apifile #.get_download_url() def stat(self, path): apifile = self._getitem(path) return os.stat_result((666, 0L, 0L, 0, 0, 0, apifile.size, 0, 0, 0)) exists = lexists lstat = stat def validpath(self, path): return True def format_list_items(self, items): for item in items: if item.is_dir: s = 'drwxrwxrwx 1 %s group %8s Jan 01 00:00 %s\r\n' % ('aaa', 0, item.name) else: s = '-rw-rw-rw- 1 %s group %8s %s %s\r\n' % ('aaa', item.size, time.strftime("%b %d %H:%M"), item.name) yield s.encode('utf-8') def get_list_dir(self, path): try: item = self._getitem(path) except: return self.format_list_items([]) if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return self.format_list_items([]) return self.format_list_items(items) def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): # find item in cache... if basedir in self.dirlistcache: fnd = self.dirlistcache[basedir] try: items = operations.api.get_items(parent_id = fnd.id) except: items = [] else: if basedir == os.path.sep: items = operations.api.get_items() else: parent_id = self.idfinder.find_item_by_path(pathtoid._utf8(basedir)) print "parent_id:", parent_id items = operations.api.get_items(parent_id=parent_id) c = 0 s = '' for i in items: c = c + 1 type = 'type=file;' if 'type' in facts: if i.type == 'folder': type = 'type=dir;' if 'size' in facts: size = 'size=%s;' % i.size # file size ln = "%s%sperm=r;modify=20071029155301;unique=11150051; %s\r\n" % (type, size, i.name) if basedir== os.path.sep:
else: key = '%s/%s' % (pathtoid._utf8(basedir), pathtoid._utf8(i.name)) self.dirlistcache[key] = i print 'key:', key yield ln.encode('utf-8') class HttpOperations(object): '''Storing connection object''' def __init__(self): self.connection = None self.username = None def authenticate(self, username, password): self.username = username self.password = password config.apisecret = password config.apikey = username self.api = putio.Api(config.apikey,config.apisecret) print "checking user & passwd" username = self.api.get_user_name() if not username: return False print "> welcome ", username return True
key = '/%s' % (pathtoid._utf8(i.name))
conditional_block
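The conditional_block middle above exists only to avoid a double slash when building dirlistcache keys at the filesystem root. posixpath.join covers both branches in one call — a sketch; the helper name is hypothetical, not from the connector:

    import posixpath

    def cache_key(basedir, name):
        # Joins "/" + name and "/movies" + name correctly without branching
        return posixpath.join(basedir, name)

    print(cache_key("/", "a.txt"))        # /a.txt
    print(cache_key("/movies", "a.txt"))  # /movies/a.txt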
putio-ftp-connector.py
): c = self.buffer self.buffer = '' return c def read(self, size=65536): if self.req == None: self.req = urllib2.Request(self.download_url) if self.seekpos: self.req.headers['Range'] = 'bytes=%s-' % (self.seekpos) if self.read_size > self.total_size: return self.read_size = self.read_size + self.read_bytes + 1 if not self.fd: self.fd = urllib2.urlopen(self.req) return self.fd.read(1024) def seek(self, frombytes, **kwargs): self.seekpos = frombytes return # .... idfinder = PathToId() idfinder.load_items() api = None class HttpFS(ftpserver.AbstractedFS): def __init__(self): self.root = None self.cwd = '/' self.rnfr = None self.dirlistcache = {} self.idfinder = idfinder def open(self, filename, mode): print "filename: ", filename if filename in self.dirlistcache: apifile = self.dirlistcache[filename] else: if filename == os.path.sep: # items = operations.api.get_items() # this is not a file its a directory # raise OSError(1, 'This is a directory') raise IOError(1, 'This is a directory') else: id = idfinder.find_item_by_path(filename) print "file id:", filename, id apifile = operations.api.get_items(id=id)[0] if apifile.is_dir: raise IOError(1, 'This is a directory') # return HttpFD(apifile, None, filename, mode) def chdir(self, path): self.cwd = path.decode('utf-8').encode('utf-8') def mkdir(self, path): dirs = os.path.split(path) apifile = self._getitem(dirs[0]) if not apifile: #this is root operations.api.create_folder(name = dirs[1], parent_id = 0) else: apifile.create_folder(name=dirs[1]) self.remove_from_cache(path) self.remove_from_cache(dirs[0]) def listdir(self, path): ret = [] try: item = self._getitem(path) except: return [] if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return [] for i in items: ret.append(i.name) return ret def remove_from_cache(self, path): if path in self.dirlistcache: del self.dirlistcache[path] idfinder.invalidate_items_cache_by_path(path) def rmdir(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def remove(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def rename(self, src, dst): apifile = self._getitem(src) if not apifile: raise OSError(2, 'No such file or directory') srcs = os.path.split(src) dsts = os.path.split(dst) if srcs[0] != dsts[0]: # this is a move operation.. 
if dsts[0] == os.path.sep: apifile.move_item(target=0) return destination = self._getitem(dsts[0]); if not destination: raise OSError(2, 'No such file or directory') apifile.move_item(target=destination.id) return apifile.rename_item(dsts[1]) self.remove_from_cache(src) self.remove_from_cache(srcs[0]) self.remove_from_cache(dst) self.remove_from_cache(dsts[0]) def isfile(self, path): return not self.isdir(path) def islink(self, path): return False def isdir(self, path): if path == os.path.sep: return True apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') if apifile.is_dir: return True else: return False def getsize(self, path): apifile = self._getitem(path) if not apifile: raise OSError(1, 'No such file or directory') print "filesize :", apifile.size return long(apifile.size) #return self.stat(path).st_size def getmtime(self, path): return self.stat(path).st_mtime def realpath(self, path): return path def lexists(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') return apifile def _getitem(self, filename): if filename in self.dirlistcache: apifile = self.dirlistcache[filename] print 'found........', apifile.id, apifile.name else: if filename == os.path.sep: # items = operations.api.get_items() return False else: id = idfinder.find_item_by_path(filename) print "file id:", id apifile = operations.api.get_items(id=id)[0] self.dirlistcache[filename] = apifile return apifile #.get_download_url() def stat(self, path): apifile = self._getitem(path) return os.stat_result((666, 0L, 0L, 0, 0, 0, apifile.size, 0, 0, 0)) exists = lexists lstat = stat def validpath(self, path): return True def format_list_items(self, items): for item in items: if item.is_dir: s = 'drwxrwxrwx 1 %s group %8s Jan 01 00:00 %s\r\n' % ('aaa', 0, item.name) else: s = '-rw-rw-rw- 1 %s group %8s %s %s\r\n' % ('aaa', item.size, time.strftime("%b %d %H:%M"), item.name) yield s.encode('utf-8') def get_list_dir(self, path): try: item = self._getitem(path) except: return self.format_list_items([]) if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return self.format_list_items([]) return self.format_list_items(items) def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): # find item in cache... if basedir in self.dirlistcache: fnd = self.dirlistcache[basedir] try: items = operations.api.get_items(parent_id = fnd.id) except: items = [] else: if basedir == os.path.sep: items = operations.api.get_items() else: parent_id = self.idfinder.find_item_by_path(pathtoid._utf8(basedir)) print "parent_id:", parent_id items = operations.api.get_items(parent_id=parent_id) c = 0 s = '' for i in items: c = c + 1 type = 'type=file;' if 'type' in facts: if i.type == 'folder': type = 'type=dir;' if 'size' in facts: size = 'size=%s;' % i.size # file size ln = "%s%sperm=r;modify=20071029155301;unique=11150051; %s\r\n" % (type, size, i.name) if basedir== os.path.sep: key = '/%s' % (pathtoid._utf8(i.name)) else: key = '%s/%s' % (pathtoid._utf8(basedir), pathtoid._utf8(i.name)) self.dirlistcache[key] = i print 'key:', key yield ln.encode('utf-8') class HttpOperations(object): '''Storing connection object''' def __init__(self): self.connection = None self.username = None def authenticate(self, username, password): self.username = username self.password = password config.apisecret = password config.apikey = username self.api = putio.Api(config.apikey,config.apisecret)
if not username: return False print "> welcome ", username return True def
print "checking user & passwd" username = self.api.get_user_name()
random_line_split
putio-ftp-connector.py
): c = self.buffer self.buffer = '' return c def read(self, size=65536): if self.req == None: self.req = urllib2.Request(self.download_url) if self.seekpos: self.req.headers['Range'] = 'bytes=%s-' % (self.seekpos) if self.read_size > self.total_size: return self.read_size = self.read_size + self.read_bytes + 1 if not self.fd: self.fd = urllib2.urlopen(self.req) return self.fd.read(1024) def seek(self, frombytes, **kwargs): self.seekpos = frombytes return # .... idfinder = PathToId() idfinder.load_items() api = None class HttpFS(ftpserver.AbstractedFS): def __init__(self): self.root = None self.cwd = '/' self.rnfr = None self.dirlistcache = {} self.idfinder = idfinder def open(self, filename, mode): print "filename: ", filename if filename in self.dirlistcache: apifile = self.dirlistcache[filename] else: if filename == os.path.sep: # items = operations.api.get_items() # this is not a file its a directory # raise OSError(1, 'This is a directory') raise IOError(1, 'This is a directory') else: id = idfinder.find_item_by_path(filename) print "file id:", filename, id apifile = operations.api.get_items(id=id)[0] if apifile.is_dir: raise IOError(1, 'This is a directory') # return HttpFD(apifile, None, filename, mode) def chdir(self, path): self.cwd = path.decode('utf-8').encode('utf-8') def mkdir(self, path): dirs = os.path.split(path) apifile = self._getitem(dirs[0]) if not apifile: #this is root operations.api.create_folder(name = dirs[1], parent_id = 0) else: apifile.create_folder(name=dirs[1]) self.remove_from_cache(path) self.remove_from_cache(dirs[0]) def listdir(self, path): ret = [] try: item = self._getitem(path) except: return [] if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return [] for i in items: ret.append(i.name) return ret def remove_from_cache(self, path): if path in self.dirlistcache: del self.dirlistcache[path] idfinder.invalidate_items_cache_by_path(path) def rmdir(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def remove(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def rename(self, src, dst): apifile = self._getitem(src) if not apifile: raise OSError(2, 'No such file or directory') srcs = os.path.split(src) dsts = os.path.split(dst) if srcs[0] != dsts[0]: # this is a move operation.. 
if dsts[0] == os.path.sep: apifile.move_item(target=0) return destination = self._getitem(dsts[0]); if not destination: raise OSError(2, 'No such file or directory') apifile.move_item(target=destination.id) return apifile.rename_item(dsts[1]) self.remove_from_cache(src) self.remove_from_cache(srcs[0]) self.remove_from_cache(dst) self.remove_from_cache(dsts[0]) def isfile(self, path): return not self.isdir(path) def islink(self, path): return False def isdir(self, path): if path == os.path.sep: return True apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') if apifile.is_dir: return True else: return False def getsize(self, path): apifile = self._getitem(path) if not apifile: raise OSError(1, 'No such file or directory') print "filesize :", apifile.size return long(apifile.size) #return self.stat(path).st_size def getmtime(self, path): return self.stat(path).st_mtime def realpath(self, path): return path def lexists(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') return apifile def
(self, filename): if filename in self.dirlistcache: apifile = self.dirlistcache[filename] print 'found........', apifile.id, apifile.name else: if filename == os.path.sep: # items = operations.api.get_items() return False else: id = idfinder.find_item_by_path(filename) print "file id:", id apifile = operations.api.get_items(id=id)[0] self.dirlistcache[filename] = apifile return apifile #.get_download_url() def stat(self, path): apifile = self._getitem(path) return os.stat_result((666, 0L, 0L, 0, 0, 0, apifile.size, 0, 0, 0)) exists = lexists lstat = stat def validpath(self, path): return True def format_list_items(self, items): for item in items: if item.is_dir: s = 'drwxrwxrwx 1 %s group %8s Jan 01 00:00 %s\r\n' % ('aaa', 0, item.name) else: s = '-rw-rw-rw- 1 %s group %8s %s %s\r\n' % ('aaa', item.size, time.strftime("%b %d %H:%M"), item.name) yield s.encode('utf-8') def get_list_dir(self, path): try: item = self._getitem(path) except: return self.format_list_items([]) if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return self.format_list_items([]) return self.format_list_items(items) def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): # find item in cache... if basedir in self.dirlistcache: fnd = self.dirlistcache[basedir] try: items = operations.api.get_items(parent_id = fnd.id) except: items = [] else: if basedir == os.path.sep: items = operations.api.get_items() else: parent_id = self.idfinder.find_item_by_path(pathtoid._utf8(basedir)) print "parent_id:", parent_id items = operations.api.get_items(parent_id=parent_id) c = 0 s = '' for i in items: c = c + 1 type = 'type=file;' if 'type' in facts: if i.type == 'folder': type = 'type=dir;' if 'size' in facts: size = 'size=%s;' % i.size # file size ln = "%s%sperm=r;modify=20071029155301;unique=11150051; %s\r\n" % (type, size, i.name) if basedir== os.path.sep: key = '/%s' % (pathtoid._utf8(i.name)) else: key = '%s/%s' % (pathtoid._utf8(basedir), pathtoid._utf8(i.name)) self.dirlistcache[key] = i print 'key:', key yield ln.encode('utf-8') class HttpOperations(object): '''Storing connection object''' def __init__(self): self.connection = None self.username = None def authenticate(self, username, password): self.username = username self.password = password config.apisecret = password config.apikey = username self.api = putio.Api(config.apikey,config.apisecret) print "checking user & passwd" username = self.api.get_user_name() if not username: return False print "> welcome ", username return True
_getitem
identifier_name
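The _getitem middle above memoizes path -> API-item lookups in dirlistcache so each FTP operation hits the put.io API at most once per path. The core pattern, stripped of the connector specifics (all names here are hypothetical):

    class CachedLookup(object):
        """Memoize expensive path -> object lookups, like _getitem above."""

        def __init__(self, fetch):
            self._fetch = fetch  # expensive lookup, e.g. a remote API call
            self._cache = {}

        def get(self, path):
            if path not in self._cache:
                self._cache[path] = self._fetch(path)
            return self._cache[path]

        def invalidate(self, path):
            # Counterpart of remove_from_cache: drop stale entries on rename/delete
            self._cache.pop(path, None)

    lookup = CachedLookup(lambda p: {"path": p, "id": abs(hash(p)) % 1000})
    item = lookup.get("/movies/demo.mkv")  # fetched once
    item = lookup.get("/movies/demo.mkv")  # served from the cache
    lookup.invalidate("/movies/demo.mkv")  # e.g. after a rename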
putio-ftp-connector.py
): c = self.buffer self.buffer = '' return c def read(self, size=65536): if self.req == None: self.req = urllib2.Request(self.download_url) if self.seekpos: self.req.headers['Range'] = 'bytes=%s-' % (self.seekpos) if self.read_size > self.total_size: return self.read_size = self.read_size + self.read_bytes + 1 if not self.fd: self.fd = urllib2.urlopen(self.req) return self.fd.read(1024) def seek(self, frombytes, **kwargs): self.seekpos = frombytes return # .... idfinder = PathToId() idfinder.load_items() api = None class HttpFS(ftpserver.AbstractedFS): def __init__(self): self.root = None self.cwd = '/' self.rnfr = None self.dirlistcache = {} self.idfinder = idfinder def open(self, filename, mode): print "filename: ", filename if filename in self.dirlistcache: apifile = self.dirlistcache[filename] else: if filename == os.path.sep: # items = operations.api.get_items() # this is not a file its a directory # raise OSError(1, 'This is a directory') raise IOError(1, 'This is a directory') else: id = idfinder.find_item_by_path(filename) print "file id:", filename, id apifile = operations.api.get_items(id=id)[0] if apifile.is_dir: raise IOError(1, 'This is a directory') # return HttpFD(apifile, None, filename, mode) def chdir(self, path): self.cwd = path.decode('utf-8').encode('utf-8') def mkdir(self, path):
def listdir(self, path): ret = [] try: item = self._getitem(path) except: return [] if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return [] for i in items: ret.append(i.name) return ret def remove_from_cache(self, path): if path in self.dirlistcache: del self.dirlistcache[path] idfinder.invalidate_items_cache_by_path(path) def rmdir(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def remove(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def rename(self, src, dst): apifile = self._getitem(src) if not apifile: raise OSError(2, 'No such file or directory') srcs = os.path.split(src) dsts = os.path.split(dst) if srcs[0] != dsts[0]: # this is a move operation.. if dsts[0] == os.path.sep: apifile.move_item(target=0) return destination = self._getitem(dsts[0]); if not destination: raise OSError(2, 'No such file or directory') apifile.move_item(target=destination.id) return apifile.rename_item(dsts[1]) self.remove_from_cache(src) self.remove_from_cache(srcs[0]) self.remove_from_cache(dst) self.remove_from_cache(dsts[0]) def isfile(self, path): return not self.isdir(path) def islink(self, path): return False def isdir(self, path): if path == os.path.sep: return True apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') if apifile.is_dir: return True else: return False def getsize(self, path): apifile = self._getitem(path) if not apifile: raise OSError(1, 'No such file or directory') print "filesize :", apifile.size return long(apifile.size) #return self.stat(path).st_size def getmtime(self, path): return self.stat(path).st_mtime def realpath(self, path): return path def lexists(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') return apifile def _getitem(self, filename): if filename in self.dirlistcache: apifile = self.dirlistcache[filename] print 'found........', apifile.id, apifile.name else: if filename == os.path.sep: # items = operations.api.get_items() return False else: id = idfinder.find_item_by_path(filename) print "file id:", id apifile = operations.api.get_items(id=id)[0] self.dirlistcache[filename] = apifile return apifile #.get_download_url() def stat(self, path): apifile = self._getitem(path) return os.stat_result((666, 0L, 0L, 0, 0, 0, apifile.size, 0, 0, 0)) exists = lexists lstat = stat def validpath(self, path): return True def format_list_items(self, items): for item in items: if item.is_dir: s = 'drwxrwxrwx 1 %s group %8s Jan 01 00:00 %s\r\n' % ('aaa', 0, item.name) else: s = '-rw-rw-rw- 1 %s group %8s %s %s\r\n' % ('aaa', item.size, time.strftime("%b %d %H:%M"), item.name) yield s.encode('utf-8') def get_list_dir(self, path): try: item = self._getitem(path) except: return self.format_list_items([]) if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return self.format_list_items([]) return self.format_list_items(items) def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): # find item in cache... 
if basedir in self.dirlistcache: fnd = self.dirlistcache[basedir] try: items = operations.api.get_items(parent_id = fnd.id) except: items = [] else: if basedir == os.path.sep: items = operations.api.get_items() else: parent_id = self.idfinder.find_item_by_path(pathtoid._utf8(basedir)) print "parent_id:", parent_id items = operations.api.get_items(parent_id=parent_id) c = 0 s = '' for i in items: c = c + 1 type = 'type=file;' if 'type' in facts: if i.type == 'folder': type = 'type=dir;' if 'size' in facts: size = 'size=%s;' % i.size # file size ln = "%s%sperm=r;modify=20071029155301;unique=11150051; %s\r\n" % (type, size, i.name) if basedir== os.path.sep: key = '/%s' % (pathtoid._utf8(i.name)) else: key = '%s/%s' % (pathtoid._utf8(basedir), pathtoid._utf8(i.name)) self.dirlistcache[key] = i print 'key:', key yield ln.encode('utf-8') class HttpOperations(object): '''Storing connection object''' def __init__(self): self.connection = None self.username = None def authenticate(self, username, password): self.username = username self.password = password config.apisecret = password config.apikey = username self.api = putio.Api(config.apikey,config.apisecret) print "checking user & passwd" username = self.api.get_user_name() if not username: return False print "> welcome ", username return True
dirs = os.path.split(path)
apifile = self._getitem(dirs[0])
if not apifile:
    # this is root
    operations.api.create_folder(name=dirs[1], parent_id=0)
else:
    apifile.create_folder(name=dirs[1])
self.remove_from_cache(path)
self.remove_from_cache(dirs[0])
identifier_body
glfw.go
ickyKeysMode) // Value can be either 1 or 0 StickyMouseButtonsInputMode = InputMode(glfw.StickyMouseButtonsMode) // Value can be either 1 or 0 ) // Cursor mode values const ( CursorNormal = CursorMode(glfw.CursorNormal) CursorHidden = CursorMode(glfw.CursorHidden) CursorDisabled = CursorMode(glfw.CursorDisabled) ) // GlfwWindow describes one glfw window type GlfwWindow struct { *glfw.Window // Embedded GLFW window core.Dispatcher // Embedded event dispatcher gls *gls.GLS // Associated OpenGL State fullscreen bool lastX int lastY int lastWidth int lastHeight int scaleX float64 scaleY float64 // Events keyEv KeyEvent charEv CharEvent mouseEv MouseEvent posEv PosEvent sizeEv SizeEvent cursorEv CursorEvent scrollEv ScrollEvent focusEv FocusEvent mods ModifierKey // Current modifier keys // Cursors cursors map[Cursor]*glfw.Cursor lastCursorKey Cursor } // Init initializes the GlfwWindow singleton with the specified width, height, and title. func Init(width, height int, title string) error { // Panic if already created if win != nil { panic(fmt.Errorf("can only call window.Init() once")) } // OpenGL functions must be executed in the same thread where // the context was created (by wmgr.CreateWindow()) runtime.LockOSThread() // Create wrapper window with dispatcher w := new(GlfwWindow) w.Dispatcher.Initialize() var err error // Initialize GLFW err = glfw.Init() if err != nil { return err } // Set window hints glfw.WindowHint(glfw.ContextVersionMajor, 3) glfw.WindowHint(glfw.ContextVersionMinor, 3) glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile) glfw.WindowHint(glfw.Samples, 8) // Set OpenGL forward compatible context only for OSX because it is required for OSX. // When this is set, glLineWidth(width) only accepts width=1.0 and generates an error // for any other values although the spec says it should ignore unsupported widths // and generate an error only when width <= 0. if runtime.GOOS == "darwin" { glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True) } // Create window and set it as the current context. // The window is created always as not full screen because if it is // created as full screen it not possible to revert it to windowed mode. // At the end of this function, the window will be set to full screen if requested. 
w.Window, err = glfw.CreateWindow(width, height, title, nil, nil) if err != nil { return err } w.MakeContextCurrent() // Create OpenGL state w.gls, err = gls.New() if err != nil { return err } // Compute and store scale fbw, fbh := w.GetFramebufferSize() w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) // Create map for cursors w.cursors = make(map[Cursor]*glfw.Cursor) w.lastCursorKey = CursorLast // Preallocate GLFW standard cursors w.cursors[ArrowCursor] = glfw.CreateStandardCursor(glfw.ArrowCursor) w.cursors[IBeamCursor] = glfw.CreateStandardCursor(glfw.IBeamCursor) w.cursors[CrosshairCursor] = glfw.CreateStandardCursor(glfw.CrosshairCursor) w.cursors[HandCursor] = glfw.CreateStandardCursor(glfw.HandCursor) w.cursors[HResizeCursor] = glfw.CreateStandardCursor(glfw.HResizeCursor) w.cursors[VResizeCursor] = glfw.CreateStandardCursor(glfw.VResizeCursor) // Preallocate extra G3N standard cursors (diagonal resize cursors) cursorDiag1Png := assets.MustAsset("cursors/diag1.png") // [/] cursorDiag2Png := assets.MustAsset("cursors/diag2.png") // [\] diag1Img, _, err := image.Decode(bytes.NewReader(cursorDiag1Png)) diag2Img, _, err := image.Decode(bytes.NewReader(cursorDiag2Png)) if err != nil { return err } w.cursors[DiagResize1Cursor] = glfw.CreateCursor(diag1Img, 8, 8) // [/] w.cursors[DiagResize2Cursor] = glfw.CreateCursor(diag2Img, 8, 8) // [\] // Set up key callback to dispatch event w.SetKeyCallback(func(x *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { w.keyEv.Key = Key(key) w.keyEv.Mods = ModifierKey(mods) w.mods = w.keyEv.Mods if action == glfw.Press { w.Dispatch(OnKeyDown, &w.keyEv) } else if action == glfw.Release { w.Dispatch(OnKeyUp, &w.keyEv) } else if action == glfw.Repeat { w.Dispatch(OnKeyRepeat, &w.keyEv) } }) // Set up char callback to dispatch event w.SetCharModsCallback(func(x *glfw.Window, char rune, mods glfw.ModifierKey) { w.charEv.Char = char w.charEv.Mods = ModifierKey(mods) w.Dispatch(OnChar, &w.charEv) }) // Set up mouse button callback to dispatch event w.SetMouseButtonCallback(func(x *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { xpos, ypos := x.GetCursorPos() w.mouseEv.Button = MouseButton(button) w.mouseEv.Mods = ModifierKey(mods) w.mouseEv.Xpos = float32(xpos) //* float32(w.scaleX) TODO w.mouseEv.Ypos = float32(ypos) //* float32(w.scaleY) if action == glfw.Press { w.Dispatch(OnMouseDown, &w.mouseEv) } else if action == glfw.Release { w.Dispatch(OnMouseUp, &w.mouseEv) } }) // Set up window size callback to dispatch event w.SetSizeCallback(func(x *glfw.Window, width int, height int) { fbw, fbh := x.GetFramebufferSize() w.sizeEv.Width = width w.sizeEv.Height = height w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) w.Dispatch(OnWindowSize, &w.sizeEv) }) // Set up window position callback to dispatch event w.SetPosCallback(func(x *glfw.Window, xpos int, ypos int) { w.posEv.Xpos = xpos w.posEv.Ypos = ypos w.Dispatch(OnWindowPos, &w.posEv) }) // Set up window focus callback to dispatch event w.SetFocusCallback(func(x *glfw.Window, focused bool) { w.focusEv.Focused = focused w.Dispatch(OnWindowFocus, &w.focusEv) }) // Set up window cursor position callback to dispatch event w.SetCursorPosCallback(func(x *glfw.Window, xpos float64, ypos float64) { w.cursorEv.Xpos = float32(xpos) w.cursorEv.Ypos = float32(ypos) w.cursorEv.Mods = w.mods w.Dispatch(OnCursor, &w.cursorEv) }) // Set up mouse wheel scroll callback to dispatch event 
w.SetScrollCallback(func(x *glfw.Window, xoff float64, yoff float64) { w.scrollEv.Xoffset = float32(xoff) w.scrollEv.Yoffset = float32(yoff) w.scrollEv.Mods = w.mods w.Dispatch(OnScroll, &w.scrollEv) }) win = w // Set singleton return nil } // Gls returns the associated OpenGL state. func (w *GlfwWindow) Gls() *gls.GLS { return w.gls } // FullScreen returns whether this windows is currently fullscreen. func (w *GlfwWindow) FullScreen() bool { return w.fullscreen } // SetFullScreen sets this window as fullscreen on the primary monitor // TODO allow for fullscreen with resolutions different than the monitor's func (w *GlfwWindow) SetFullScreen(full bool)
{
	// If already in the desired state, nothing to do
	if w.fullscreen == full {
		return
	}
	// Set window fullscreen on the primary monitor
	if full {
		// Save current position and size of the window
		w.lastX, w.lastY = w.GetPos()
		w.lastWidth, w.lastHeight = w.GetSize()
		// Get size of primary monitor
		mon := glfw.GetPrimaryMonitor()
		vmode := mon.GetVideoMode()
		width := vmode.Width
		height := vmode.Height
		// Set as fullscreen on the primary monitor
		w.SetMonitor(mon, 0, 0, width, height, vmode.RefreshRate)
		w.fullscreen = true
	} else {
identifier_body
glfw.go
= glfw.CreateWindow(width, height, title, nil, nil) if err != nil { return err } w.MakeContextCurrent() // Create OpenGL state w.gls, err = gls.New() if err != nil { return err } // Compute and store scale fbw, fbh := w.GetFramebufferSize() w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) // Create map for cursors w.cursors = make(map[Cursor]*glfw.Cursor) w.lastCursorKey = CursorLast // Preallocate GLFW standard cursors w.cursors[ArrowCursor] = glfw.CreateStandardCursor(glfw.ArrowCursor) w.cursors[IBeamCursor] = glfw.CreateStandardCursor(glfw.IBeamCursor) w.cursors[CrosshairCursor] = glfw.CreateStandardCursor(glfw.CrosshairCursor) w.cursors[HandCursor] = glfw.CreateStandardCursor(glfw.HandCursor) w.cursors[HResizeCursor] = glfw.CreateStandardCursor(glfw.HResizeCursor) w.cursors[VResizeCursor] = glfw.CreateStandardCursor(glfw.VResizeCursor) // Preallocate extra G3N standard cursors (diagonal resize cursors) cursorDiag1Png := assets.MustAsset("cursors/diag1.png") // [/] cursorDiag2Png := assets.MustAsset("cursors/diag2.png") // [\] diag1Img, _, err := image.Decode(bytes.NewReader(cursorDiag1Png)) diag2Img, _, err := image.Decode(bytes.NewReader(cursorDiag2Png)) if err != nil { return err } w.cursors[DiagResize1Cursor] = glfw.CreateCursor(diag1Img, 8, 8) // [/] w.cursors[DiagResize2Cursor] = glfw.CreateCursor(diag2Img, 8, 8) // [\] // Set up key callback to dispatch event w.SetKeyCallback(func(x *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { w.keyEv.Key = Key(key) w.keyEv.Mods = ModifierKey(mods) w.mods = w.keyEv.Mods if action == glfw.Press { w.Dispatch(OnKeyDown, &w.keyEv) } else if action == glfw.Release { w.Dispatch(OnKeyUp, &w.keyEv) } else if action == glfw.Repeat { w.Dispatch(OnKeyRepeat, &w.keyEv) } }) // Set up char callback to dispatch event w.SetCharModsCallback(func(x *glfw.Window, char rune, mods glfw.ModifierKey) { w.charEv.Char = char w.charEv.Mods = ModifierKey(mods) w.Dispatch(OnChar, &w.charEv) }) // Set up mouse button callback to dispatch event w.SetMouseButtonCallback(func(x *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { xpos, ypos := x.GetCursorPos() w.mouseEv.Button = MouseButton(button) w.mouseEv.Mods = ModifierKey(mods) w.mouseEv.Xpos = float32(xpos) //* float32(w.scaleX) TODO w.mouseEv.Ypos = float32(ypos) //* float32(w.scaleY) if action == glfw.Press { w.Dispatch(OnMouseDown, &w.mouseEv) } else if action == glfw.Release { w.Dispatch(OnMouseUp, &w.mouseEv) } }) // Set up window size callback to dispatch event w.SetSizeCallback(func(x *glfw.Window, width int, height int) { fbw, fbh := x.GetFramebufferSize() w.sizeEv.Width = width w.sizeEv.Height = height w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) w.Dispatch(OnWindowSize, &w.sizeEv) }) // Set up window position callback to dispatch event w.SetPosCallback(func(x *glfw.Window, xpos int, ypos int) { w.posEv.Xpos = xpos w.posEv.Ypos = ypos w.Dispatch(OnWindowPos, &w.posEv) }) // Set up window focus callback to dispatch event w.SetFocusCallback(func(x *glfw.Window, focused bool) { w.focusEv.Focused = focused w.Dispatch(OnWindowFocus, &w.focusEv) }) // Set up window cursor position callback to dispatch event w.SetCursorPosCallback(func(x *glfw.Window, xpos float64, ypos float64) { w.cursorEv.Xpos = float32(xpos) w.cursorEv.Ypos = float32(ypos) w.cursorEv.Mods = w.mods w.Dispatch(OnCursor, &w.cursorEv) }) // Set up mouse wheel scroll callback to dispatch event 
w.SetScrollCallback(func(x *glfw.Window, xoff float64, yoff float64) { w.scrollEv.Xoffset = float32(xoff) w.scrollEv.Yoffset = float32(yoff) w.scrollEv.Mods = w.mods w.Dispatch(OnScroll, &w.scrollEv) }) win = w // Set singleton return nil } // Gls returns the associated OpenGL state. func (w *GlfwWindow) Gls() *gls.GLS { return w.gls } // FullScreen returns whether this windows is currently fullscreen. func (w *GlfwWindow) FullScreen() bool { return w.fullscreen } // SetFullScreen sets this window as fullscreen on the primary monitor // TODO allow for fullscreen with resolutions different than the monitor's func (w *GlfwWindow) SetFullScreen(full bool) { // If already in the desired state, nothing to do if w.fullscreen == full { return } // Set window fullscreen on the primary monitor if full { // Save current position and size of the window w.lastX, w.lastY = w.GetPos() w.lastWidth, w.lastHeight = w.GetSize() // Get size of primary monitor mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() width := vmode.Width height := vmode.Height // Set as fullscreen on the primary monitor w.SetMonitor(mon, 0, 0, width, height, vmode.RefreshRate) w.fullscreen = true } else { // Restore window to previous position and size w.SetMonitor(nil, w.lastX, w.lastY, w.lastWidth, w.lastHeight, glfw.DontCare) w.fullscreen = false } } // Destroy destroys this window and its context func (w *GlfwWindow) Destroy() { w.Window.Destroy() glfw.Terminate() runtime.UnlockOSThread() // Important when using the execution tracer } // Scale returns this window's DPI scale factor (FramebufferSize / Size) func (w *GlfwWindow) GetScale() (x float64, y float64) { return w.scaleX, w.scaleY } // ScreenResolution returns the screen resolution func (w *GlfwWindow) ScreenResolution(p interface{}) (width, height int) { mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() return vmode.Width, vmode.Height } // PollEvents process events in the event queue func (w *GlfwWindow) PollEvents() { glfw.PollEvents() } // SetSwapInterval sets the number of screen updates to wait from the time SwapBuffer() // is called before swapping the buffers and returning. func (w *GlfwWindow) SetSwapInterval(interval int) { glfw.SwapInterval(interval) } // SetCursor sets the window's cursor. func (w *GlfwWindow) SetCursor(cursor Cursor) { cur, ok := w.cursors[cursor] if !ok { panic("Invalid cursor") } w.Window.SetCursor(cur) } // CreateCursor creates a new custom cursor and returns an int handle. func (w *GlfwWindow) CreateCursor(imgFile string, xhot, yhot int) (Cursor, error) { // Open image file file, err := os.Open(imgFile) if err != nil { return 0, err } defer file.Close() // Decode image img, _, err := image.Decode(file) if err != nil { return 0, err } // Create and store cursor w.lastCursorKey += 1 w.cursors[Cursor(w.lastCursorKey)] = glfw.CreateCursor(img, xhot, yhot) return w.lastCursorKey, nil } // DisposeCursor deletes the existing custom cursor with the provided int handle. func (w *GlfwWindow) DisposeCursor(cursor Cursor) { if cursor <= CursorLast { panic("Can't dispose standard cursor") } w.cursors[cursor].Destroy() delete(w.cursors, cursor) } // DisposeAllCursors deletes all existing custom cursors. func (w *GlfwWindow) DisposeAllCustomCursors() { // Destroy and delete all custom cursors for key := range w.cursors { if key > CursorLast
{
	w.cursors[key].Destroy()
	delete(w.cursors, key)
}
conditional_block
glfw.go
ModifierKey(glfw.ModSuper) ) // Mouse buttons const ( MouseButton1 = MouseButton(glfw.MouseButton1) MouseButton2 = MouseButton(glfw.MouseButton2) MouseButton3 = MouseButton(glfw.MouseButton3) MouseButton4 = MouseButton(glfw.MouseButton4) MouseButton5 = MouseButton(glfw.MouseButton5) MouseButton6 = MouseButton(glfw.MouseButton6) MouseButton7 = MouseButton(glfw.MouseButton7) MouseButton8 = MouseButton(glfw.MouseButton8) MouseButtonLast = MouseButton(glfw.MouseButtonLast) MouseButtonLeft = MouseButton(glfw.MouseButtonLeft) MouseButtonRight = MouseButton(glfw.MouseButtonRight) MouseButtonMiddle = MouseButton(glfw.MouseButtonMiddle) ) // Input modes const ( CursorInputMode = InputMode(glfw.CursorMode) // See Cursor mode values StickyKeysInputMode = InputMode(glfw.StickyKeysMode) // Value can be either 1 or 0 StickyMouseButtonsInputMode = InputMode(glfw.StickyMouseButtonsMode) // Value can be either 1 or 0 ) // Cursor mode values const ( CursorNormal = CursorMode(glfw.CursorNormal) CursorHidden = CursorMode(glfw.CursorHidden) CursorDisabled = CursorMode(glfw.CursorDisabled) ) // GlfwWindow describes one glfw window type GlfwWindow struct { *glfw.Window // Embedded GLFW window core.Dispatcher // Embedded event dispatcher gls *gls.GLS // Associated OpenGL State fullscreen bool lastX int lastY int lastWidth int lastHeight int scaleX float64 scaleY float64 // Events keyEv KeyEvent charEv CharEvent mouseEv MouseEvent posEv PosEvent sizeEv SizeEvent cursorEv CursorEvent scrollEv ScrollEvent focusEv FocusEvent mods ModifierKey // Current modifier keys // Cursors cursors map[Cursor]*glfw.Cursor lastCursorKey Cursor } // Init initializes the GlfwWindow singleton with the specified width, height, and title. func Init(width, height int, title string) error { // Panic if already created if win != nil { panic(fmt.Errorf("can only call window.Init() once")) } // OpenGL functions must be executed in the same thread where // the context was created (by wmgr.CreateWindow()) runtime.LockOSThread() // Create wrapper window with dispatcher w := new(GlfwWindow) w.Dispatcher.Initialize() var err error // Initialize GLFW err = glfw.Init() if err != nil { return err } // Set window hints glfw.WindowHint(glfw.ContextVersionMajor, 3) glfw.WindowHint(glfw.ContextVersionMinor, 3) glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile) glfw.WindowHint(glfw.Samples, 8) // Set OpenGL forward compatible context only for OSX because it is required for OSX. // When this is set, glLineWidth(width) only accepts width=1.0 and generates an error // for any other values although the spec says it should ignore unsupported widths // and generate an error only when width <= 0. if runtime.GOOS == "darwin" { glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True) } // Create window and set it as the current context. // The window is created always as not full screen because if it is // created as full screen it not possible to revert it to windowed mode. // At the end of this function, the window will be set to full screen if requested. 
w.Window, err = glfw.CreateWindow(width, height, title, nil, nil) if err != nil { return err } w.MakeContextCurrent() // Create OpenGL state w.gls, err = gls.New() if err != nil { return err } // Compute and store scale fbw, fbh := w.GetFramebufferSize() w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) // Create map for cursors w.cursors = make(map[Cursor]*glfw.Cursor) w.lastCursorKey = CursorLast // Preallocate GLFW standard cursors w.cursors[ArrowCursor] = glfw.CreateStandardCursor(glfw.ArrowCursor) w.cursors[IBeamCursor] = glfw.CreateStandardCursor(glfw.IBeamCursor) w.cursors[CrosshairCursor] = glfw.CreateStandardCursor(glfw.CrosshairCursor) w.cursors[HandCursor] = glfw.CreateStandardCursor(glfw.HandCursor) w.cursors[HResizeCursor] = glfw.CreateStandardCursor(glfw.HResizeCursor) w.cursors[VResizeCursor] = glfw.CreateStandardCursor(glfw.VResizeCursor) // Preallocate extra G3N standard cursors (diagonal resize cursors) cursorDiag1Png := assets.MustAsset("cursors/diag1.png") // [/] cursorDiag2Png := assets.MustAsset("cursors/diag2.png") // [\] diag1Img, _, err := image.Decode(bytes.NewReader(cursorDiag1Png)) diag2Img, _, err := image.Decode(bytes.NewReader(cursorDiag2Png)) if err != nil { return err } w.cursors[DiagResize1Cursor] = glfw.CreateCursor(diag1Img, 8, 8) // [/] w.cursors[DiagResize2Cursor] = glfw.CreateCursor(diag2Img, 8, 8) // [\] // Set up key callback to dispatch event w.SetKeyCallback(func(x *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { w.keyEv.Key = Key(key) w.keyEv.Mods = ModifierKey(mods) w.mods = w.keyEv.Mods if action == glfw.Press { w.Dispatch(OnKeyDown, &w.keyEv) } else if action == glfw.Release { w.Dispatch(OnKeyUp, &w.keyEv) } else if action == glfw.Repeat { w.Dispatch(OnKeyRepeat, &w.keyEv) } }) // Set up char callback to dispatch event w.SetCharModsCallback(func(x *glfw.Window, char rune, mods glfw.ModifierKey) { w.charEv.Char = char w.charEv.Mods = ModifierKey(mods) w.Dispatch(OnChar, &w.charEv) }) // Set up mouse button callback to dispatch event w.SetMouseButtonCallback(func(x *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { xpos, ypos := x.GetCursorPos() w.mouseEv.Button = MouseButton(button) w.mouseEv.Mods = ModifierKey(mods) w.mouseEv.Xpos = float32(xpos) //* float32(w.scaleX) TODO w.mouseEv.Ypos = float32(ypos) //* float32(w.scaleY) if action == glfw.Press { w.Dispatch(OnMouseDown, &w.mouseEv) } else if action == glfw.Release { w.Dispatch(OnMouseUp, &w.mouseEv) } }) // Set up window size callback to dispatch event w.SetSizeCallback(func(x *glfw.Window, width int, height int) { fbw, fbh := x.GetFramebufferSize() w.sizeEv.Width = width w.sizeEv.Height = height w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) w.Dispatch(OnWindowSize, &w.sizeEv) }) // Set up window position callback to dispatch event w.SetPosCallback(func(x *glfw.Window, xpos int, ypos int) { w.posEv.Xpos = xpos w.posEv.Ypos = ypos w.Dispatch(OnWindowPos, &w.posEv) }) // Set up window focus callback to dispatch event w.SetFocusCallback(func(x *glfw.Window, focused bool) { w.focusEv.Focused = focused w.Dispatch(OnWindowFocus, &w.focusEv) }) // Set up window cursor position callback to dispatch event w.SetCursorPosCallback(func(x *glfw.Window, xpos float64, ypos float64) { w.cursorEv.Xpos = float32(xpos) w.cursorEv.Ypos = float32(ypos) w.cursorEv.Mods = w.mods w.Dispatch(OnCursor, &w.cursorEv) }) // Set up mouse wheel scroll callback to dispatch event 
w.SetScrollCallback(func(x *glfw.Window, xoff float64, yoff float64) { w.scrollEv.Xoffset = float32(xoff) w.scrollEv.Yoffset = float32(yoff) w.scrollEv.Mods = w.mods w.Dispatch(OnScroll, &w.scrollEv) }) win = w // Set singleton return nil } // Gls returns the associated OpenGL state. func (w *GlfwWindow) Gls() *gls.GLS { return w.gls } // FullScreen returns whether this window is currently fullscreen. func (w *GlfwWindow)
FullScreen
identifier_name
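Aside: the Init code in this sample derives a per-window DPI scale as FramebufferSize / Size and recomputes it in the size callback. A minimal standalone Go sketch of that arithmetic (the sizes in main are hypothetical):

package main

import "fmt"

// computeScale mirrors how the sample derives w.scaleX and w.scaleY: on HiDPI
// ("retina") displays the framebuffer is larger than the window measured in
// screen coordinates, so the ratio is the scale factor.
func computeScale(fbw, fbh, width, height int) (float64, float64) {
	return float64(fbw) / float64(width), float64(fbh) / float64(height)
}

func main() {
	sx, sy := computeScale(1600, 1200, 800, 600) // hypothetical sizes
	fmt.Println(sx, sy)                          // 2 2
}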
glfw.go
this is set, glLineWidth(width) only accepts width=1.0 and generates an error // for any other values, although the spec says it should ignore unsupported widths // and generate an error only when width <= 0. if runtime.GOOS == "darwin" { glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True) } // Create window and set it as the current context. // The window is always created as not full screen because if it is // created as full screen it is not possible to revert it to windowed mode. // At the end of this function, the window will be set to full screen if requested. w.Window, err = glfw.CreateWindow(width, height, title, nil, nil) if err != nil { return err } w.MakeContextCurrent() // Create OpenGL state w.gls, err = gls.New() if err != nil { return err } // Compute and store scale fbw, fbh := w.GetFramebufferSize() w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) // Create map for cursors w.cursors = make(map[Cursor]*glfw.Cursor) w.lastCursorKey = CursorLast // Preallocate GLFW standard cursors w.cursors[ArrowCursor] = glfw.CreateStandardCursor(glfw.ArrowCursor) w.cursors[IBeamCursor] = glfw.CreateStandardCursor(glfw.IBeamCursor) w.cursors[CrosshairCursor] = glfw.CreateStandardCursor(glfw.CrosshairCursor) w.cursors[HandCursor] = glfw.CreateStandardCursor(glfw.HandCursor) w.cursors[HResizeCursor] = glfw.CreateStandardCursor(glfw.HResizeCursor) w.cursors[VResizeCursor] = glfw.CreateStandardCursor(glfw.VResizeCursor) // Preallocate extra G3N standard cursors (diagonal resize cursors) cursorDiag1Png := assets.MustAsset("cursors/diag1.png") // [/] cursorDiag2Png := assets.MustAsset("cursors/diag2.png") // [\] // Decode each cursor image, checking both errors so a failure in the first decode is not masked by the second diag1Img, _, err := image.Decode(bytes.NewReader(cursorDiag1Png)) if err != nil { return err } diag2Img, _, err := image.Decode(bytes.NewReader(cursorDiag2Png)) if err != nil { return err } w.cursors[DiagResize1Cursor] = glfw.CreateCursor(diag1Img, 8, 8) // [/] w.cursors[DiagResize2Cursor] = glfw.CreateCursor(diag2Img, 8, 8) // [\] // Set up key callback to dispatch event w.SetKeyCallback(func(x *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { w.keyEv.Key = Key(key) w.keyEv.Mods = ModifierKey(mods) w.mods = w.keyEv.Mods if action == glfw.Press { w.Dispatch(OnKeyDown, &w.keyEv) } else if action == glfw.Release { w.Dispatch(OnKeyUp, &w.keyEv) } else if action == glfw.Repeat { w.Dispatch(OnKeyRepeat, &w.keyEv) } }) // Set up char callback to dispatch event w.SetCharModsCallback(func(x *glfw.Window, char rune, mods glfw.ModifierKey) { w.charEv.Char = char w.charEv.Mods = ModifierKey(mods) w.Dispatch(OnChar, &w.charEv) }) // Set up mouse button callback to dispatch event w.SetMouseButtonCallback(func(x *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { xpos, ypos := x.GetCursorPos() w.mouseEv.Button = MouseButton(button) w.mouseEv.Mods = ModifierKey(mods) w.mouseEv.Xpos = float32(xpos) //* float32(w.scaleX) TODO w.mouseEv.Ypos = float32(ypos) //* float32(w.scaleY) if action == glfw.Press { w.Dispatch(OnMouseDown, &w.mouseEv) } else if action == glfw.Release { w.Dispatch(OnMouseUp, &w.mouseEv) } }) // Set up window size callback to dispatch event w.SetSizeCallback(func(x *glfw.Window, width int, height int) { fbw, fbh := x.GetFramebufferSize() w.sizeEv.Width = width w.sizeEv.Height = height w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) w.Dispatch(OnWindowSize, &w.sizeEv) }) // Set up window position callback to dispatch event w.SetPosCallback(func(x *glfw.Window, xpos int, ypos int)
{ w.posEv.Xpos = xpos w.posEv.Ypos = ypos w.Dispatch(OnWindowPos, &w.posEv) }) // Set up window focus callback to dispatch event w.SetFocusCallback(func(x *glfw.Window, focused bool) { w.focusEv.Focused = focused w.Dispatch(OnWindowFocus, &w.focusEv) }) // Set up window cursor position callback to dispatch event w.SetCursorPosCallback(func(x *glfw.Window, xpos float64, ypos float64) { w.cursorEv.Xpos = float32(xpos) w.cursorEv.Ypos = float32(ypos) w.cursorEv.Mods = w.mods w.Dispatch(OnCursor, &w.cursorEv) }) // Set up mouse wheel scroll callback to dispatch event w.SetScrollCallback(func(x *glfw.Window, xoff float64, yoff float64) { w.scrollEv.Xoffset = float32(xoff) w.scrollEv.Yoffset = float32(yoff) w.scrollEv.Mods = w.mods w.Dispatch(OnScroll, &w.scrollEv) }) win = w // Set singleton return nil } // Gls returns the associated OpenGL state. func (w *GlfwWindow) Gls() *gls.GLS { return w.gls } // FullScreen returns whether this window is currently fullscreen. func (w *GlfwWindow) FullScreen() bool { return w.fullscreen } // SetFullScreen sets this window as fullscreen on the primary monitor // TODO allow for fullscreen with resolutions different from the monitor's func (w *GlfwWindow) SetFullScreen(full bool) { // If already in the desired state, nothing to do if w.fullscreen == full { return } // Set window fullscreen on the primary monitor if full { // Save current position and size of the window w.lastX, w.lastY = w.GetPos() w.lastWidth, w.lastHeight = w.GetSize() // Get size of primary monitor mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() width := vmode.Width height := vmode.Height // Set as fullscreen on the primary monitor w.SetMonitor(mon, 0, 0, width, height, vmode.RefreshRate) w.fullscreen = true } else { // Restore window to previous position and size w.SetMonitor(nil, w.lastX, w.lastY, w.lastWidth, w.lastHeight, glfw.DontCare) w.fullscreen = false } } // Destroy destroys this window and its context func (w *GlfwWindow) Destroy() { w.Window.Destroy() glfw.Terminate() runtime.UnlockOSThread() // Important when using the execution tracer } // GetScale returns this window's DPI scale factor (FramebufferSize / Size) func (w *GlfwWindow) GetScale() (x float64, y float64) { return w.scaleX, w.scaleY } // ScreenResolution returns the screen resolution func (w *GlfwWindow) ScreenResolution(p interface{}) (width, height int) { mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() return vmode.Width, vmode.Height } // PollEvents processes events in the event queue func (w *GlfwWindow) PollEvents() { glfw.PollEvents() } // SetSwapInterval sets the number of screen updates to wait from the time SwapBuffers() // is called before swapping the buffers and returning. func (w *GlfwWindow) SetSwapInterval(interval int) { glfw.SwapInterval(interval) } // SetCursor sets the window's cursor. func (w *GlfwWindow) SetCursor(cursor Cursor) { cur, ok := w.cursors[cursor] if !ok { panic("Invalid cursor") } w.Window.SetCursor(cur) } // CreateCursor creates a new custom cursor and returns an int handle. func (w *GlfwWindow) CreateCursor(imgFile string, xhot, yhot int) (Cursor, error) { // Open image file file, err := os.Open(imgFile) if err != nil { return 0, err } defer file.Close() // Decode image img, _, err := image.Decode(file) if err != nil { return 0, err
} // Create and store cursor w.lastCursorKey++ w.cursors[w.lastCursorKey] = glfw.CreateCursor(img, xhot, yhot)
random_line_split
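Aside: the middle above registers each new custom cursor under the next integer key past the predefined ones, and that key doubles as the public handle. A standalone Go sketch of the bookkeeping, with strings standing in for the sample's *glfw.Cursor values:

package main

import "fmt"

// Cursor is an integer handle, as in the sample; CursorLast stands in for the
// key of the last predefined cursor (its real value is defined elsewhere).
type Cursor int

const CursorLast Cursor = 5

func main() {
	cursors := make(map[Cursor]string)
	lastCursorKey := CursorLast
	for _, img := range []string{"diag1.png", "diag2.png"} {
		lastCursorKey++ // each new cursor gets the next key, which is its handle
		cursors[lastCursorKey] = img
	}
	fmt.Println(cursors) // map[6:diag1.png 7:diag2.png]
}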
rf_model.py
', \ 'following', 'homeless', 'homeless_trend', 'food_class'] writer.writerow(keys) i = 0 for dic in jsonData: try: # get coordinates if dic['location']['coordinates'] is None: city = dic['location']['place_name'] city = city.replace(" ","%20") coor = cityPos(city) lng = coor['location']['lng'] lat = coor['location']['lat'] else: lng = dic['location']['coordinates'][0] lat = dic['location']['coordinates'][1] # get time amd timesptamp time = dic['created_at']['day']+ '-' + \ trans_month(dic['created_at']['month'])+ '-' + \ dic['created_at']['year']+ ' ' +dic['created_at']['time'] timeArray = t.strptime(time, "%d-%m-%Y %H:%M:%S") timestamp = t.mktime(timeArray) # to ensure at least one of homeless info and food info appears home = dic['homeless'] foods = dic['food_list'] if (home is None) and (foods is None or len(foods) == 0): continue # get homeless information if home is None: homeless = -1 homeless_trend = 0 else: homeless = dic['homeless']['cnt16'] homeless_trend = dic['homeless']['incre/decre'] # get food if foods is None or len(foods) == 0: writer.writerow([i, time, timestamp, lat, lng, dic['polarity'], \ dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, "-1"]) i += 1 else: for food in foods: food_class = get_food_class(food) writer.writerow([i, time, timestamp, lat, lng, \ dic['polarity'], dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, food_class]) i += 1 except: continue csvfile.close() def trans_month(month): month_dic = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', \ 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', \ 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'} return month_dic[month] # transform food name into food classes that are integers def get_food_class(food): if not food in food_dict.keys(): food_dict[food] = str(len(food_dict)) return food_dict[food] def generate_rev_dict(): for key,value in food_dict.items(): rev_dict[value] = key # get food name by food class def get_food_type(food_class): the_class = str(food_class) if the_class in rev_dict.keys(): return rev_dict[the_class] return None # get food group by food name def get_food_group(food):
""" --------------------------------------------------------- --------------------- Main Function --------------------- --------------------------------------------------------- """ if __name__ == "__main__": COUCHDB_NAME = sys.argv[1] OUT_COUCHDB_NAME = sys.argv[2] REFORMED_FILE = sys.argv[3] # create spark session spark = SparkSession.builder.appName(APP_NAME) \ .master(SPARK_URL).getOrCreate() """ ---------- data preparation ---------- """ # read data from couchdb trans(REFORMED_FILE) # reform data into a dataframe df = spark.read.options(header = "true", inferschema = "true")\ .csv(REFORMED_FILE) print("\nTotal number of rows loaded: %d" % df.count()) # remove duplicated entries df = df.drop_duplicates() print("\nTotal number of rows without duplicates: %d" % df.count()) df.show() # filter dataframe df_no_food = df.filter(df['food_class'] == -1) df_no_homeless = df.filter(df['homeless'] == -1) df_all_info = df.filter(df['food_class'] >= 0).filter(df['homeless'] >= 0) print("\nNumber of rows having all information: %d" % df_all_info.count()) print("number of rows without food information: %d" % df_no_food.count()) print("number of rows without homeless information: %d" % df_no_homeless.count()) # transform dataframe into RDD transformed_df_food = df_all_info.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) transformed_df_homeless = df_all_info.rdd.map(lambda row: LabeledPoint(row[-3], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_homeless_trend = df_all_info.rdd.map(lambda row: LabeledPoint(row[-2], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) # split reformed data into tranning data and test data splits = [TRAINING_DATA_RATIO, 1.0 - TRAINING_DATA_RATIO] training_data_food, test_data_food = transformed_df_food.randomSplit(splits, RANDOM_SEED) training_data_homeless, test_data_homeless = transformed_df_homeless.randomSplit(splits, RANDOM_SEED) training_data_homeless_trend, test_data_homeless_trend = transformed_df_homeless_trend.randomSplit(splits, RANDOM_SEED) print("\nNumber of training set rows: %d" % training_data_food.count()) print("Number of test set rows: %d" % test_data_food.count()) """ ---------- model training ---------- """ # train the classification model using training data start_time = t.time() num_classes = len(food_dict) model_food_classifier = RandomForest.trainClassifier(training_data_food, \ numClasses=num_classes,categoricalFeaturesInfo={},\ numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="gini", \ maxDepth=RF_MAX_DEPTH, maxBins=32, \ seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train food classifier: %.3f seconds" % elapsed_time) # train the regression model using training data start_time = t.time() model_homeless_regressor = RandomForest.trainRegressor(training_data_homeless,\ categoricalFeaturesInfo={},numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="variance",\ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) model_homeless_trend_regressor = RandomForest.trainRegressor(training_data_homeless_trend, categoricalFeaturesInfo={}, \ numTrees=RF_NUM_TREES, featureSubsetStrategy="auto", impurity="variance", \ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train homeless regressor: %.3f seconds" % elapsed_time) # make predictions using test data and calculate the accuracy food_predictions = 
model_food_classifier.predict(test_data_food.map(lambda x: x.features)) homeless_predictions = model_homeless_regressor.predict(test_data_homeless.map(lambda x: x.features)) homeless_trend_predictions = model_homeless_trend_regressor.predict(test_data_homeless_trend.map(lambda x: x.features)) labels_and_predictions_food = test_data_food.map(lambda x: x.label).zip(food_predictions) labels_and_predictions_homeless = test_data_homeless.map(lambda x: x.label).zip(homeless_predictions) labels_and_predictions_homeless_trend = test_data_homeless_trend.map(lambda x: x.label).zip(homeless_trend_predictions) food_acc = labels_and_predictions_food.filter(lambda x: x[0] == x[1]).count() / float(test_data_food.count()) homeless_acc = labels_and_predictions_homeless.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless.count()) homeless_trend_acc = labels_and_predictions_homeless_trend.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless_trend.count()) print("\nFood classifier accuracy: %.3f%%" % (food_acc * 100)) print("Homeless regressor accuracy: %.3f%%" % (homeless_acc
if food in Keywords.fastfood: return "fastfood" if food in Keywords.fruits: return "fruits" if food in Keywords.grains: return "grains" if food in Keywords.meat: return "meat" if food in Keywords.seafood: return "seafood" if food in Keywords.vegetables: return "vegetables" return None
identifier_body
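Aside: get_food_class / generate_rev_dict / get_food_type in this sample implement a small label encoder — each new food name is assigned the next integer id (stored as a string), and the reverse dict maps predicted ids back to names. The round trip in isolation (the food names are made up):

food_dict = {}
rev_dict = {}

def get_food_class(food):
    # assign the next id to unseen names, reuse existing ids otherwise
    if food not in food_dict:
        food_dict[food] = str(len(food_dict))
    return food_dict[food]

def generate_rev_dict():
    for key, value in food_dict.items():
        rev_dict[value] = key

for name in ["pizza", "apple", "pizza"]:
    print(name, "->", get_food_class(name))  # pizza -> 0, apple -> 1, pizza -> 0
generate_rev_dict()
print(rev_dict["1"])  # apple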
rf_model.py
\ 'following', 'homeless', 'homeless_trend', 'food_class'] writer.writerow(keys) i = 0 for dic in jsonData: try: # get coordinates if dic['location']['coordinates'] is None: city = dic['location']['place_name'] city = city.replace(" ","%20") coor = cityPos(city) lng = coor['location']['lng'] lat = coor['location']['lat'] else: lng = dic['location']['coordinates'][0] lat = dic['location']['coordinates'][1] # get time amd timesptamp time = dic['created_at']['day']+ '-' + \ trans_month(dic['created_at']['month'])+ '-' + \ dic['created_at']['year']+ ' ' +dic['created_at']['time'] timeArray = t.strptime(time, "%d-%m-%Y %H:%M:%S") timestamp = t.mktime(timeArray) # to ensure at least one of homeless info and food info appears home = dic['homeless'] foods = dic['food_list'] if (home is None) and (foods is None or len(foods) == 0): continue # get homeless information if home is None: homeless = -1 homeless_trend = 0 else: homeless = dic['homeless']['cnt16'] homeless_trend = dic['homeless']['incre/decre'] # get food if foods is None or len(foods) == 0: writer.writerow([i, time, timestamp, lat, lng, dic['polarity'], \ dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, "-1"]) i += 1 else: for food in foods: food_class = get_food_class(food) writer.writerow([i, time, timestamp, lat, lng, \ dic['polarity'], dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, food_class]) i += 1 except: continue csvfile.close() def trans_month(month): month_dic = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', \ 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', \ 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'} return month_dic[month] # transform food name into food classes that are integers def get_food_class(food): if not food in food_dict.keys(): food_dict[food] = str(len(food_dict)) return food_dict[food] def generate_rev_dict(): for key,value in food_dict.items(): rev_dict[value] = key # get food name by food class def get_food_type(food_class): the_class = str(food_class) if the_class in rev_dict.keys(): return rev_dict[the_class] return None # get food group by food name def
(food): if food in Keywords.fastfood: return "fastfood" if food in Keywords.fruits: return "fruits" if food in Keywords.grains: return "grains" if food in Keywords.meat: return "meat" if food in Keywords.seafood: return "seafood" if food in Keywords.vegetables: return "vegetables" return None """ --------------------------------------------------------- --------------------- Main Function --------------------- --------------------------------------------------------- """ if __name__ == "__main__": COUCHDB_NAME = sys.argv[1] OUT_COUCHDB_NAME = sys.argv[2] REFORMED_FILE = sys.argv[3] # create spark session spark = SparkSession.builder.appName(APP_NAME) \ .master(SPARK_URL).getOrCreate() """ ---------- data preparation ---------- """ # read data from couchdb trans(REFORMED_FILE) # reform data into a dataframe df = spark.read.options(header = "true", inferschema = "true")\ .csv(REFORMED_FILE) print("\nTotal number of rows loaded: %d" % df.count()) # remove duplicated entries df = df.drop_duplicates() print("\nTotal number of rows without duplicates: %d" % df.count()) df.show() # filter dataframe df_no_food = df.filter(df['food_class'] == -1) df_no_homeless = df.filter(df['homeless'] == -1) df_all_info = df.filter(df['food_class'] >= 0).filter(df['homeless'] >= 0) print("\nNumber of rows having all information: %d" % df_all_info.count()) print("number of rows without food information: %d" % df_no_food.count()) print("number of rows without homeless information: %d" % df_no_homeless.count()) # transform dataframe into RDD transformed_df_food = df_all_info.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) transformed_df_homeless = df_all_info.rdd.map(lambda row: LabeledPoint(row[-3], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_homeless_trend = df_all_info.rdd.map(lambda row: LabeledPoint(row[-2], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) # split reformed data into tranning data and test data splits = [TRAINING_DATA_RATIO, 1.0 - TRAINING_DATA_RATIO] training_data_food, test_data_food = transformed_df_food.randomSplit(splits, RANDOM_SEED) training_data_homeless, test_data_homeless = transformed_df_homeless.randomSplit(splits, RANDOM_SEED) training_data_homeless_trend, test_data_homeless_trend = transformed_df_homeless_trend.randomSplit(splits, RANDOM_SEED) print("\nNumber of training set rows: %d" % training_data_food.count()) print("Number of test set rows: %d" % test_data_food.count()) """ ---------- model training ---------- """ # train the classification model using training data start_time = t.time() num_classes = len(food_dict) model_food_classifier = RandomForest.trainClassifier(training_data_food, \ numClasses=num_classes,categoricalFeaturesInfo={},\ numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="gini", \ maxDepth=RF_MAX_DEPTH, maxBins=32, \ seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train food classifier: %.3f seconds" % elapsed_time) # train the regression model using training data start_time = t.time() model_homeless_regressor = RandomForest.trainRegressor(training_data_homeless,\ categoricalFeaturesInfo={},numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="variance",\ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) model_homeless_trend_regressor = RandomForest.trainRegressor(training_data_homeless_trend, categoricalFeaturesInfo={}, \ numTrees=RF_NUM_TREES, featureSubsetStrategy="auto", impurity="variance", \ 
maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train homeless regressor: %.3f seconds" % elapsed_time) # make predictions using test data and calculate the accuracy food_predictions = model_food_classifier.predict(test_data_food.map(lambda x: x.features)) homeless_predictions = model_homeless_regressor.predict(test_data_homeless.map(lambda x: x.features)) homeless_trend_predictions = model_homeless_trend_regressor.predict(test_data_homeless_trend.map(lambda x: x.features)) labels_and_predictions_food = test_data_food.map(lambda x: x.label).zip(food_predictions) labels_and_predictions_homeless = test_data_homeless.map(lambda x: x.label).zip(homeless_predictions) labels_and_predictions_homeless_trend = test_data_homeless_trend.map(lambda x: x.label).zip(homeless_trend_predictions) food_acc = labels_and_predictions_food.filter(lambda x: x[0] == x[1]).count() / float(test_data_food.count()) homeless_acc = labels_and_predictions_homeless.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless.count()) homeless_trend_acc = labels_and_predictions_homeless_trend.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless_trend.count()) print("\nFood classifier accuracy: %.3f%%" % (food_acc * 100)) print("Homeless regressor accuracy: %.3f%%" % (homeless_acc
get_food_group
identifier_name
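Aside: the evaluation code in this sample uses two different notions of accuracy — exact label match for the food classifier, and a tolerance band of 10 for the homeless regressors. Reduced to plain Python over (label, prediction) pairs (the pairs are made up):

def exact_match_accuracy(pairs):
    return sum(1 for label, pred in pairs if label == pred) / float(len(pairs))

def tolerance_accuracy(pairs, tol=10):
    # counts a regression prediction as correct when within tol of the label
    return sum(1 for label, pred in pairs if abs(label - pred) < tol) / float(len(pairs))

pairs = [(3, 3), (7, 2), (40, 44), (100, 85)]
print(exact_match_accuracy(pairs))  # 0.25
print(tolerance_accuracy(pairs))    # 0.75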
rf_model.py
("number of rows without homeless information: %d" % df_no_homeless.count()) # transform dataframe into RDD transformed_df_food = df_all_info.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) transformed_df_homeless = df_all_info.rdd.map(lambda row: LabeledPoint(row[-3], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_homeless_trend = df_all_info.rdd.map(lambda row: LabeledPoint(row[-2], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) # split reformed data into tranning data and test data splits = [TRAINING_DATA_RATIO, 1.0 - TRAINING_DATA_RATIO] training_data_food, test_data_food = transformed_df_food.randomSplit(splits, RANDOM_SEED) training_data_homeless, test_data_homeless = transformed_df_homeless.randomSplit(splits, RANDOM_SEED) training_data_homeless_trend, test_data_homeless_trend = transformed_df_homeless_trend.randomSplit(splits, RANDOM_SEED) print("\nNumber of training set rows: %d" % training_data_food.count()) print("Number of test set rows: %d" % test_data_food.count()) """ ---------- model training ---------- """ # train the classification model using training data start_time = t.time() num_classes = len(food_dict) model_food_classifier = RandomForest.trainClassifier(training_data_food, \ numClasses=num_classes,categoricalFeaturesInfo={},\ numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="gini", \ maxDepth=RF_MAX_DEPTH, maxBins=32, \ seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train food classifier: %.3f seconds" % elapsed_time) # train the regression model using training data start_time = t.time() model_homeless_regressor = RandomForest.trainRegressor(training_data_homeless,\ categoricalFeaturesInfo={},numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="variance",\ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) model_homeless_trend_regressor = RandomForest.trainRegressor(training_data_homeless_trend, categoricalFeaturesInfo={}, \ numTrees=RF_NUM_TREES, featureSubsetStrategy="auto", impurity="variance", \ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train homeless regressor: %.3f seconds" % elapsed_time) # make predictions using test data and calculate the accuracy food_predictions = model_food_classifier.predict(test_data_food.map(lambda x: x.features)) homeless_predictions = model_homeless_regressor.predict(test_data_homeless.map(lambda x: x.features)) homeless_trend_predictions = model_homeless_trend_regressor.predict(test_data_homeless_trend.map(lambda x: x.features)) labels_and_predictions_food = test_data_food.map(lambda x: x.label).zip(food_predictions) labels_and_predictions_homeless = test_data_homeless.map(lambda x: x.label).zip(homeless_predictions) labels_and_predictions_homeless_trend = test_data_homeless_trend.map(lambda x: x.label).zip(homeless_trend_predictions) food_acc = labels_and_predictions_food.filter(lambda x: x[0] == x[1]).count() / float(test_data_food.count()) homeless_acc = labels_and_predictions_homeless.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless.count()) homeless_trend_acc = labels_and_predictions_homeless_trend.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless_trend.count()) print("\nFood classifier accuracy: %.3f%%" % (food_acc * 100)) print("Homeless regressor accuracy: %.3f%%" % (homeless_acc * 100)) print("Homeless trend regressor accuracy: %.3f%%" % 
(homeless_trend_acc * 100)) """ ---------- make predictions ---------- """ food_pre = df_no_food.count() > 0 homeless_pre = df_no_homeless.count() > 0 # make food predictions if food_pre: transformed_df_no_food = df_no_food.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) predict_foods = model_food_classifier.predict(transformed_df_no_food.map(lambda x: x.features)) # make homeless predictions if homeless_pre: transformed_df_no_homeless = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[8], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_no_homeless_trend = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[9], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) predict_homeless = model_homeless_regressor.predict(transformed_df_no_homeless.map(lambda x: x.features)) predict_homeless_trend = model_homeless_trend_regressor.predict(transformed_df_no_homeless_trend.map(lambda x: x.features)) # zip id with predictions preparing for joining data if food_pre: rdd_predict_foods = df_no_food.rdd.map(lambda row: row[0]).zip(predict_foods.map(int)) list_predict_foods = rdd_predict_foods.collect() if homeless_pre: rdd_predict_homeless = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless.map(int)) rdd_predict_homeless_trend = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless_trend.map(int)) list_predict_homeless = rdd_predict_homeless.collect() list_predict_homeless_trend = rdd_predict_homeless_trend.collect() """ ---------- join predictions to original data """ # transform predicted rdd to dataframe if food_pre: df_predict_foods = spark.createDataFrame(list_predict_foods, schema=["id","food_class"]) df_no_food = df_no_food.drop('food_class') concat_df_food = df_no_food.join(df_predict_foods, on='id') if homeless_pre: df_predict_homeless = spark.createDataFrame(list_predict_homeless, schema=["id","homeless"]) df_predict_homeless_trend = spark.createDataFrame(list_predict_homeless_trend, schema=["id","homeless_trend"]) df_no_homeless = df_no_homeless.drop('homeless').drop('homeless_trend') concat_df_homeless = df_no_homeless.join(df_predict_homeless, on='id').join(df_predict_homeless_trend, on='id') generate_rev_dict() get_food_type_udf = udf(get_food_type, StringType()) get_food_group_udf = udf(get_food_group, StringType()) df_all_info = df_all_info.withColumn('food', get_food_type_udf(df_all_info['food_class'])) df_all_info = df_all_info.drop('food_class') # reform the dataframe to prepare for tranforming to json if food_pre: concat_df_food = concat_df_food.withColumn('food', get_food_type_udf(concat_df_food['food_class'])) concat_df_food = concat_df_food.drop('food_class') union_df = df_all_info.union(concat_df_food) else: union_df = df_all_info if homeless_pre: concat_df_homeless = concat_df_homeless.withColumn('food', get_food_type_udf(concat_df_homeless['food_class'])) concat_df_homeless = concat_df_homeless.drop('food_class') union_df = union_df.union(concat_df_homeless) union_df = union_df.drop('id') union_df = union_df.drop('timestamp') union_df = union_df.withColumn('food_group', get_food_group_udf(union_df['food'])) print("\nTotal number of rows of final data: %d" % (union_df.count())) union_df.show() """ ---------- transform dataframe into json preparing for inserting back to couchdb """ json_data = union_df.toJSON() print("\nStart inserting data back to database...") # insert data into couchdb my_db = Couch(OUT_COUCHDB_NAME) final_json = {} final_json["type"] = "FeatureCollection" 
final_json["features"] = [] j = 0 for row in json_data.collect(): entry = {} entry["type"] = "Feature" entry["properties"] = {} entry["geometry"] = {} entry["geometry"]["type"] = "Point" entry["geometry"]["coordinates"] = [] json_obj = json.loads(row)
entry["properties"]["time"] = json_obj["time"]
random_line_split
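Aside: before inserting back into CouchDB, the sample wraps each row as a GeoJSON Feature inside a FeatureCollection. The target shape, built standalone (the property values are made up; GeoJSON uses [lng, lat] order):

import json

final_json = {"type": "FeatureCollection", "features": []}
entry = {
    "type": "Feature",
    "properties": {"time": "01-05-2018 12:00:00", "food": "pizza"},
    "geometry": {"type": "Point", "coordinates": [144.9631, -37.8136]},
}
final_json["features"].append(entry)
print(json.dumps(final_json))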
rf_model.py
', \ 'following', 'homeless', 'homeless_trend', 'food_class'] writer.writerow(keys) i = 0 for dic in jsonData:
home = dic['homeless'] foods = dic['food_list'] if (home is None) and (foods is None or len(foods) == 0): continue # get homeless information if home is None: homeless = -1 homeless_trend = 0 else: homeless = dic['homeless']['cnt16'] homeless_trend = dic['homeless']['incre/decre'] # get food if foods is None or len(foods) == 0: writer.writerow([i, time, timestamp, lat, lng, dic['polarity'], \ dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, "-1"]) i += 1 else: for food in foods: food_class = get_food_class(food) writer.writerow([i, time, timestamp, lat, lng, \ dic['polarity'], dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, food_class]) i += 1 except: continue csvfile.close() def trans_month(month): month_dic = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', \ 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', \ 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'} return month_dic[month] # transform food name into food classes that are integers def get_food_class(food): if not food in food_dict.keys(): food_dict[food] = str(len(food_dict)) return food_dict[food] def generate_rev_dict(): for key,value in food_dict.items(): rev_dict[value] = key # get food name by food class def get_food_type(food_class): the_class = str(food_class) if the_class in rev_dict.keys(): return rev_dict[the_class] return None # get food group by food name def get_food_group(food): if food in Keywords.fastfood: return "fastfood" if food in Keywords.fruits: return "fruits" if food in Keywords.grains: return "grains" if food in Keywords.meat: return "meat" if food in Keywords.seafood: return "seafood" if food in Keywords.vegetables: return "vegetables" return None """ --------------------------------------------------------- --------------------- Main Function --------------------- --------------------------------------------------------- """ if __name__ == "__main__": COUCHDB_NAME = sys.argv[1] OUT_COUCHDB_NAME = sys.argv[2] REFORMED_FILE = sys.argv[3] # create spark session spark = SparkSession.builder.appName(APP_NAME) \ .master(SPARK_URL).getOrCreate() """ ---------- data preparation ---------- """ # read data from couchdb trans(REFORMED_FILE) # reform data into a dataframe df = spark.read.options(header = "true", inferschema = "true")\ .csv(REFORMED_FILE) print("\nTotal number of rows loaded: %d" % df.count()) # remove duplicated entries df = df.drop_duplicates() print("\nTotal number of rows without duplicates: %d" % df.count()) df.show() # filter dataframe df_no_food = df.filter(df['food_class'] == -1) df_no_homeless = df.filter(df['homeless'] == -1) df_all_info = df.filter(df['food_class'] >= 0).filter(df['homeless'] >= 0) print("\nNumber of rows having all information: %d" % df_all_info.count()) print("number of rows without food information: %d" % df_no_food.count()) print("number of rows without homeless information: %d" % df_no_homeless.count()) # transform dataframe into RDD transformed_df_food = df_all_info.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) transformed_df_homeless = df_all_info.rdd.map(lambda row: LabeledPoint(row[-3], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_homeless_trend = df_all_info.rdd.map(lambda row: LabeledPoint(row[-2], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) # split reformed data into tranning data and test data splits = [TRAINING_DATA_RATIO, 1.0 - TRAINING_DATA_RATIO] training_data_food, test_data_food = 
transformed_df_food.randomSplit(splits, RANDOM_SEED) training_data_homeless, test_data_homeless = transformed_df_homeless.randomSplit(splits, RANDOM_SEED) training_data_homeless_trend, test_data_homeless_trend = transformed_df_homeless_trend.randomSplit(splits, RANDOM_SEED) print("\nNumber of training set rows: %d" % training_data_food.count()) print("Number of test set rows: %d" % test_data_food.count()) """ ---------- model training ---------- """ # train the classification model using training data start_time = t.time() num_classes = len(food_dict) model_food_classifier = RandomForest.trainClassifier(training_data_food, \ numClasses=num_classes,categoricalFeaturesInfo={},\ numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="gini", \ maxDepth=RF_MAX_DEPTH, maxBins=32, \ seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train food classifier: %.3f seconds" % elapsed_time) # train the regression model using training data start_time = t.time() model_homeless_regressor = RandomForest.trainRegressor(training_data_homeless,\ categoricalFeaturesInfo={},numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="variance",\ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) model_homeless_trend_regressor = RandomForest.trainRegressor(training_data_homeless_trend, categoricalFeaturesInfo={}, \ numTrees=RF_NUM_TREES, featureSubsetStrategy="auto", impurity="variance", \ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train homeless regressor: %.3f seconds" % elapsed_time) # make predictions using test data and calculate the accuracy food_predictions = model_food_classifier.predict(test_data_food.map(lambda x: x.features)) homeless_predictions = model_homeless_regressor.predict(test_data_homeless.map(lambda x: x.features)) homeless_trend_predictions = model_homeless_trend_regressor.predict(test_data_homeless_trend.map(lambda x: x.features)) labels_and_predictions_food = test_data_food.map(lambda x: x.label).zip(food_predictions) labels_and_predictions_homeless = test_data_homeless.map(lambda x: x.label).zip(homeless_predictions) labels_and_predictions_homeless_trend = test_data_homeless_trend.map(lambda x: x.label).zip(homeless_trend_predictions) food_acc = labels_and_predictions_food.filter(lambda x: x[0] == x[1]).count() / float(test_data_food.count()) homeless_acc = labels_and_predictions_homeless.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless.count()) homeless_trend_acc = labels_and_predictions_homeless_trend.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless_trend.count()) print("\nFood classifier accuracy: %.3f%%" % (food_acc * 100)) print("Homeless regressor accuracy: %.3f%%" % (homeless_acc
try: # get coordinates if dic['location']['coordinates'] is None: city = dic['location']['place_name'] city = city.replace(" ","%20") coor = cityPos(city) lng = coor['location']['lng'] lat = coor['location']['lat'] else: lng = dic['location']['coordinates'][0] lat = dic['location']['coordinates'][1] # get time and timestamp time = dic['created_at']['day']+ '-' + \ trans_month(dic['created_at']['month'])+ '-' + \ dic['created_at']['year']+ ' ' +dic['created_at']['time'] timeArray = t.strptime(time, "%d-%m-%Y %H:%M:%S") timestamp = t.mktime(timeArray) # to ensure at least one of homeless info and food info appears
conditional_block
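Aside: the middle above builds a "%d-%m-%Y %H:%M:%S" string from the tweet's date fields (via trans_month) and converts it to a Unix timestamp with strptime and mktime. The same steps in isolation (the date is made up):

import time as t

def trans_month(month):
    return {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
            'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
            'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}[month]

time_str = '15' + '-' + trans_month('May') + '-' + '2018' + ' ' + '09:30:00'
timeArray = t.strptime(time_str, "%d-%m-%Y %H:%M:%S")
print(t.mktime(timeArray))  # seconds since the epoch, in local time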
actions.rs
Serialize, Deserialize, Debug)] pub struct ActionDisplayInfo { pub name: String, pub health_cost: Cost, pub time_cost: Cost, pub rules_text: String, pub flavor_text: String, } impl Default for ActionDisplayInfo { fn default() -> Self { ActionDisplayInfo { name: "".to_string(), health_cost: Cost::None, time_cost: Cost::Fixed(2), rules_text: "".to_string(), flavor_text: "".to_string(), } } } #[allow(unused)] pub trait ActionTrait { /** Perform a single time-step update on this action, possibly modifying the game state. Note that the action is removed from `game` before doing this, so that both mutable references can be held at the same time, so the action still stored in `game` is temporarily invalid. */ fn update(&mut self, context: ActionUpdateContext) -> ActionStatus; fn display_info(&self) -> ActionDisplayInfo; fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw); fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Action: ActionTrait { SimpleAction, } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct SimpleAction { display_info: ActionDisplayInfo, is_card: bool, simple_action_type: SimpleActionType, progress: Time, cancel_progress: Time, } #[allow(unused)] pub trait SimpleActionTrait { fn finish(&self, context: ActionUpdateContext); fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) {} fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum SimpleActionType: SimpleActionTrait { Reshuffle, BuildConveyor, BuildMechanism, } } fn smootherstep(a: f64, b: f64, x: f64) -> f64 { let x = ((x - a) / (b - a)).clamp(0.0, 1.0); x * x * x * (x * (x * 6.0 - 15.0) + 10.0) } impl SimpleAction { pub fn new( time_cost: i32, health_cost: Option<i32>, name: &str, rules_text: &str, flavor_text: &str, is_card: bool, simple_action_type: SimpleActionType, ) -> SimpleAction { SimpleAction { display_info: ActionDisplayInfo { name: name.to_string(), health_cost: match health_cost { Some(c) => Cost::Fixed(c), None => Cost::None, }, time_cost: Cost::Fixed(time_cost), rules_text: rules_text.to_string(), flavor_text: flavor_text.to_string(), }, is_card, simple_action_type, progress: 0.0, cancel_progress: 0.0, } } fn time_cost(&self) -> f64 { match self.display_info.time_cost { Cost::Fixed(cost) => cost as f64, _ => panic!(), } } fn health_cost(&self) -> f64 { match self.display_info.health_cost { Cost::Fixed(cost) => cost as f64, Cost::None => 0.0, _ => panic!(), } } fn cooldown_time(&self) -> f64 { self.time_cost() * 0.25 } fn startup_time(&self) -> f64 { self.time_cost() * 0.25 } fn finish_time(&self) -> f64 { self.time_cost() - self.cooldown_time() } fn finished(&self) -> bool { self.progress > self.finish_time() } fn health_to_pay_by(&self, progress: f64) -> f64 { smootherstep(self.startup_time(), self.finish_time(), progress) * self.health_cost() } } impl ActionTrait for SimpleAction { fn update(&mut self, context: ActionUpdateContext) -> ActionStatus { let simple_action_type = self.simple_action_type.clone(); let canceled = context.interaction_state().canceled && !self.finished(); if canceled { self.cancel_progress += UPDATE_DURATION; } else { let was_finished = self.finished(); let health_paid_already = self.health_to_pay_by(self.progress); self.progress += UPDATE_DURATION; let health_payment = 
self.health_to_pay_by(self.progress) - health_paid_already; context.game.player.health -= health_payment; if self.finished() > was_finished { if self.is_card { match context.game.cards.selected_index { Some(index) => { if index + 1 == context.game.cards.deck.len() { context.game.cards.selected_index = None; } else { context.game.cards.selected_index = Some(index + 1); } } _ => unreachable!(), } } self.simple_action_type.finish(context); } } if self.progress > self.time_cost() || self.cancel_progress > self.cooldown_time() { ActionStatus::Completed } else { ActionStatus::StillGoing } } fn display_info(&self) -> ActionDisplayInfo { self.display_info.clone() } fn possible(&self, game: &Game) -> bool { self.simple_action_type.possible(game) } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_progress(game, draw); let a = game.player.position + FloatingVector::new(-TILE_RADIUS as f64 * 0.5, 0.0); draw.rectangle_on_map( 70, a, FloatingVector::new(TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64), "#000", ); draw.rectangle_on_map( 71, a, FloatingVector::new( TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64 * self.progress / self.time_cost(), ), "#ff0", ); } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_preview(game, draw); } } impl SimpleActionTrait for BuildMechanism { fn finish(&self, context: ActionUpdateContext) { context.game.create_mechanism( context.game.player.position.containing_tile(), self.mechanism(context.game), ); } fn possible(&self, game: &Game) -> bool { let position = game.player.position.containing_tile(); game.grid.get(position).is_some() && game.mechanism(position).is_none() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw)
} #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct BuildConveyor { pub allow_splitting: bool, } #[derive(Copy, Clone, PartialEq, Serialize, Deserialize, Debug)] struct BuildConveyorCandidate { position: GridVector, input_side: Facing, } impl BuildConveyorCandidate { fn input_position(&self) -> GridVector { self.position + self.input_side.unit_vector() * TILE_WIDTH } fn output_side(&self) -> Facing { self.input_side + Rotation::U_TURN } } impl BuildConveyor { fn candidate_valid( game: &Game, candidate: BuildConveyorCandidate, allow_splitting: bool, ) -> bool { if game.grid.get(candidate.position).is_none() { return false; }; let output_mechanism = game.mechanism(candidate.position); //debug!("{:?}", (candidate, input_mechanism, output_mechanism)); guard!(let Some(input_mechanism) = game.mechanism(candidate.input_position()) else { return false }); if !input_mechanism .mechanism_type .can_be_material_source(candidate.output_side()) { return false; } if !allow_splitting { if matches!(&input_mechanism.mechanism_type, MechanismType::Conveyor(conveyor) if conveyor.sides.iter().filter(|&&side| side == ConveyorSide::Output).count() > 0) { return false; } } if let Some(output_mechanism) = output_mechanism { guard!(let Mechanism { mechanism_type: MechanismType::Conveyor(conveyor), .. } = output_mechanism else { return false }); if conveyor.sides[candidate.input_side.as_index()] != ConveyorSide::Disconnected { return false; } } true } /// the
{ draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#666", ); }
identifier_body
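Aside: SimpleAction spreads an action's health cost over its middle phase with the smootherstep ramp, paying only the per-tick increment, so the payments telescope to exactly the health cost. A standalone Rust sketch (the costs and tick length are made-up stand-ins for the sample's constants such as UPDATE_DURATION):

fn smootherstep(a: f64, b: f64, x: f64) -> f64 {
    let x = ((x - a) / (b - a)).clamp(0.0, 1.0);
    x * x * x * (x * (x * 6.0 - 15.0) + 10.0)
}

fn main() {
    let (startup, finish, health_cost) = (0.5, 1.5, 10.0);
    let health_to_pay_by = |p: f64| smootherstep(startup, finish, p) * health_cost;
    let (mut progress, mut paid) = (0.0, 0.0);
    while progress < 2.0 {
        let already = health_to_pay_by(progress); // amount owed before this tick
        progress += 0.25; // stand-in for UPDATE_DURATION
        paid += health_to_pay_by(progress) - already; // pay only the increment
    }
    println!("total paid: {paid:.3}"); // 10.000 once progress passes finish
}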
actions.rs
Serialize, Deserialize, Debug)] pub struct ActionDisplayInfo { pub name: String, pub health_cost: Cost, pub time_cost: Cost, pub rules_text: String, pub flavor_text: String, } impl Default for ActionDisplayInfo { fn default() -> Self { ActionDisplayInfo { name: "".to_string(), health_cost: Cost::None, time_cost: Cost::Fixed(2), rules_text: "".to_string(), flavor_text: "".to_string(), } } } #[allow(unused)] pub trait ActionTrait { /** Perform a single time-step update on this action, possibly modifying the game state. Note that the action is removed from `game` before doing this, so that both mutable references can be held at the same time, so the action still stored in `game` is temporarily invalid. */ fn update(&mut self, context: ActionUpdateContext) -> ActionStatus; fn display_info(&self) -> ActionDisplayInfo; fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw); fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Action: ActionTrait { SimpleAction, } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct SimpleAction { display_info: ActionDisplayInfo, is_card: bool, simple_action_type: SimpleActionType, progress: Time, cancel_progress: Time, } #[allow(unused)] pub trait SimpleActionTrait { fn finish(&self, context: ActionUpdateContext); fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) {} fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum SimpleActionType: SimpleActionTrait { Reshuffle, BuildConveyor, BuildMechanism, } } fn smootherstep(a: f64, b: f64, x: f64) -> f64 { let x = ((x - a) / (b - a)).clamp(0.0, 1.0); x * x * x * (x * (x * 6.0 - 15.0) + 10.0) } impl SimpleAction { pub fn new( time_cost: i32, health_cost: Option<i32>, name: &str, rules_text: &str, flavor_text: &str, is_card: bool, simple_action_type: SimpleActionType, ) -> SimpleAction { SimpleAction { display_info: ActionDisplayInfo { name: name.to_string(), health_cost: match health_cost { Some(c) => Cost::Fixed(c), None => Cost::None, }, time_cost: Cost::Fixed(time_cost), rules_text: rules_text.to_string(), flavor_text: flavor_text.to_string(), }, is_card, simple_action_type, progress: 0.0, cancel_progress: 0.0, } } fn time_cost(&self) -> f64 { match self.display_info.time_cost { Cost::Fixed(cost) => cost as f64, _ => panic!(), } } fn health_cost(&self) -> f64 { match self.display_info.health_cost { Cost::Fixed(cost) => cost as f64, Cost::None => 0.0, _ => panic!(), } } fn cooldown_time(&self) -> f64 { self.time_cost() * 0.25 } fn startup_time(&self) -> f64 { self.time_cost() * 0.25 } fn finish_time(&self) -> f64 { self.time_cost() - self.cooldown_time() } fn
(&self) -> bool { self.progress > self.finish_time() } fn health_to_pay_by(&self, progress: f64) -> f64 { smootherstep(self.startup_time(), self.finish_time(), progress) * self.health_cost() } } impl ActionTrait for SimpleAction { fn update(&mut self, context: ActionUpdateContext) -> ActionStatus { let simple_action_type = self.simple_action_type.clone(); let canceled = context.interaction_state().canceled && !self.finished(); if canceled { self.cancel_progress += UPDATE_DURATION; } else { let was_finished = self.finished(); let health_paid_already = self.health_to_pay_by(self.progress); self.progress += UPDATE_DURATION; let health_payment = self.health_to_pay_by(self.progress) - health_paid_already; context.game.player.health -= health_payment; if self.finished() > was_finished { if self.is_card { match context.game.cards.selected_index { Some(index) => { if index + 1 == context.game.cards.deck.len() { context.game.cards.selected_index = None; } else { context.game.cards.selected_index = Some(index + 1); } } _ => unreachable!(), } } self.simple_action_type.finish(context); } } if self.progress > self.time_cost() || self.cancel_progress > self.cooldown_time() { ActionStatus::Completed } else { ActionStatus::StillGoing } } fn display_info(&self) -> ActionDisplayInfo { self.display_info.clone() } fn possible(&self, game: &Game) -> bool { self.simple_action_type.possible(game) } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_progress(game, draw); let a = game.player.position + FloatingVector::new(-TILE_RADIUS as f64 * 0.5, 0.0); draw.rectangle_on_map( 70, a, FloatingVector::new(TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64), "#000", ); draw.rectangle_on_map( 71, a, FloatingVector::new( TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64 * self.progress / self.time_cost(), ), "#ff0", ); } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_preview(game, draw); } } impl SimpleActionTrait for BuildMechanism { fn finish(&self, context: ActionUpdateContext) { context.game.create_mechanism( context.game.player.position.containing_tile(), self.mechanism(context.game), ); } fn possible(&self, game: &Game) -> bool { let position = game.player.position.containing_tile(); game.grid.get(position).is_some() && game.mechanism(position).is_none() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#666", ); } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct BuildConveyor { pub allow_splitting: bool, } #[derive(Copy, Clone, PartialEq, Serialize, Deserialize, Debug)] struct BuildConveyorCandidate { position: GridVector, input_side: Facing, } impl BuildConveyorCandidate { fn input_position(&self) -> GridVector { self.position + self.input_side.unit_vector() * TILE_WIDTH } fn output_side(&self) -> Facing { self.input_side + Rotation::U_TURN } } impl BuildConveyor { fn candidate_valid( game: &Game, candidate: BuildConveyorCandidate, allow_splitting: bool, ) -> bool { if game.grid.get(candidate.position).is_none() { return false; }; let output_mechanism = game.mechanism(candidate.position); //debug!("{:?}", (candidate, input_mechanism, output_mechanism)); guard!(let Some(input_mechanism) = game.mechanism(candidate.input_position()) else { return false }); if !input_mechanism .mechanism_type .can_be_material_source(candidate.output_side()) { return false; } if !allow_splitting { if 
matches!(&input_mechanism.mechanism_type, MechanismType::Conveyor(conveyor) if conveyor.sides.iter().filter(|&&side| side == ConveyorSide::Output).count() > 0) { return false; } } if let Some(output_mechanism) = output_mechanism { guard!(let Mechanism { mechanism_type: MechanismType::Conveyor(conveyor), .. } = output_mechanism else { return false }); if conveyor.sides[candidate.input_side.as_index()] != ConveyorSide::Disconnected { return false; } } true } /// the returned
finished
identifier_name
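Aside: update fires the finish effects exactly once by comparing the finished flag before and after the progress step; the sample writes this edge test as `self.finished() > was_finished`, which for bools is equivalent to `finished && !was_finished`. The pattern in isolation (the values are made up):

fn main() {
    let finish_time = 1.0;
    let mut progress: f64 = 0.0;
    for _ in 0..8 {
        let was_finished = progress > finish_time;
        progress += 0.25;
        let finished = progress > finish_time;
        if finished && !was_finished {
            // runs on the single tick where the flag flips
            println!("finish effects run once, at progress {progress}");
        }
    }
}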
actions.rs
Serialize, Deserialize, Debug)] pub struct ActionDisplayInfo { pub name: String, pub health_cost: Cost, pub time_cost: Cost, pub rules_text: String, pub flavor_text: String, } impl Default for ActionDisplayInfo { fn default() -> Self { ActionDisplayInfo { name: "".to_string(), health_cost: Cost::None, time_cost: Cost::Fixed(2), rules_text: "".to_string(), flavor_text: "".to_string(), } } } #[allow(unused)] pub trait ActionTrait { /** Perform a single time-step update on this action, possibly modifying the game state. Note that the action is removed from `game` before doing this, so that both mutable references can be held at the same time, so the action still stored in `game` is temporarily invalid. */ fn update(&mut self, context: ActionUpdateContext) -> ActionStatus; fn display_info(&self) -> ActionDisplayInfo; fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw); fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Action: ActionTrait { SimpleAction, } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct SimpleAction { display_info: ActionDisplayInfo, is_card: bool, simple_action_type: SimpleActionType, progress: Time, cancel_progress: Time, } #[allow(unused)] pub trait SimpleActionTrait { fn finish(&self, context: ActionUpdateContext); fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) {} fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum SimpleActionType: SimpleActionTrait { Reshuffle, BuildConveyor, BuildMechanism, } } fn smootherstep(a: f64, b: f64, x: f64) -> f64 { let x = ((x - a) / (b - a)).clamp(0.0, 1.0); x * x * x * (x * (x * 6.0 - 15.0) + 10.0) } impl SimpleAction { pub fn new( time_cost: i32, health_cost: Option<i32>, name: &str, rules_text: &str, flavor_text: &str, is_card: bool, simple_action_type: SimpleActionType, ) -> SimpleAction { SimpleAction { display_info: ActionDisplayInfo { name: name.to_string(), health_cost: match health_cost { Some(c) => Cost::Fixed(c), None => Cost::None, }, time_cost: Cost::Fixed(time_cost), rules_text: rules_text.to_string(), flavor_text: flavor_text.to_string(), }, is_card, simple_action_type, progress: 0.0, cancel_progress: 0.0, } } fn time_cost(&self) -> f64 { match self.display_info.time_cost { Cost::Fixed(cost) => cost as f64, _ => panic!(), } } fn health_cost(&self) -> f64 { match self.display_info.health_cost { Cost::Fixed(cost) => cost as f64, Cost::None => 0.0, _ => panic!(), } } fn cooldown_time(&self) -> f64 { self.time_cost() * 0.25 } fn startup_time(&self) -> f64 { self.time_cost() * 0.25 } fn finish_time(&self) -> f64 { self.time_cost() - self.cooldown_time() } fn finished(&self) -> bool { self.progress > self.finish_time() } fn health_to_pay_by(&self, progress: f64) -> f64 { smootherstep(self.startup_time(), self.finish_time(), progress) * self.health_cost() } } impl ActionTrait for SimpleAction { fn update(&mut self, context: ActionUpdateContext) -> ActionStatus { let simple_action_type = self.simple_action_type.clone(); let canceled = context.interaction_state().canceled && !self.finished(); if canceled { self.cancel_progress += UPDATE_DURATION; } else { let was_finished = self.finished(); let health_paid_already = self.health_to_pay_by(self.progress); self.progress += UPDATE_DURATION; let health_payment = 
self.health_to_pay_by(self.progress) - health_paid_already; context.game.player.health -= health_payment; if self.finished() && !was_finished { if self.is_card { match context.game.cards.selected_index { Some(index) => { if index + 1 == context.game.cards.deck.len() { context.game.cards.selected_index = None; } else
} _ => unreachable!(), } } self.simple_action_type.finish(context); } } if self.progress > self.time_cost() || self.cancel_progress > self.cooldown_time() { ActionStatus::Completed } else { ActionStatus::StillGoing } } fn display_info(&self) -> ActionDisplayInfo { self.display_info.clone() } fn possible(&self, game: &Game) -> bool { self.simple_action_type.possible(game) } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_progress(game, draw); let a = game.player.position + FloatingVector::new(-TILE_RADIUS as f64 * 0.5, 0.0); draw.rectangle_on_map( 70, a, FloatingVector::new(TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64), "#000", ); draw.rectangle_on_map( 71, a, FloatingVector::new( TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64 * self.progress / self.time_cost(), ), "#ff0", ); } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_preview(game, draw); } } impl SimpleActionTrait for BuildMechanism { fn finish(&self, context: ActionUpdateContext) { context.game.create_mechanism( context.game.player.position.containing_tile(), self.mechanism(context.game), ); } fn possible(&self, game: &Game) -> bool { let position = game.player.position.containing_tile(); game.grid.get(position).is_some() && game.mechanism(position).is_none() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#666", ); } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct BuildConveyor { pub allow_splitting: bool, } #[derive(Copy, Clone, PartialEq, Serialize, Deserialize, Debug)] struct BuildConveyorCandidate { position: GridVector, input_side: Facing, } impl BuildConveyorCandidate { fn input_position(&self) -> GridVector { self.position + self.input_side.unit_vector() * TILE_WIDTH } fn output_side(&self) -> Facing { self.input_side + Rotation::U_TURN } } impl BuildConveyor { fn candidate_valid( game: &Game, candidate: BuildConveyorCandidate, allow_splitting: bool, ) -> bool { if game.grid.get(candidate.position).is_none() { return false; }; let output_mechanism = game.mechanism(candidate.position); //debug!("{:?}", (candidate, input_mechanism, output_mechanism)); guard!(let Some(input_mechanism) = game.mechanism(candidate.input_position()) else { return false }); if !input_mechanism .mechanism_type .can_be_material_source(candidate.output_side()) { return false; } if !allow_splitting { if matches!(&input_mechanism.mechanism_type, MechanismType::Conveyor(conveyor) if conveyor.sides.iter().filter(|&&side| side == ConveyorSide::Output).count() > 0) { return false; } } if let Some(output_mechanism) = output_mechanism { guard!(let Mechanism { mechanism_type: MechanismType::Conveyor(conveyor), .. } = output_mechanism else { return false }); if conveyor.sides[candidate.input_side.as_index()] != ConveyorSide::Disconnected { return false; } } true } /// the
{ context.game.cards.selected_index = Some(index + 1); }
conditional_block
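The `smootherstep` helper in the actions.rs row above is the standard quintic easing polynomial, and spelling out the math may help when reading `health_to_pay_by`. A sketch, with `a`, `b`, `c` standing for the startup time, finish time, and health cost from the surrounding methods (my symbols, not the source's):

```latex
x = \operatorname{clamp}\!\left(\frac{p - a}{b - a},\, 0,\, 1\right), \qquad
S(x) = 6x^5 - 15x^4 + 10x^3
```

Because `update` charges only the increment, the health paid in one tick of length $\Delta$ is $c\,\bigl(S(p+\Delta) - S(p)\bigr)$; summed over the whole action this telescopes to exactly $c$ once progress passes the finish time.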
actions.rs
_text: flavor_text.to_string(), }, is_card, simple_action_type, progress: 0.0, cancel_progress: 0.0, } } fn time_cost(&self) -> f64 { match self.display_info.time_cost { Cost::Fixed(cost) => cost as f64, _ => panic!(), } } fn health_cost(&self) -> f64 { match self.display_info.health_cost { Cost::Fixed(cost) => cost as f64, Cost::None => 0.0, _ => panic!(), } } fn cooldown_time(&self) -> f64 { self.time_cost() * 0.25 } fn startup_time(&self) -> f64 { self.time_cost() * 0.25 } fn finish_time(&self) -> f64 { self.time_cost() - self.cooldown_time() } fn finished(&self) -> bool { self.progress > self.finish_time() } fn health_to_pay_by(&self, progress: f64) -> f64 { smootherstep(self.startup_time(), self.finish_time(), progress) * self.health_cost() } } impl ActionTrait for SimpleAction { fn update(&mut self, context: ActionUpdateContext) -> ActionStatus { let simple_action_type = self.simple_action_type.clone(); let canceled = context.interaction_state().canceled && !self.finished(); if canceled { self.cancel_progress += UPDATE_DURATION; } else { let was_finished = self.finished(); let health_paid_already = self.health_to_pay_by(self.progress); self.progress += UPDATE_DURATION; let health_payment = self.health_to_pay_by(self.progress) - health_paid_already; context.game.player.health -= health_payment; if self.finished() > was_finished { if self.is_card { match context.game.cards.selected_index { Some(index) => { if index + 1 == context.game.cards.deck.len() { context.game.cards.selected_index = None; } else { context.game.cards.selected_index = Some(index + 1); } } _ => unreachable!(), } } self.simple_action_type.finish(context); } } if self.progress > self.time_cost() || self.cancel_progress > self.cooldown_time() { ActionStatus::Completed } else { ActionStatus::StillGoing } } fn display_info(&self) -> ActionDisplayInfo { self.display_info.clone() } fn possible(&self, game: &Game) -> bool { self.simple_action_type.possible(game) } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_progress(game, draw); let a = game.player.position + FloatingVector::new(-TILE_RADIUS as f64 * 0.5, 0.0); draw.rectangle_on_map( 70, a, FloatingVector::new(TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64), "#000", ); draw.rectangle_on_map( 71, a, FloatingVector::new( TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64 * self.progress / self.time_cost(), ), "#ff0", ); } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_preview(game, draw); } } impl SimpleActionTrait for BuildMechanism { fn finish(&self, context: ActionUpdateContext) { context.game.create_mechanism( context.game.player.position.containing_tile(), self.mechanism(context.game), ); } fn possible(&self, game: &Game) -> bool { let position = game.player.position.containing_tile(); game.grid.get(position).is_some() && game.mechanism(position).is_none() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#666", ); } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct BuildConveyor { pub allow_splitting: bool, } #[derive(Copy, Clone, PartialEq, Serialize, Deserialize, Debug)] struct BuildConveyorCandidate { position: GridVector, input_side: Facing, } impl BuildConveyorCandidate { fn input_position(&self) -> GridVector { self.position + self.input_side.unit_vector() * TILE_WIDTH } fn output_side(&self) -> Facing { self.input_side + Rotation::U_TURN } } impl 
BuildConveyor { fn candidate_valid( game: &Game, candidate: BuildConveyorCandidate, allow_splitting: bool, ) -> bool { if game.grid.get(candidate.position).is_none() { return false; }; let output_mechanism = game.mechanism(candidate.position); //debug!("{:?}", (candidate, input_mechanism, output_mechanism)); guard!(let Some(input_mechanism) = game.mechanism(candidate.input_position()) else { return false }); if !input_mechanism .mechanism_type .can_be_material_source(candidate.output_side()) { return false; } if !allow_splitting { if matches!(&input_mechanism.mechanism_type, MechanismType::Conveyor(conveyor) if conveyor.sides.iter().filter(|&&side| side == ConveyorSide::Output).count() > 0) { return false; } } if let Some(output_mechanism) = output_mechanism { guard!(let Mechanism { mechanism_type: MechanismType::Conveyor(conveyor), .. } = output_mechanism else { return false }); if conveyor.sides[candidate.input_side.as_index()] != ConveyorSide::Disconnected { return false; } } true } /// the returned facing is the input side of the new conveyor fn current_target(game: &Game, allow_splitting: bool) -> Option<BuildConveyorCandidate> { let player_position = game.player.position.containing_tile(); let player_offset = game.player.position - player_position.to_floating(); let mut candidates = Vec::new(); let mut consider = |candidate, score| { if Self::candidate_valid(game, candidate, allow_splitting) { candidates.push((candidate, score)) } }; for facing in Facing::ALL_FACINGS { consider( BuildConveyorCandidate { position: player_position, input_side: facing, }, (player_offset - facing.unit_vector().to_floating()).magnitude_squared(), ); consider( BuildConveyorCandidate { position: player_position - facing.unit_vector() * TILE_WIDTH, input_side: facing, }, (player_offset - -facing.unit_vector().to_floating()).magnitude_squared(), ); } candidates .into_iter() .min_by_key(|&(_, score)| OrderedFloat(score)) .map(|(c, _)| c) } } impl SimpleActionTrait for BuildConveyor { fn finish(&self, context: ActionUpdateContext) { let candidate = Self::current_target(context.game, self.allow_splitting).unwrap(); let mut sides = [ConveyorSide::Disconnected; 4]; sides[candidate.input_side.as_index()] = ConveyorSide::Input; context.game.create_mechanism( candidate.position, Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, last_sent: Facing::from_index(0), }), }, ); context .game .mutate_mechanism(candidate.input_position(), |mechanism| { if let Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, .. }), .. } = mechanism { sides[candidate.output_side().as_index()] = ConveyorSide::Output; } }); } fn possible(&self, game: &Game) -> bool { Self::current_target(game, self.allow_splitting).is_some() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { if let Some(candidate) = Self::current_target(game, self.allow_splitting) { draw.rectangle_on_map( 5, candidate.position.to_floating(), TILE_SIZE.to_floating(), "#666", ); draw.rectangle_on_map( 5, candidate.input_position().to_floating(), TILE_SIZE.to_floating(), "#555", ); } else { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#555", ); } } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct Reshuffle; impl SimpleActionTrait for Reshuffle { fn finish(&self, context: ActionUpdateContext) { let cards = &mut context.game.cards; cards.deck.shuffle(&mut rand::thread_rng()); cards.selected_index = Some(0); }
}
random_line_split
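In `current_target` above, each of the eight candidate conveyor placements (four facings, on either the player's tile or the neighbouring tile) is scored by the squared distance between the player's offset within the tile and the candidate facing's unit vector $u_f$, and the smallest score among valid candidates wins. As a worked equation in my notation (minus sign for the candidate on the player's own tile, plus for the one a tile away):

```latex
\mathrm{score}(c) = \bigl\lVert \mathrm{offset} \mp u_f \bigr\rVert^2,
\qquad
c^{*} = \operatorname*{arg\,min}_{c\ \mathrm{valid}} \mathrm{score}(c)
```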
train.py
train_features, dev_features, test_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator): model.zero_grad() for step, batch in tqdm(enumerate(train_dataloader)): model.train() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2], 'entity_pos': batch[3], 'hts': batch[4], } outputs = model(**inputs) loss = outputs[0] / args.gradient_accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() num_steps += 1 #wandb.log({"loss": loss.item()}, step=num_steps) if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): ''' dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") #wandb.log(dev_output, step=num_steps) print(dev_output) if dev_score > best_score: best_score = dev_score pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if args.save_path != "": torch.save(model.state_dict(), args.save_path) ''' if args.save_path != "": torch.save(model.state_dict(), args.save_path+'_'+str(epoch+1)+'_'+str(step)+'.pt') return num_steps new_layer = ["extractor", "bilinear"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": 1e-4}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) ans = to_official(preds, features) best_f1 = 0.0 best_f1_ign = 0.0 if len(ans) > 0: best_f1, _, best_f1_ign, _ = official_evaluate(ans, args.data_dir) output = { tag + "_F1": best_f1 * 100, tag + "_F1_ign": best_f1_ign * 100, } return best_f1, output def report(args, model, features): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = 
{'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) preds = to_official(preds, features) return preds def main():
parser.add_argument("--train_batch_size", default=4, type=int, help="Batch size for training.") parser.add_argument("--test_batch_size", default=8, type=int, help="Batch size for testing.") parser.add_argument("--gradient_accumulation_steps", default=1, type=int, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--num_labels", default=4, type=int, help="Max number of labels in prediction.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.") parser.add_argument("--num_train_epochs", default=30.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--evaluation_steps", default=7500, type=int, help="Number of training steps between evaluations.") parser.add_argument("--seed", type=int, default=66, help="random seed for initialization") parser.add_argument("--num_class", type=int, default=97, help="Number of relation types in dataset.") args = parser.parse_args() #wandb.init(project="DocRED") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) read = read_correction #read = read_docred train_file = os.path.join(args.data_dir, args.train_file) dev_file = os.path.join(args.data_dir, args.dev_file) test_file = os.path.join(args.data_dir, args.test_file) train_file_p = train_file.replace('.json', '.pkl') dev_file_p = dev_file.replace('.json', '.pkl') #test_file_p = test_file.replace('.json', 'pkl') if os.path.exists(train_file_p): with open(train_file_p, 'rb') as f: train_features = pickle.load(f) else: train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length) with open(train_file_p, 'wb') as f: pickle.dump(train_features, f) if os.path.exists(dev_file_p): with open(dev_file_p, 'rb') as
parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default="./dataset/docred", type=str) parser.add_argument("--transformer_type", default="bert", type=str) parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str) parser.add_argument("--train_file", default="train_annotated.json", type=str) parser.add_argument("--dev_file", default="dev.json", type=str) parser.add_argument("--test_file", default="test.json", type=str) parser.add_argument("--save_path", default="./ckpt/base.pt", type=str) parser.add_argument("--load_path", default="", type=str) parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.")
identifier_body
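The training setup above derives its step counts as `total_steps = len(loader) * epochs // accum` and `warmup_steps = ratio * total_steps`, then hands both to `get_linear_schedule_with_warmup`. A minimal, self-contained sketch of that schedule; the stand-in model and the concrete step counts are mine, not the script's:

```python
import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(4, 2)                      # stand-in for the relation model
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
total_steps, warmup_steps = 1000, 60               # e.g. warmup_ratio = 0.06
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps)

lrs = []
for _ in range(total_steps):
    optimizer.step()                               # backward() omitted in this sketch
    scheduler.step()
    lrs.append(scheduler.get_last_lr()[0])
# lr climbs linearly to 5e-5 over the first 60 steps, then decays linearly to 0.
```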
train.py
train_features, dev_features, test_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator): model.zero_grad() for step, batch in tqdm(enumerate(train_dataloader)): model.train() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2], 'entity_pos': batch[3], 'hts': batch[4], } outputs = model(**inputs) loss = outputs[0] / args.gradient_accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() num_steps += 1 #wandb.log({"loss": loss.item()}, step=num_steps) if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): ''' dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") #wandb.log(dev_output, step=num_steps) print(dev_output) if dev_score > best_score: best_score = dev_score pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if args.save_path != "": torch.save(model.state_dict(), args.save_path) ''' if args.save_path != "": torch.save(model.state_dict(), args.save_path+'_'+str(epoch+1)+'_'+str(step)+'.pt') return num_steps new_layer = ["extractor", "bilinear"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": 1e-4}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def
(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) ans = to_official(preds, features) best_f1 = 0.0 best_f1_ign = 0.0 if len(ans) > 0: best_f1, _, best_f1_ign, _ = official_evaluate(ans, args.data_dir) output = { tag + "_F1": best_f1 * 100, tag + "_F1_ign": best_f1_ign * 100, } return best_f1, output def report(args, model, features): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) preds = to_official(preds, features) return preds def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default="./dataset/docred", type=str) parser.add_argument("--transformer_type", default="bert", type=str) parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str) parser.add_argument("--train_file", default="train_annotated.json", type=str) parser.add_argument("--dev_file", default="dev.json", type=str) parser.add_argument("--test_file", default="test.json", type=str) parser.add_argument("--save_path", default="./ckpt/base.pt", type=str) parser.add_argument("--load_path", default="", type=str) parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--train_batch_size", default=4, type=int, help="Batch size for training.") parser.add_argument("--test_batch_size", default=8, type=int, help="Batch size for testing.") parser.add_argument("--gradient_accumulation_steps", default=1, type=int, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--num_labels", default=4, type=int, help="Max number of labels in prediction.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.") parser.add_argument("--num_train_epochs", default=30.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--evaluation_steps", default=7500, type=int, help="Number of training steps between evaluations.") parser.add_argument("--seed", type=int, default=66, help="random seed for initialization") parser.add_argument("--num_class", type=int, default=97, help="Number of relation types in dataset.") args = parser.parse_args() #wandb.init(project="DocRED") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) read = read_correction #read = read_docred train_file = os.path.join(args.data_dir, args.train_file) dev_file = os.path.join(args.data_dir, args.dev_file) test_file = os.path.join(args.data_dir, args.test_file) train_file_p = train_file.replace('.json', '.pkl') dev_file_p = dev_file.replace('.json', '.pkl') #test_file_p = test_file.replace('.json', 'pkl') if os.path.exists(train_file_p): with open(train_file_p, 'rb') as f: train_features = pickle.load(f) else: train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length) with open(train_file_p, 'wb') as f: pickle.dump(train_features, f) if os.path.exists(dev_file_p): with open(dev_file_p, 'rb')
evaluate
identifier_name
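The optimizer above splits parameters into two groups: anything whose name contains "extractor" or "bilinear" (the newly initialised layers) trains at a fixed 1e-4, while everything else, i.e. the pretrained encoder, falls back to the group default `args.learning_rate`. A minimal sketch of the pattern with stand-in modules:

```python
import torch

model = torch.nn.ModuleDict({
    "bert": torch.nn.Linear(8, 8),       # stand-in for the pretrained encoder
    "extractor": torch.nn.Linear(8, 8),  # stand-in for a newly initialised head
})
new_layer = ["extractor", "bilinear"]
grouped = [
    {"params": [p for n, p in model.named_parameters()
                if not any(nd in n for nd in new_layer)]},          # uses the default lr below
    {"params": [p for n, p in model.named_parameters()
                if any(nd in n for nd in new_layer)], "lr": 1e-4},  # new layers train faster
]
optimizer = torch.optim.AdamW(grouped, lr=5e-5, eps=1e-6)
```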
train.py
train_features, dev_features, test_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator): model.zero_grad() for step, batch in tqdm(enumerate(train_dataloader)): model.train() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2], 'entity_pos': batch[3], 'hts': batch[4], } outputs = model(**inputs) loss = outputs[0] / args.gradient_accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() num_steps += 1 #wandb.log({"loss": loss.item()}, step=num_steps) if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0):
dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") #wandb.log(dev_output, step=num_steps) print(dev_output) if dev_score > best_score: best_score = dev_score pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if args.save_path != "": torch.save(model.state_dict(), args.save_path) ''' if args.save_path != "": torch.save(model.state_dict(), args.save_path+'_'+str(epoch+1)+'_'+str(step)+'.pt') return num_steps new_layer = ["extractor", "bilinear"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": 1e-4}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) ans = to_official(preds, features) best_f1 = 0.0 best_f1_ign = 0.0 if len(ans) > 0: best_f1, _, best_f1_ign, _ = official_evaluate(ans, args.data_dir) output = { tag + "_F1": best_f1 * 100, tag + "_F1_ign": best_f1_ign * 100, } return best_f1, output def report(args, model, features): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) preds = to_official(preds, features) return preds def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default="./dataset/docred", type=str) parser.add_argument("--transformer_type", default="bert", type=str) parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str) parser.add_argument("--train_file", default="train_annotated.json", type=str) parser.add_argument("--dev_file", default="dev.json", type=str) parser.add_argument("--test_file", default="test.json", type=str) parser.add_argument("--save_path", default="./ckpt/base.pt", type=str) parser.add_argument("--load_path", default="", type=str) parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--train_batch_size", default=4, type=int, help="Batch size for training.") parser.add_argument("--test_batch_size", default=8, type=int, help="Batch size for testing.") parser.add_argument("--gradient_accumulation_steps", default=1, type=int, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--num_labels", default=4, type=int, help="Max number of labels in prediction.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.") parser.add_argument("--num_train_epochs", default=30.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--evaluation_steps", default=7500, type=int, help="Number of training steps between evaluations.") parser.add_argument("--seed", type=int, default=66, help="random seed for initialization") parser.add_argument("--num_class", type=int, default=97, help="Number of relation types in dataset.") args = parser.parse_args() #wandb.init(project="DocRED") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) read = read_correction #read = read_docred train_file = os.path.join(args.data_dir, args.train_file) dev_file = os.path.join(args.data_dir, args.dev_file) test_file = os.path.join(args.data_dir, args.test_file) train_file_p = train_file.replace('.json', '.pkl') dev_file_p = dev_file.replace('.json', '.pkl') #test_file_p = test_file.replace('.json', 'pkl') if os.path.exists(train_file_p): with open(train_file_p, 'rb') as f: train_features = pickle.load(f) else: train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length) with open(train_file_p, 'wb') as f: pickle.dump(train_features, f) if os.path.exists(dev_file_p): with open(dev_file_p, 'rb') as
'''
random_line_split
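`main` above caches the expensive `read(...)` feature extraction by pickling the result next to the raw JSON and reloading it on later runs. The same pattern distilled into a helper; `load_or_build` is a hypothetical name, not the script's:

```python
import os
import pickle

def load_or_build(json_path, build):
    """Cache expensive feature extraction beside the raw file, as train.py does."""
    pkl_path = json_path.replace(".json", ".pkl")
    if os.path.exists(pkl_path):
        with open(pkl_path, "rb") as f:
            return pickle.load(f)
    features = build(json_path)          # e.g. read(json_path, tokenizer, ...)
    with open(pkl_path, "wb") as f:
        pickle.dump(features, f)
    return features
```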
train.py
train_features, dev_features, test_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator): model.zero_grad() for step, batch in tqdm(enumerate(train_dataloader)): model.train() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2], 'entity_pos': batch[3], 'hts': batch[4], } outputs = model(**inputs) loss = outputs[0] / args.gradient_accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() num_steps += 1 #wandb.log({"loss": loss.item()}, step=num_steps) if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): ''' dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") #wandb.log(dev_output, step=num_steps) print(dev_output) if dev_score > best_score: best_score = dev_score pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if args.save_path != "": torch.save(model.state_dict(), args.save_path) ''' if args.save_path != "": torch.save(model.state_dict(), args.save_path+'_'+str(epoch+1)+'_'+str(step)+'.pt') return num_steps new_layer = ["extractor", "bilinear"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": 1e-4}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) ans = to_official(preds, features) best_f1 = 0.0 best_f1_ign = 0.0 if len(ans) > 0:
output = { tag + "_F1": best_f1 * 100, tag + "_F1_ign": best_f1_ign * 100, } return best_f1, output def report(args, model, features): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) preds = to_official(preds, features) return preds def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default="./dataset/docred", type=str) parser.add_argument("--transformer_type", default="bert", type=str) parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str) parser.add_argument("--train_file", default="train_annotated.json", type=str) parser.add_argument("--dev_file", default="dev.json", type=str) parser.add_argument("--test_file", default="test.json", type=str) parser.add_argument("--save_path", default="./ckpt/base.pt", type=str) parser.add_argument("--load_path", default="", type=str) parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--train_batch_size", default=4, type=int, help="Batch size for training.") parser.add_argument("--test_batch_size", default=8, type=int, help="Batch size for testing.") parser.add_argument("--gradient_accumulation_steps", default=1, type=int, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--num_labels", default=4, type=int, help="Max number of labels in prediction.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.") parser.add_argument("--num_train_epochs", default=30.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--evaluation_steps", default=7500, type=int, help="Number of training steps between evaluations.") parser.add_argument("--seed", type=int, default=66, help="random seed for initialization") parser.add_argument("--num_class", type=int, default=97, help="Number of relation types in dataset.") args = parser.parse_args() #wandb.init(project="DocRED") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) read = read_correction #read = read_docred train_file = os.path.join(args.data_dir, 
args.train_file) dev_file = os.path.join(args.data_dir, args.dev_file) test_file = os.path.join(args.data_dir, args.test_file) train_file_p = train_file.replace('.json', '.pkl') dev_file_p = dev_file.replace('.json', '.pkl') #test_file_p = test_file.replace('.json', 'pkl') if os.path.exists(train_file_p): with open(train_file_p, 'rb') as f: train_features = pickle.load(f) else: train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length) with open(train_file_p, 'wb') as f: pickle.dump(train_features, f) if os.path.exists(dev_file_p): with open(dev_file_p, 'rb')
best_f1, _, best_f1_ign, _ = official_evaluate(ans, args.data_dir)
conditional_block
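Throughout the training loop above, the loss is divided by `gradient_accumulation_steps` and the optimizer only steps when `step % args.gradient_accumulation_steps == 0`; the division keeps the accumulated gradient equal to that of one large batch. A toy, apex-free sketch of the idea (note the source's modulo test also fires at step 0, whereas the more common idiom shown here tests `(step + 1) % accum`):

```python
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accum = 4                                          # hypothetical accumulation factor
data = [torch.randn(2, 4) for _ in range(8)]

for step, x in enumerate(data):
    loss = model(x).pow(2).mean() / accum          # scale so summed grads match one big batch
    loss.backward()                                # grads accumulate across iterations
    if (step + 1) % accum == 0:
        optimizer.step()
        optimizer.zero_grad()
```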
Set.go
) Remove(removeMem setMember) { delete(s.mem, removeMem) } // ----------------------------------------------------------------------------- // Set equality: Two sets are equal if they have the same elements. func Equals(oneSet, otherSet Set) bool { if len(oneSet.mem) != len(otherSet.mem) { return false } /* Obviously, if the sets have different numbers of elements, they are not equal. */ for k := range oneSet.mem { if otherSet.mem[k] == false { return false } } /* If the sets have an equal number of elements and an element in one of the sets is not in the other set, they are not equal. */ return true } // ----------------------------------------------------------------------------- /* The union of two sets is the set of members of either (or both) sets. This function is a method which acts on a set and returns the union of it and another set specified in the argument of the function. */ func (s Set) Union(otherSet Set) Set { unionSet := NewSet() for k := range s.mem { unionSet.mem[k] = true } // Adds the elements of the set to the union set. for k := range otherSet.mem { unionSet.mem[k] = true } // Adds the elements of the set in the argument to the union set. return unionSet } // ----------------------------------------------------------------------------- /* The intersection of two sets is the set of members of both sets. This function is a method which acts on a set and returns the intersection of it and another set specified in the argument of the function. */ func (s Set) Intersection(otherSet Set) Set { interSet := NewSet() for k := range s.mem { if otherSet.mem[k] == true { interSet.mem[k] = true // Adds an element to the intersection set if it is a member of both sets. } } return interSet } // ----------------------------------------------------------------------------- /* The relative complement of a set relative to another set (call it universe) is the set of members of set which are not members of universe. This method returns the relative complement of the set it acts upon relative to the set specified in the argument.*/ func (s Set) RelCompl(universe Set) Set { relSet := NewSet() for k := range s.mem { if universe.mem[k] == false { relSet.mem[k] = true // Adds a member to the relative set if it is in s but not in universe. } } return relSet } // ----------------------------------------------------------------------------- // Return a slice containing all subsets of mySet. func PowerSet(mySet Set) []Set { powSet := []Set{} mySlice := []setMember{} mySubSlices := [][]setMember{} /* The function first breaks the set down into a slice which is easier to work with. The problem then amounts to finding all subslices of this slice. This can be done by realizing that, since the power set of a set with N elements has 2^N elements, these elements can be represented by binary strings with N digits. This is what the subset() function below does. */ for k := range mySet.mem { mySlice = append(mySlice, k) } mySubSlices = subsets(mySlice) // Generate the slice containing all subslices of this slice. for i := 0; i < len(mySubSlices); i++ { newSubSet := NewSet() for k := 0; k < len(mySubSlices[i]); k++ { newSubSet.mem[mySubSlices[i][k]] = true } powSet = append(powSet, newSubSet) } return powSet } /* This function takes a slice as input and returns a slice containing all subslices of this slice. 
*/ func subsets(mySlice []setMember) [][]setMember { indexSlice := []string{} subSetSlice := [][]setMember{} binaryLen := "%0" + strconv.Itoa(len(mySlice)) + "b" for i := 0; i < pow(2, len(mySlice)); i++ { indexSlice = append(indexSlice, fmt.Sprintf(binaryLen, i)) } /* Generate all binary numbers from 0 to 2^len(mySlice)-1 and set their number of digits to len(mySlice). */ for i := 0; i < pow(2, len(mySlice)); i++ { subsets := []setMember{} for k := 0; k < len(mySlice); k++ { if indexSlice[i][k] == byte('1') { subsets = append(subsets, mySlice[k]) } /* Every binary number from 0 to 2^(len(mySLice))-1 represents a subset of mySlice: iterate through every binary number, and if the k:th element of the binary number is 1, append the k:th element of mySlice to subsets, which represents a given subset. When all the binary numbers are iterated through, every possible subset of mySlice will have been generated through this process, since there are 2^len(mySlice) unique binary numbers and 2^len(mySlice) subsets of mySlice, counting the empty slice. */ } subSetSlice = append(subSetSlice, subsets) // Append the generated subset to the subSetSlice. } return subSetSlice } /* There is no native integer exponential method in Go. I used this basic implementation I found on: http://grokbase.com/t/gg/golang-nuts/139n3edtq3/go-nuts-integer-exponentiation-in-go */ func pow(a, exp int) int { p := 1 for exp > 0 { if exp&1 != 0 { p *= a } exp >>= 1 a *= a } return p } // Testing some conditions put on the power set. func testPowerSet(subSetSlice []Set, theSet Set) { allElemsInPowSet := NewSet() if len(subSetSlice) != pow(2, len(theSet.mem)) { panic("Error in number of elements in PowerSet") } // Check if the length of the set relative to the power set align. for i := 0; i < len(subSetSlice); i++ { for k := range subSetSlice[i].mem { allElemsInPowSet.Append(k) // Collect all elements found in the power set. if theSet.mem[k] == false { panic("Error in allowed set members in PowerSet") // Checks if some elements of the power set are not in the set. } } } for i := range allElemsInPowSet.mem { if theSet.mem[i] == false { panic("Error: elements missing in PowerSet") } } // Check if some elements of the set are not in the power set. } // ----------------------------------------------------------------------------- /* Return a string representation of the set. */ func (s *Set)
() string { mySetSlice := []setMember{} for i := range s.mem { mySetSlice = append(mySetSlice, i) } myString := fmt.Sprint(mySetSlice) return myString } func testSetString(theSetString string, theSet Set) { counter := 0 for i := range theSet.mem { for k := 0; k < len(theSetString); k++ { if fmt.Sprint(i) == string(theSetString[k]) { counter += 1 } } } if counter != len(theSet.mem) { panic("Error in SetString: wrong number of elements") } // Seeing if every element of theSet can be found as a string in theSetString. } // ***************************************************************************** // TESTING: func main() { theSet := NewSet() theOtherSet := NewSet() emptySet := NewSet() // --------------------------------------------------------------------------- // TESTING OF EMPTY SET AND ONE-ELEMENT SET OPERATIONS: if Equals(theSet, theOtherSet) != true { panic("Error handling empty set in Equals") } testPowerSet(PowerSet(emptySet), emptySet) theSet.Append(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theSet.Intersection(emptySet), emptySet) != true { panic("Error handling intersection of set with emptySet") } if Equals(theSet.Union(emptySet), theSet) != true { panic("Error handling union of set with emptySet") } // Testing union and intersection on emptySet. if Equals(theSet, theOtherSet) != false { panic("Error handling comparing empty to one-element set in Equals") } if Equals(theSet, theSet) != true { panic("Error handling comparing set to itself in Equals") } theOtherSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error handling comparing one-element sets in Equals") } // Testing Equals with one and zero-element sets. theSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error in handling same-element appending in Append") } testPowerSet(PowerSet(theSet), theSet) if len(PowerSet(emptySet)) != 1 { panic("Error in handling power set of empty set") }
SetString
identifier_name
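`PowerSet`/`subsets` in the Set.go row above enumerate all 2^N subsets by formatting each integer 0..2^N-1 as an N-digit binary string and taking element k exactly when digit k is 1. The same idea with bit operations instead of strings, sketched in Python for brevity:

```python
def power_set(xs):
    # Each integer i in [0, 2**n) is a bitmask: bit k set <=> xs[k] is in the subset.
    n = len(xs)
    return [[xs[k] for k in range(n) if (i >> k) & 1] for i in range(2 ** n)]

subsets = power_set([1, 2, 3])
assert len(subsets) == 2 ** 3      # 8 subsets, including [] and [1, 2, 3]
```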
Set.go
Set) Remove(removeMem setMember) { delete(s.mem, removeMem) } // ----------------------------------------------------------------------------- // Set equality: Two sets are equal if they have the same elements. func Equals(oneSet, otherSet Set) bool { if len(oneSet.mem) != len(otherSet.mem) { return false } /* Obviously, if the sets have different numbers of elements, they are not equal. */ for k := range oneSet.mem { if otherSet.mem[k] == false { return false } } /* If the sets have an equal number of elements and an element in one of the sets is not in the other set, they are not equal. */ return true } // ----------------------------------------------------------------------------- /* The union of two sets is the set of members of either (or both) sets. This function is a method which acts on a set and returns the union of it and
unionSet := NewSet() for k := range s.mem { unionSet.mem[k] = true } // Adds the elements of the set to the union set. for k := range otherSet.mem { unionSet.mem[k] = true } // Adds the elements of the set in the argument to the union set. return unionSet } // ----------------------------------------------------------------------------- /* The intersection of two sets is the set of members of both sets. This function is a method which acts on a set and returns the intersection of it and another set specified in the argument of the function. */ func (s Set) Intersection(otherSet Set) Set { interSet := NewSet() for k := range s.mem { if otherSet.mem[k] == true { interSet.mem[k] = true // Adds an element to the intersection set if it is a member of both sets. } } return interSet } // ----------------------------------------------------------------------------- /* The relative complement of a set relative to another set (call it universe) is the set of members of set which are not members of universe. This method returns the relative complement of the set it acts upon relative to the set specified in the argument.*/ func (s Set) RelCompl(universe Set) Set { relSet := NewSet() for k := range s.mem { if universe.mem[k] == false { relSet.mem[k] = true // Adds a member to the relative set if it is in s but not in universe. } } return relSet } // ----------------------------------------------------------------------------- // Return a slice containing all subsets of mySet. func PowerSet(mySet Set) []Set { powSet := []Set{} mySlice := []setMember{} mySubSlices := [][]setMember{} /* The function first breaks the set down into a slice which is easier to work with. The problem then amounts to finding all subslices of this slice. This can be done by realizing that, since the power set of a set with N elements has 2^N elements, these elements can be represented by binary strings with N digits. This is what the subset() function below does. */ for k := range mySet.mem { mySlice = append(mySlice, k) } mySubSlices = subsets(mySlice) // Generate the slice containing all subslices of this slice. for i := 0; i < len(mySubSlices); i++ { newSubSet := NewSet() for k := 0; k < len(mySubSlices[i]); k++ { newSubSet.mem[mySubSlices[i][k]] = true } powSet = append(powSet, newSubSet) } return powSet } /* This function takes a slice as input and returns a slice containing all subslices of this slice. */ func subsets(mySlice []setMember) [][]setMember { indexSlice := []string{} subSetSlice := [][]setMember{} binaryLen := "%0" + strconv.Itoa(len(mySlice)) + "b" for i := 0; i < pow(2, len(mySlice)); i++ { indexSlice = append(indexSlice, fmt.Sprintf(binaryLen, i)) } /* Generate all binary numbers from 0 to 2^len(mySlice)-1 and set their number of digits to len(mySlice). */ for i := 0; i < pow(2, len(mySlice)); i++ { subsets := []setMember{} for k := 0; k < len(mySlice); k++ { if indexSlice[i][k] == byte('1') { subsets = append(subsets, mySlice[k]) } /* Every binary number from 0 to 2^(len(mySLice))-1 represents a subset of mySlice: iterate through every binary number, and if the k:th element of the binary number is 1, append the k:th element of mySlice to subsets, which represents a given subset. When all the binary numbers are iterated through, every possible subset of mySlice will have been generated through this process, since there are 2^len(mySlice) unique binary numbers and 2^len(mySlice) subsets of mySlice, counting the empty slice. 
*/ } subSetSlice = append(subSetSlice, subsets) // Append the generated subset to the subSetSlice. } return subSetSlice } /* There is no native integer exponential method in Go. I used this basic implementation I found on: http://grokbase.com/t/gg/golang-nuts/139n3edtq3/go-nuts-integer-exponentiation-in-go */ func pow(a, exp int) int { p := 1 for exp > 0 { if exp&1 != 0 { p *= a } exp >>= 1 a *= a } return p } // Testing some conditions put on the power set. func testPowerSet(subSetSlice []Set, theSet Set) { allElemsInPowSet := NewSet() if len(subSetSlice) != pow(2, len(theSet.mem)) { panic("Error in number of elements in PowerSet") } // Check if the length of the set relative to the power set align. for i := 0; i < len(subSetSlice); i++ { for k := range subSetSlice[i].mem { allElemsInPowSet.Append(k) // Collect all elements found in the power set. if theSet.mem[k] == false { panic("Error in allowed set members in PowerSet") // Checks if some elements of the power set are not in the set. } } } for i := range allElemsInPowSet.mem { if theSet.mem[i] == false { panic("Error: elements missing in PowerSet") } } // Check if some elements of the set are not in the power set. } // ----------------------------------------------------------------------------- /* Return a string representation of the set. */ func (s *Set) SetString() string { mySetSlice := []setMember{} for i := range s.mem { mySetSlice = append(mySetSlice, i) } myString := fmt.Sprint(mySetSlice) return myString } func testSetString(theSetString string, theSet Set) { counter := 0 for i := range theSet.mem { for k := 0; k < len(theSetString); k++ { if fmt.Sprint(i) == string(theSetString[k]) { counter += 1 } } } if counter != len(theSet.mem) { panic("Error in SetString: wrong number of elements") } // Seeing if every element of theSet can be found as a string in theSetString. } // ***************************************************************************** // TESTING: func main() { theSet := NewSet() theOtherSet := NewSet() emptySet := NewSet() // --------------------------------------------------------------------------- // TESTING OF EMPTY SET AND ONE-ELEMENT SET OPERATIONS: if Equals(theSet, theOtherSet) != true { panic("Error handling empty set in Equals") } testPowerSet(PowerSet(emptySet), emptySet) theSet.Append(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theSet.Intersection(emptySet), emptySet) != true { panic("Error handling intersection of set with emptySet") } if Equals(theSet.Union(emptySet), theSet) != true { panic("Error handling union of set with emptySet") } // Testing union and intersection on emptySet. if Equals(theSet, theOtherSet) != false { panic("Error handling comparing empty to one-element set in Equals") } if Equals(theSet, theSet) != true { panic("Error handling comparing set to itself in Equals") } theOtherSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error handling comparing one-element sets in Equals") } // Testing Equals with one and zero-element sets. theSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error in handling same-element appending in Append") } testPowerSet(PowerSet(theSet), theSet) if len(PowerSet(emptySet)) != 1 { panic("Error in handling power set of empty set") } if
another set specified in the argument of the function. */ func (s Set) Union(otherSet Set) Set {
random_line_split
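Like the Go code above, which stores a set as `map[setMember]bool`, union copies the keys of both maps, intersection keeps keys present in both, and the relative complement keeps keys of `s` absent from `universe`. A compact dictionary-based mirror, again in Python:

```python
s        = {1: True, 2: True}
universe = {2: True, 3: True}

union        = {**s, **universe}                                   # members of either set
intersection = {k: True for k in s if universe.get(k, False)}      # members of both
rel_compl    = {k: True for k in s if not universe.get(k, False)}  # in s but not universe

assert union == {1: True, 2: True, 3: True}
assert intersection == {2: True}
assert rel_compl == {1: True}
```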
Set.go
) Remove(removeMem setMember)
// ----------------------------------------------------------------------------- // Set equality: Two sets are equal if they have the same elements. func Equals(oneSet, otherSet Set) bool { if len(oneSet.mem) != len(otherSet.mem) { return false } /* Obviously, if the sets have different numbers of elements, they are not equal. */ for k := range oneSet.mem { if otherSet.mem[k] == false { return false } } /* If the sets have an equal number of elements and an element in one of the sets is not in the other set, they are not equal. */ return true } // ----------------------------------------------------------------------------- /* The union of two sets is the set of members of either (or both) sets. This function is a method which acts on a set and returns the union of it and another set specified in the argument of the function. */ func (s Set) Union(otherSet Set) Set { unionSet := NewSet() for k := range s.mem { unionSet.mem[k] = true } // Adds the elements of the set to the union set. for k := range otherSet.mem { unionSet.mem[k] = true } // Adds the elements of the set in the argument to the union set. return unionSet } // ----------------------------------------------------------------------------- /* The intersection of two sets is the set of members of both sets. This function is a method which acts on a set and returns the intersection of it and another set specified in the argument of the function. */ func (s Set) Intersection(otherSet Set) Set { interSet := NewSet() for k := range s.mem { if otherSet.mem[k] == true { interSet.mem[k] = true // Adds an element to the intersection set if it is a member of both sets. } } return interSet } // ----------------------------------------------------------------------------- /* The relative complement of a set relative to another set (call it universe) is the set of members of set which are not members of universe. This method returns the relative complement of the set it acts upon relative to the set specified in the argument.*/ func (s Set) RelCompl(universe Set) Set { relSet := NewSet() for k := range s.mem { if universe.mem[k] == false { relSet.mem[k] = true // Adds a member to the relative set if it is in s but not in universe. } } return relSet } // ----------------------------------------------------------------------------- // Return a slice containing all subsets of mySet. func PowerSet(mySet Set) []Set { powSet := []Set{} mySlice := []setMember{} mySubSlices := [][]setMember{} /* The function first breaks the set down into a slice which is easier to work with. The problem then amounts to finding all subslices of this slice. This can be done by realizing that, since the power set of a set with N elements has 2^N elements, these elements can be represented by binary strings with N digits. This is what the subset() function below does. */ for k := range mySet.mem { mySlice = append(mySlice, k) } mySubSlices = subsets(mySlice) // Generate the slice containing all subslices of this slice. for i := 0; i < len(mySubSlices); i++ { newSubSet := NewSet() for k := 0; k < len(mySubSlices[i]); k++ { newSubSet.mem[mySubSlices[i][k]] = true } powSet = append(powSet, newSubSet) } return powSet } /* This function takes a slice as input and returns a slice containing all subslices of this slice. 
*/ func subsets(mySlice []setMember) [][]setMember { indexSlice := []string{} subSetSlice := [][]setMember{} binaryLen := "%0" + strconv.Itoa(len(mySlice)) + "b" for i := 0; i < pow(2, len(mySlice)); i++ { indexSlice = append(indexSlice, fmt.Sprintf(binaryLen, i)) } /* Generate all binary numbers from 0 to 2^len(mySlice)-1 and set their number of digits to len(mySlice). */ for i := 0; i < pow(2, len(mySlice)); i++ { subsets := []setMember{} for k := 0; k < len(mySlice); k++ { if indexSlice[i][k] == byte('1') { subsets = append(subsets, mySlice[k]) } /* Every binary number from 0 to 2^(len(mySLice))-1 represents a subset of mySlice: iterate through every binary number, and if the k:th element of the binary number is 1, append the k:th element of mySlice to subsets, which represents a given subset. When all the binary numbers are iterated through, every possible subset of mySlice will have been generated through this process, since there are 2^len(mySlice) unique binary numbers and 2^len(mySlice) subsets of mySlice, counting the empty slice. */ } subSetSlice = append(subSetSlice, subsets) // Append the generated subset to the subSetSlice. } return subSetSlice } /* There is no native integer exponential method in Go. I used this basic implementation I found on: http://grokbase.com/t/gg/golang-nuts/139n3edtq3/go-nuts-integer-exponentiation-in-go */ func pow(a, exp int) int { p := 1 for exp > 0 { if exp&1 != 0 { p *= a } exp >>= 1 a *= a } return p } // Testing some conditions put on the power set. func testPowerSet(subSetSlice []Set, theSet Set) { allElemsInPowSet := NewSet() if len(subSetSlice) != pow(2, len(theSet.mem)) { panic("Error in number of elements in PowerSet") } // Check if the length of the set relative to the power set align. for i := 0; i < len(subSetSlice); i++ { for k := range subSetSlice[i].mem { allElemsInPowSet.Append(k) // Collect all elements found in the power set. if theSet.mem[k] == false { panic("Error in allowed set members in PowerSet") // Checks if some elements of the power set are not in the set. } } } for i := range allElemsInPowSet.mem { if theSet.mem[i] == false { panic("Error: elements missing in PowerSet") } } // Check if some elements of the set are not in the power set. } // ----------------------------------------------------------------------------- /* Return a string representation of the set. */ func (s *Set) SetString() string { mySetSlice := []setMember{} for i := range s.mem { mySetSlice = append(mySetSlice, i) } myString := fmt.Sprint(mySetSlice) return myString } func testSetString(theSetString string, theSet Set) { counter := 0 for i := range theSet.mem { for k := 0; k < len(theSetString); k++ { if fmt.Sprint(i) == string(theSetString[k]) { counter += 1 } } } if counter != len(theSet.mem) { panic("Error in SetString: wrong number of elements") } // Seeing if every element of theSet can be found as a string in theSetString. 
} // ***************************************************************************** // TESTING: func main() { theSet := NewSet() theOtherSet := NewSet() emptySet := NewSet() // --------------------------------------------------------------------------- // TESTING OF EMPTY SET AND ONE-ELEMENT SET OPERATIONS: if Equals(theSet, theOtherSet) != true { panic("Error handling empty set in Equals") } testPowerSet(PowerSet(emptySet), emptySet) theSet.Append(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theSet.Intersection(emptySet), emptySet) != true { panic("Error handling intersection of set with emptySet") } if Equals(theSet.Union(emptySet), theSet) != true { panic("Error handling union of set with emptySet") } // Testing union and intersection on emptySet. if Equals(theSet, theOtherSet) != false { panic("Error handling comparing empty to one-element set in Equals") } if Equals(theSet, theSet) != true { panic("Error handling comparing set to itself in Equals") } theOtherSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error handling comparing one-element sets in Equals") } // Testing Equals with one and zero-element sets. theSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error in handling same-element appending in Append") } testPowerSet(PowerSet(theSet), theSet) if len(PowerSet(emptySet)) != 1 { panic("Error in handling power set of empty set")
{ delete(s.mem, removeMem) }
identifier_body
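A side note on the power-set technique in the Set.go code above: the same enumeration works with integer bitmasks directly, skipping the strconv/fmt.Sprintf round-trip through binary strings. This is a minimal standalone sketch, not part of the original file; the setMember alias below is an assumption standing in for the file's unshown type definition.

package main

import "fmt"

type setMember = interface{} // assumption: stands in for Set.go's element type

// subsetsByMask enumerates all 2^n subsets of items: every integer mask in
// [0, 2^n) is read as a bitmask, and bit k decides whether items[k] is taken.
func subsetsByMask(items []setMember) [][]setMember {
	n := uint(len(items))
	out := make([][]setMember, 0, 1<<n)
	for mask := 0; mask < 1<<n; mask++ {
		sub := []setMember{}
		for k := uint(0); k < n; k++ {
			if mask&(1<<k) != 0 { // bit k is set, so take the k:th element
				sub = append(sub, items[k])
			}
		}
		out = append(out, sub)
	}
	return out
}

func main() {
	fmt.Println(len(subsetsByMask([]setMember{1, 2, 3}))) // 8 subsets, counting the empty one
}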
Set.go
) Remove(removeMem setMember) { delete(s.mem, removeMem) } // ----------------------------------------------------------------------------- // Set equality: Two sets are equal if they have the same elements. func Equals(oneSet, otherSet Set) bool { if len(oneSet.mem) != len(otherSet.mem) { return false } /* Obviously, if the sets have different numbers of elements, they are not equal. */ for k := range oneSet.mem { if otherSet.mem[k] == false { return false } } /* If the sets have an equal number of elements and an element in one of the sets is not in the other set, they are not equal. */ return true } // ----------------------------------------------------------------------------- /* The union of two sets is the set of members of either (or both) sets. This function is a method which acts on a set and returns the union of it and another set specified in the argument of the function. */ func (s Set) Union(otherSet Set) Set { unionSet := NewSet() for k := range s.mem { unionSet.mem[k] = true } // Adds the elements of the set to the union set. for k := range otherSet.mem
// Adds the elements of the set in the argument to the union set. return unionSet } // ----------------------------------------------------------------------------- /* The intersection of two sets is the set of members of both sets. This function is a method which acts on a set and returns the intersection of it and another set specified in the argument of the function. */ func (s Set) Intersection(otherSet Set) Set { interSet := NewSet() for k := range s.mem { if otherSet.mem[k] == true { interSet.mem[k] = true // Adds an element to the intersection set if it is a member of both sets. } } return interSet } // ----------------------------------------------------------------------------- /* The relative complement of a set relative to another set (call it universe) is the set of members of the set which are not members of universe. This method returns the relative complement of the set it acts upon relative to the set specified in the argument.*/ func (s Set) RelCompl(universe Set) Set { relSet := NewSet() for k := range s.mem { if universe.mem[k] == false { relSet.mem[k] = true // Adds a member to the relative set if it is in s but not in universe. } } return relSet } // ----------------------------------------------------------------------------- // Return a slice containing all subsets of mySet. func PowerSet(mySet Set) []Set { powSet := []Set{} mySlice := []setMember{} mySubSlices := [][]setMember{} /* The function first breaks the set down into a slice which is easier to work with. The problem then amounts to finding all subslices of this slice. This can be done by realizing that, since the power set of a set with N elements has 2^N elements, these elements can be represented by binary strings with N digits. This is what the subsets() function below does. */ for k := range mySet.mem { mySlice = append(mySlice, k) } mySubSlices = subsets(mySlice) // Generate the slice containing all subslices of this slice. for i := 0; i < len(mySubSlices); i++ { newSubSet := NewSet() for k := 0; k < len(mySubSlices[i]); k++ { newSubSet.mem[mySubSlices[i][k]] = true } powSet = append(powSet, newSubSet) } return powSet } /* This function takes a slice as input and returns a slice containing all subslices of this slice. */ func subsets(mySlice []setMember) [][]setMember { indexSlice := []string{} subSetSlice := [][]setMember{} binaryLen := "%0" + strconv.Itoa(len(mySlice)) + "b" for i := 0; i < pow(2, len(mySlice)); i++ { indexSlice = append(indexSlice, fmt.Sprintf(binaryLen, i)) } /* Generate all binary numbers from 0 to 2^len(mySlice)-1 and set their number of digits to len(mySlice). */ for i := 0; i < pow(2, len(mySlice)); i++ { subsets := []setMember{} for k := 0; k < len(mySlice); k++ { if indexSlice[i][k] == byte('1') { subsets = append(subsets, mySlice[k]) } /* Every binary number from 0 to 2^(len(mySlice))-1 represents a subset of mySlice: iterate through every binary number, and if the k:th element of the binary number is 1, append the k:th element of mySlice to subsets, which represents a given subset. When all the binary numbers are iterated through, every possible subset of mySlice will have been generated through this process, since there are 2^len(mySlice) unique binary numbers and 2^len(mySlice) subsets of mySlice, counting the empty slice. */ } subSetSlice = append(subSetSlice, subsets) // Append the generated subset to the subSetSlice. } return subSetSlice } /* There is no native integer exponentiation function in Go. 
I used this basic implementation I found on: http://grokbase.com/t/gg/golang-nuts/139n3edtq3/go-nuts-integer-exponentiation-in-go */ func pow(a, exp int) int { p := 1 for exp > 0 { if exp&1 != 0 { p *= a } exp >>= 1 a *= a } return p } // Testing some conditions put on the power set. func testPowerSet(subSetSlice []Set, theSet Set) { allElemsInPowSet := NewSet() if len(subSetSlice) != pow(2, len(theSet.mem)) { panic("Error in number of elements in PowerSet") } // Check that the power set has 2^N elements for a set with N elements. for i := 0; i < len(subSetSlice); i++ { for k := range subSetSlice[i].mem { allElemsInPowSet.Append(k) // Collect all elements found in the power set. if theSet.mem[k] == false { panic("Error in allowed set members in PowerSet") // Checks if some elements of the power set are not in the set. } } } for i := range allElemsInPowSet.mem { if theSet.mem[i] == false { panic("Error: elements missing in PowerSet") } } // Check if some elements of the set are not in the power set. } // ----------------------------------------------------------------------------- /* Return a string representation of the set. */ func (s *Set) SetString() string { mySetSlice := []setMember{} for i := range s.mem { mySetSlice = append(mySetSlice, i) } myString := fmt.Sprint(mySetSlice) return myString } func testSetString(theSetString string, theSet Set) { counter := 0 for i := range theSet.mem { for k := 0; k < len(theSetString); k++ { if fmt.Sprint(i) == string(theSetString[k]) { counter += 1 } } } if counter != len(theSet.mem) { panic("Error in SetString: wrong number of elements") } // Check that every element of theSet can be found as a string in theSetString. } // ***************************************************************************** // TESTING: func main() { theSet := NewSet() theOtherSet := NewSet() emptySet := NewSet() // --------------------------------------------------------------------------- // TESTING OF EMPTY SET AND ONE-ELEMENT SET OPERATIONS: if Equals(theSet, theOtherSet) != true { panic("Error handling empty set in Equals") } testPowerSet(PowerSet(emptySet), emptySet) theSet.Append(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theSet.Intersection(emptySet), emptySet) != true { panic("Error handling intersection of set with emptySet") } if Equals(theSet.Union(emptySet), theSet) != true { panic("Error handling union of set with emptySet") } // Testing union and intersection on emptySet. if Equals(theSet, theOtherSet) != false { panic("Error handling comparing empty to one-element set in Equals") } if Equals(theSet, theSet) != true { panic("Error handling comparing set to itself in Equals") } theOtherSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error handling comparing one-element sets in Equals") } // Testing Equals with one and zero-element sets. theSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error in handling same-element appending in Append") } testPowerSet(PowerSet(theSet), theSet) if len(PowerSet(emptySet)) != 1 { panic("Error in handling power set of empty set")
{ unionSet.mem[k] = true }
conditional_block
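Read in sequence, the Set.go operations above compose as ordinary value calls. A short usage sketch, under the assumption that it sits in the same package as Set.go so that NewSet, Append, SetString and friends are in scope (signatures inferred from the file's own main):

// demoSetOps exercises the set operations defined in Set.go.
func demoSetOps() {
	a, b := NewSet(), NewSet()
	for _, v := range []int{1, 2, 3} {
		a.Append(v)
	}
	for _, v := range []int{2, 3, 4} {
		b.Append(v)
	}
	u := a.Union(b)        // {1 2 3 4}
	i := a.Intersection(b) // {2 3}
	d := a.RelCompl(b)     // {1}, in a but not in b
	fmt.Println(u.SetString(), i.SetString(), d.SetString(), Equals(i, d)) // Equals(i, d) is false
}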
gitiles.pb.go
func (*LogResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{1} } func (m *LogResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogResponse.Unmarshal(m, b) } func (m *LogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogResponse.Marshal(b, m, deterministic) } func (dst *LogResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LogResponse.Merge(dst, src) } func (m *LogResponse) XXX_Size() int { return xxx_messageInfo_LogResponse.Size(m) } func (m *LogResponse) XXX_DiscardUnknown() { xxx_messageInfo_LogResponse.DiscardUnknown(m) } var xxx_messageInfo_LogResponse proto.InternalMessageInfo func (m *LogResponse) GetLog() []*git.Commit { if m != nil { return m.Log } return nil } func (m *LogResponse) GetNextPageToken() string { if m != nil { return m.NextPageToken } return "" } // RefsRequest is a request message of Gitiles.Refs RPC. type RefsRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // Limits which refs to resolve to only those matching {refsPath}/*. // // Must be "refs" or start with "refs/". // Must not include glob '*'. // Use "refs/heads" to retrieve all branches. // // To fetch **all** refs in a repo, specify just "refs" but beware of two // caveats: // * refs returned include a ref for each patchset for each Gerrit change // associated with the repo. // * returned map will contain special "HEAD" ref whose value in resulting map // will be name of the actual ref to which "HEAD" points, which is typically // "refs/heads/master". // // Thus, if you are looking for all tags and all branches of repo, it's // recommended to issue two Refs calls limited to "refs/tags" and "refs/heads" // instead of one call for "refs". // // Since Gerrit allows per-ref ACLs, it is possible that some refs matching // refPrefix would not be present in results because current user isn't granted // read permission on them. RefsPath string `protobuf:"bytes,2,opt,name=refs_path,json=refsPath" json:"refs_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsRequest) Reset() { *m = RefsRequest{} } func (m *RefsRequest) String() string { return proto.CompactTextString(m) } func (*RefsRequest) ProtoMessage() {} func (*RefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{2} } func (m *RefsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsRequest.Unmarshal(m, b) } func (m *RefsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsRequest.Marshal(b, m, deterministic) } func (dst *RefsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsRequest.Merge(dst, src) } func (m *RefsRequest) XXX_Size() int { return xxx_messageInfo_RefsRequest.Size(m) } func (m *RefsRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefsRequest.DiscardUnknown(m) } var xxx_messageInfo_RefsRequest proto.InternalMessageInfo func (m *RefsRequest)
() string { if m != nil { return m.Project } return "" } func (m *RefsRequest) GetRefsPath() string { if m != nil { return m.RefsPath } return "" } // RefsResponse is a response message of Gitiles.Refs RPC. type RefsResponse struct { // revisions maps a ref to a revision. // Git branches have keys start with "refs/heads/". Revisions map[string]string `protobuf:"bytes,2,rep,name=revisions" json:"revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsResponse) Reset() { *m = RefsResponse{} } func (m *RefsResponse) String() string { return proto.CompactTextString(m) } func (*RefsResponse) ProtoMessage() {} func (*RefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{3} } func (m *RefsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsResponse.Unmarshal(m, b) } func (m *RefsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsResponse.Marshal(b, m, deterministic) } func (dst *RefsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsResponse.Merge(dst, src) } func (m *RefsResponse) XXX_Size() int { return xxx_messageInfo_RefsResponse.Size(m) } func (m *RefsResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefsResponse.DiscardUnknown(m) } var xxx_messageInfo_RefsResponse proto.InternalMessageInfo func (m *RefsResponse) GetRevisions() map[string]string { if m != nil { return m.Revisions } return nil } func init() { proto.RegisterType((*LogRequest)(nil), "gitiles.LogRequest") proto.RegisterType((*LogResponse)(nil), "gitiles.LogResponse") proto.RegisterType((*RefsRequest)(nil), "gitiles.RefsRequest") proto.RegisterType((*RefsResponse)(nil), "gitiles.RefsResponse") proto.RegisterMapType((map[string]string)(nil), "gitiles.RefsResponse.RevisionsEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // GitilesClient is the client API for Gitiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GitilesClient interface { // Log retrieves commit log. Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) // Refs retrieves repo refs. Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) } type gitilesPRPCClient struct { client *prpc.Client } func NewGitilesPRPCClient(client *prpc.Client) GitilesClient { return &gitilesPRPCClient{client} } func (c *gitilesPRPCClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesPRPCClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Refs", in, out, opts...) 
if err != nil { return nil, err } return out, nil } type gitilesClient struct { cc *grpc.ClientConn } func NewGitilesClient(cc *grpc.ClientConn) GitilesClient { return &gitilesClient{cc} } func (c *gitilesClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } // GitilesServer is the server API for Gitiles service. type GitilesServer interface
GetProject
identifier_name
gitiles.pb.go
(*LogResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{1} } func (m *LogResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogResponse.Unmarshal(m, b) } func (m *LogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogResponse.Marshal(b, m, deterministic) } func (dst *LogResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LogResponse.Merge(dst, src) } func (m *LogResponse) XXX_Size() int { return xxx_messageInfo_LogResponse.Size(m) } func (m *LogResponse) XXX_DiscardUnknown() { xxx_messageInfo_LogResponse.DiscardUnknown(m) } var xxx_messageInfo_LogResponse proto.InternalMessageInfo func (m *LogResponse) GetLog() []*git.Commit { if m != nil { return m.Log } return nil } func (m *LogResponse) GetNextPageToken() string { if m != nil { return m.NextPageToken } return "" } // RefsRequest is a request message of Gitiles.Refs RPC. type RefsRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // Limits which refs to resolve to only those matching {refsPath}/*. // // Must be "refs" or start with "refs/". // Must not include glob '*'. // Use "refs/heads" to retrieve all branches. // // To fetch **all** refs in a repo, specify just "refs" but beware of two // caveats: // * refs returned include a ref for each patchset for each Gerrit change // associated with the repo. // * returned map will contain special "HEAD" ref whose value in resulting map // will be name of the actual ref to which "HEAD" points, which is typically // "refs/heads/master". // // Thus, if you are looking for all tags and all branches of repo, it's // recommended to issue two Refs calls limited to "refs/tags" and "refs/heads" // instead of one call for "refs". // // Since Gerrit allows per-ref ACLs, it is possible that some refs matching // refPrefix would not be present in results because current user isn't granted // read permission on them. RefsPath string `protobuf:"bytes,2,opt,name=refs_path,json=refsPath" json:"refs_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsRequest) Reset() { *m = RefsRequest{} } func (m *RefsRequest) String() string { return proto.CompactTextString(m) } func (*RefsRequest) ProtoMessage() {} func (*RefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{2} } func (m *RefsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsRequest.Unmarshal(m, b) } func (m *RefsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsRequest.Marshal(b, m, deterministic) } func (dst *RefsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsRequest.Merge(dst, src) } func (m *RefsRequest) XXX_Size() int { return xxx_messageInfo_RefsRequest.Size(m) } func (m *RefsRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefsRequest.DiscardUnknown(m) } var xxx_messageInfo_RefsRequest proto.InternalMessageInfo func (m *RefsRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *RefsRequest) GetRefsPath() string { if m != nil { return m.RefsPath } return "" } // RefsResponse is a response message of Gitiles.Refs RPC. type RefsResponse struct { // revisions maps a ref to a revision. // Git branches have keys start with "refs/heads/". 
Revisions map[string]string `protobuf:"bytes,2,rep,name=revisions" json:"revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsResponse) Reset() { *m = RefsResponse{} } func (m *RefsResponse) String() string { return proto.CompactTextString(m) } func (*RefsResponse) ProtoMessage() {} func (*RefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{3} } func (m *RefsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsResponse.Unmarshal(m, b) } func (m *RefsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsResponse.Marshal(b, m, deterministic) } func (dst *RefsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsResponse.Merge(dst, src) } func (m *RefsResponse) XXX_Size() int { return xxx_messageInfo_RefsResponse.Size(m) } func (m *RefsResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefsResponse.DiscardUnknown(m) } var xxx_messageInfo_RefsResponse proto.InternalMessageInfo func (m *RefsResponse) GetRevisions() map[string]string { if m != nil { return m.Revisions } return nil } func init() { proto.RegisterType((*LogRequest)(nil), "gitiles.LogRequest") proto.RegisterType((*LogResponse)(nil), "gitiles.LogResponse") proto.RegisterType((*RefsRequest)(nil), "gitiles.RefsRequest") proto.RegisterType((*RefsResponse)(nil), "gitiles.RefsResponse") proto.RegisterMapType((map[string]string)(nil), "gitiles.RefsResponse.RevisionsEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // GitilesClient is the client API for Gitiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GitilesClient interface { // Log retrieves commit log. Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) // Refs retrieves repo refs. Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) } type gitilesPRPCClient struct { client *prpc.Client } func NewGitilesPRPCClient(client *prpc.Client) GitilesClient { return &gitilesPRPCClient{client} } func (c *gitilesPRPCClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Log", in, out, opts...) if err != nil
return out, nil } func (c *gitilesPRPCClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } type gitilesClient struct { cc *grpc.ClientConn } func NewGitilesClient(cc *grpc.ClientConn) GitilesClient { return &gitilesClient{cc} } func (c *gitilesClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } // GitilesServer is the server API for Gitiles service. type GitilesServer interface
{ return nil, err }
conditional_block
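The RefsRequest comment block above explicitly recommends two narrow Refs calls ("refs/heads" and "refs/tags") over one broad call for "refs". A sketch of that pattern against the generated client, assuming it compiles alongside the generated package; the dial target and project are placeholders, and a real Gitiles host would need TLS credentials rather than grpc.WithInsecure():

// fetchBranchesAndTags issues the two narrow Refs calls recommended in the
// RefsRequest documentation instead of a single call for all of "refs".
func fetchBranchesAndTags(ctx context.Context, c GitilesClient, project string) (branches, tags map[string]string, err error) {
	heads, err := c.Refs(ctx, &RefsRequest{Project: project, RefsPath: "refs/heads"})
	if err != nil {
		return nil, nil, err
	}
	ts, err := c.Refs(ctx, &RefsRequest{Project: project, RefsPath: "refs/tags"})
	if err != nil {
		return nil, nil, err
	}
	return heads.Revisions, ts.Revisions, nil
}

func main() {
	conn, err := grpc.Dial("gitiles.example.com:443", grpc.WithInsecure()) // placeholder target
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	branches, tags, err := fetchBranchesAndTags(context.Background(), NewGitilesClient(conn), "chromium/src")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(branches), len(tags))
}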
gitiles.pb.go
func (m *LogRequest) GetTreeDiff() bool { if m != nil { return m.TreeDiff } return false } func (m *LogRequest) GetPageToken() string { if m != nil { return m.PageToken } return "" } func (m *LogRequest) GetPageSize() int32 { if m != nil { return m.PageSize } return 0 } // LogResponse is a response message of Gitiles.Log RPC. type LogResponse struct { // Retrieved commits. Log []*git.Commit `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` // A page token for the next LogRequest, to fetch the next page of commits. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogResponse) Reset() { *m = LogResponse{} } func (m *LogResponse) String() string { return proto.CompactTextString(m) } func (*LogResponse) ProtoMessage() {} func (*LogResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{1} } func (m *LogResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogResponse.Unmarshal(m, b) } func (m *LogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogResponse.Marshal(b, m, deterministic) } func (dst *LogResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LogResponse.Merge(dst, src) } func (m *LogResponse) XXX_Size() int { return xxx_messageInfo_LogResponse.Size(m) } func (m *LogResponse) XXX_DiscardUnknown() { xxx_messageInfo_LogResponse.DiscardUnknown(m) } var xxx_messageInfo_LogResponse proto.InternalMessageInfo func (m *LogResponse) GetLog() []*git.Commit { if m != nil { return m.Log } return nil } func (m *LogResponse) GetNextPageToken() string { if m != nil { return m.NextPageToken } return "" } // RefsRequest is a request message of Gitiles.Refs RPC. type RefsRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // Limits which refs to resolve to only those matching {refsPath}/*. // // Must be "refs" or start with "refs/". // Must not include glob '*'. // Use "refs/heads" to retrieve all branches. // // To fetch **all** refs in a repo, specify just "refs" but beware of two // caveats: // * refs returned include a ref for each patchset for each Gerrit change // associated with the repo. // * returned map will contain special "HEAD" ref whose value in resulting map // will be name of the actual ref to which "HEAD" points, which is typically // "refs/heads/master". // // Thus, if you are looking for all tags and all branches of repo, it's // recommended to issue two Refs calls limited to "refs/tags" and "refs/heads" // instead of one call for "refs". // // Since Gerrit allows per-ref ACLs, it is possible that some refs matching // refPrefix would not be present in results because current user isn't granted // read permission on them. 
RefsPath string `protobuf:"bytes,2,opt,name=refs_path,json=refsPath" json:"refs_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsRequest) Reset() { *m = RefsRequest{} } func (m *RefsRequest) String() string { return proto.CompactTextString(m) } func (*RefsRequest) ProtoMessage() {} func (*RefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{2} } func (m *RefsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsRequest.Unmarshal(m, b) } func (m *RefsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsRequest.Marshal(b, m, deterministic) } func (dst *RefsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsRequest.Merge(dst, src) } func (m *RefsRequest) XXX_Size() int { return xxx_messageInfo_RefsRequest.Size(m) } func (m *RefsRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefsRequest.DiscardUnknown(m) } var xxx_messageInfo_RefsRequest proto.InternalMessageInfo func (m *RefsRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *RefsRequest) GetRefsPath() string { if m != nil { return m.RefsPath } return "" } // RefsResponse is a response message of Gitiles.Refs RPC. type RefsResponse struct { // revisions maps a ref to a revision. // Git branches have keys start with "refs/heads/". Revisions map[string]string `protobuf:"bytes,2,rep,name=revisions" json:"revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsResponse) Reset() { *m = RefsResponse{} } func (m *RefsResponse) String() string { return proto.CompactTextString(m) } func (*RefsResponse) ProtoMessage() {} func (*RefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{3} } func (m *RefsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsResponse.Unmarshal(m, b) } func (m *RefsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsResponse.Marshal(b, m, deterministic) } func (dst *RefsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsResponse.Merge(dst, src) } func (m *RefsResponse) XXX_Size() int { return xxx_messageInfo_RefsResponse.Size(m) } func (m *RefsResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefsResponse.DiscardUnknown(m) } var xxx_messageInfo_RefsResponse proto.InternalMessageInfo func (m *RefsResponse) GetRevisions() map[string]string { if m != nil { return m.Revisions } return nil } func init() { proto.RegisterType((*LogRequest)(nil), "gitiles.LogRequest") proto.RegisterType((*LogResponse)(nil), "gitiles.LogResponse") proto.RegisterType((*RefsRequest)(nil), "gitiles.RefsRequest") proto.RegisterType((*RefsResponse)(nil), "gitiles.RefsResponse") proto.RegisterMapType((map[string]string)(nil), "gitiles.RefsResponse.RevisionsEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // GitilesClient is the client API for Gitiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type GitilesClient interface { // Log retrieves commit log. Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) // Refs retrieves repo refs. Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) } type gitilesPRPCClient struct { client *prpc.Client } func NewGitilesPRPCClient(client *prpc.Client) GitilesClient { return &gitilesPRPCClient{client} } func (c *gitilesPRPCClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesPRPCClient) Refs(ctx context.Context
{ if m != nil { return m.Ancestor } return "" }
identifier_body
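LogResponse carries NextPageToken precisely so that callers can page through long histories, and the LogRequest getters above show the matching PageToken/PageSize request fields. A sketch of the pagination loop, assuming it lives alongside the generated package; req is expected to already identify the repo and committish through fields not shown in this excerpt, and git is that package's import of the sibling commit proto:

// allCommits drives Gitiles.Log until NextPageToken comes back empty,
// accumulating the commits from every page.
func allCommits(ctx context.Context, c GitilesClient, req *LogRequest) ([]*git.Commit, error) {
	var out []*git.Commit
	req.PageSize = 100 // page granularity; the server may cap this
	for {
		res, err := c.Log(ctx, req)
		if err != nil {
			return nil, err
		}
		out = append(out, res.Log...)
		if res.NextPageToken == "" {
			return out, nil // no more pages
		}
		req.PageToken = res.NextPageToken
	}
}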
gitiles.pb.go
func (*LogResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{1} } func (m *LogResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogResponse.Unmarshal(m, b) } func (m *LogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogResponse.Marshal(b, m, deterministic) } func (dst *LogResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LogResponse.Merge(dst, src) } func (m *LogResponse) XXX_Size() int { return xxx_messageInfo_LogResponse.Size(m) } func (m *LogResponse) XXX_DiscardUnknown() { xxx_messageInfo_LogResponse.DiscardUnknown(m) } var xxx_messageInfo_LogResponse proto.InternalMessageInfo func (m *LogResponse) GetLog() []*git.Commit { if m != nil { return m.Log } return nil } func (m *LogResponse) GetNextPageToken() string { if m != nil { return m.NextPageToken } return "" } // RefsRequest is a request message of Gitiles.Refs RPC. type RefsRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // Limits which refs to resolve to only those matching {refsPath}/*. // // Must be "refs" or start with "refs/". // Must not include glob '*'. // Use "refs/heads" to retrieve all branches. // // To fetch **all** refs in a repo, specify just "refs" but beware of two // caveats: // * refs returned include a ref for each patchset for each Gerrit change // associated with the repo. // * returned map will contain special "HEAD" ref whose value in resulting map // will be name of the actual ref to which "HEAD" points, which is typically // "refs/heads/master". // // Thus, if you are looking for all tags and all branches of repo, it's // recommended to issue two Refs calls limited to "refs/tags" and "refs/heads" // instead of one call for "refs". // // Since Gerrit allows per-ref ACLs, it is possible that some refs matching // refPrefix would not be present in results because current user isn't granted // read permission on them. RefsPath string `protobuf:"bytes,2,opt,name=refs_path,json=refsPath" json:"refs_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsRequest) Reset() { *m = RefsRequest{} } func (m *RefsRequest) String() string { return proto.CompactTextString(m) } func (*RefsRequest) ProtoMessage() {} func (*RefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{2} } func (m *RefsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsRequest.Unmarshal(m, b) } func (m *RefsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsRequest.Marshal(b, m, deterministic) } func (dst *RefsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsRequest.Merge(dst, src) } func (m *RefsRequest) XXX_Size() int { return xxx_messageInfo_RefsRequest.Size(m) } func (m *RefsRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefsRequest.DiscardUnknown(m) } var xxx_messageInfo_RefsRequest proto.InternalMessageInfo
} return "" } func (m *RefsRequest) GetRefsPath() string { if m != nil { return m.RefsPath } return "" } // RefsResponse is a response message of Gitiles.Refs RPC. type RefsResponse struct { // revisions maps a ref to a revision. // Git branches have keys start with "refs/heads/". Revisions map[string]string `protobuf:"bytes,2,rep,name=revisions" json:"revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsResponse) Reset() { *m = RefsResponse{} } func (m *RefsResponse) String() string { return proto.CompactTextString(m) } func (*RefsResponse) ProtoMessage() {} func (*RefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{3} } func (m *RefsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsResponse.Unmarshal(m, b) } func (m *RefsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsResponse.Marshal(b, m, deterministic) } func (dst *RefsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsResponse.Merge(dst, src) } func (m *RefsResponse) XXX_Size() int { return xxx_messageInfo_RefsResponse.Size(m) } func (m *RefsResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefsResponse.DiscardUnknown(m) } var xxx_messageInfo_RefsResponse proto.InternalMessageInfo func (m *RefsResponse) GetRevisions() map[string]string { if m != nil { return m.Revisions } return nil } func init() { proto.RegisterType((*LogRequest)(nil), "gitiles.LogRequest") proto.RegisterType((*LogResponse)(nil), "gitiles.LogResponse") proto.RegisterType((*RefsRequest)(nil), "gitiles.RefsRequest") proto.RegisterType((*RefsResponse)(nil), "gitiles.RefsResponse") proto.RegisterMapType((map[string]string)(nil), "gitiles.RefsResponse.RevisionsEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // GitilesClient is the client API for Gitiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GitilesClient interface { // Log retrieves commit log. Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) // Refs retrieves repo refs. Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) } type gitilesPRPCClient struct { client *prpc.Client } func NewGitilesPRPCClient(client *prpc.Client) GitilesClient { return &gitilesPRPCClient{client} } func (c *gitilesPRPCClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesPRPCClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Refs", in, out, opts...) 
if err != nil { return nil, err } return out, nil } type gitilesClient struct { cc *grpc.ClientConn } func NewGitilesClient(cc *grpc.ClientConn) GitilesClient { return &gitilesClient{cc} } func (c *gitilesClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } // GitilesServer is the server API for Gitiles service. type GitilesServer interface {
func (m *RefsRequest) GetProject() string { if m != nil { return m.Project
random_line_split
lib.rs
_mut_passed)] #![warn(clippy::wildcard_in_or_patterns)] #![warn(clippy::crosspointer_transmute)] #![warn(clippy::excessive_precision)] #![warn(clippy::overflow_check_conditional)] #![warn(clippy::as_conversions)] #![warn(clippy::match_overlapping_arm)] #![warn(clippy::zero_divided_by_zero)] #![warn(clippy::must_use_unit)] #![warn(clippy::suspicious_assignment_formatting)] #![warn(clippy::suspicious_else_formatting)] #![warn(clippy::suspicious_unary_op_formatting)] #![warn(clippy::mut_mutex_lock)] #![warn(clippy::print_literal)] #![warn(clippy::same_item_push)] #![warn(clippy::useless_format)] #![warn(clippy::write_literal)] #![warn(clippy::redundant_closure)] #![warn(clippy::redundant_closure_call)] #![warn(clippy::unnecessary_lazy_evaluations)] #![warn(clippy::partialeq_ne_impl)] #![warn(clippy::redundant_field_names)] #![warn(clippy::transmutes_expressible_as_ptr_casts)] #![warn(clippy::unused_async)] #![warn(clippy::disallowed_methods)] #![warn(clippy::disallowed_macros)] #![warn(clippy::disallowed_types)] #![warn(clippy::from_over_into)] // END LINT CONFIG //! An API client for [Metabase]. //! //! Only the features presently required are implemented. Documentation is //! sparse to avoid duplicating information in the upstream API documentation. //! See: //! //! * [Using the REST API](https://github.com/metabase/metabase/wiki/Using-the-REST-API) //! * [Auto-generated API documentation](https://github.com/metabase/metabase/blob/master/docs/api-documentation.md) //! //! [Metabase]: https://metabase.com #![warn(missing_debug_implementations)] use std::fmt; use std::time::Duration; use reqwest::{IntoUrl, Url}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; /// A Metabase API client. #[derive(Debug)] pub struct Client { inner: reqwest::Client, url: Url, session_id: Option<String>, } impl Client { /// Constructs a new `Client` that will target a Metabase instance at `url`. /// /// `url` must not contain a path nor be a [cannot-be-a-base] URL. /// /// [cannot-be-a-base]: https://url.spec.whatwg.org/#url-cannot-be-a-base-url-flag pub fn new<U>(url: U) -> Result<Self, Error> where U: IntoUrl, { let mut url = url.into_url()?; if url.path() != "/" { return Err(Error::InvalidUrl("base URL cannot have path".into())); } assert!(!url.cannot_be_a_base()); url.path_segments_mut() .expect("cannot-be-a-base checked to be false") .push("api"); Ok(Client { inner: reqwest::Client::new(), url, session_id: None, }) } /// Sets the session ID to include in future requests made by this client. pub fn set_session_id(&mut self, session_id: String) { self.session_id = Some(session_id); } /// Fetches public, global properties. /// /// The underlying API call is `GET /api/session/properties`. pub async fn session_properties(&self) -> Result<SessionPropertiesResponse, reqwest::Error> { let url = self.api_url(&["session", "properties"]); self.send_request(self.inner.get(url)).await } /// Requests a session ID for the username and password named in `request`. /// /// Note that usernames are typically email addresses. To authenticate /// future requests with the returned session ID, call `set_session_id`. /// /// The underlying API call is `POST /api/session`. pub async fn login(&self, request: &LoginRequest) -> Result<LoginResponse, reqwest::Error> { let url = self.api_url(&["session"]); self.send_request(self.inner.post(url).json(request)).await } /// Creates a user and database connection if the Metabase instance has not /// yet been set up. 
/// /// The request must include the `setup_token` from a /// `SessionPropertiesResponse`. If the setup token returned by /// [`Client::session_properties`] is `None`, the cluster is already set up, /// and this request will fail. /// /// The underlying API call is `POST /api/setup`. pub async fn setup(&self, request: &SetupRequest) -> Result<LoginResponse, reqwest::Error> { let url = self.api_url(&["setup"]); self.send_request(self.inner.post(url).json(request)).await } /// Fetches the list of databases known to Metabase. /// /// The underlying API call is `GET /database`. pub async fn databases(&self) -> Result<Vec<Database>, reqwest::Error> { let url = self.api_url(&["database"]); let res: ListWrapper<_> = self.send_request(self.inner.get(url)).await?; Ok(res.data) } /// Fetches metadata about a particular database. /// /// The underlying API call is `GET /database/:id/metadata`. pub async fn database_metadata(&self, id: usize) -> Result<DatabaseMetadata, reqwest::Error> { let url = self.api_url(&["database", &id.to_string(), "metadata"]); self.send_request(self.inner.get(url)).await } fn api_url(&self, endpoint: &[&str]) -> Url { let mut url = self.url.clone(); url.path_segments_mut() .expect("url validated on construction") .extend(endpoint); url } async fn send_request<T>(&self, mut req: reqwest::RequestBuilder) -> Result<T, reqwest::Error> where T: DeserializeOwned, { req = req.timeout(Duration::from_secs(5)); if let Some(session_id) = &self.session_id { req = req.header("X-Metabase-Session", session_id); } let res = req.send().await?.error_for_status()?; res.json().await } } /// A Metabase error. #[derive(Debug)] pub enum Error { /// The provided URL was invalid. InvalidUrl(String), /// The underlying transport mechanism returned an error. Transport(reqwest::Error), } impl From<reqwest::Error> for Error { fn from(e: reqwest::Error) -> Error { Error::Transport(e) } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::InvalidUrl(_) => None, Error::Transport(e) => Some(e), } } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::InvalidUrl(msg) => write!(f, "invalid url: {}", msg), Error::Transport(e) => write!(f, "transport: {}", e), } } } #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] struct ListWrapper<T> { data: Vec<T>, } /// The response to [`Client::session_properties`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] #[serde(rename_all = "kebab-case")] pub struct SessionPropertiesResponse { pub setup_token: Option<String>, } /// The request for [`Client::setup`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupRequest { pub allow_tracking: bool, pub database: SetupDatabase, pub token: String, pub prefs: SetupPrefs, pub user: SetupUser, } /// A database to create as part of a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupDatabase { pub engine: String, pub name: String, pub details: SetupDatabaseDetails, } /// Details for a [`SetupDatabase`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupDatabaseDetails { pub host: String, pub port: usize, pub dbname: String, pub user: String, } /// Preferences for a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupPrefs { pub site_name: String, } /// A user to create as part of a [`SetupRequest`]. 
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupUser { pub email: String, pub first_name: String, pub last_name: String, pub password: String, pub site_name: String, } /// The request for [`Client::login`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct LoginRequest { pub username: String, pub password: String, } /// The response to [`Client::login`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct LoginResponse { pub id: String, } /// A database returned by [`Client::databases`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct
Database
identifier_name
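The client above is Rust, but the session flow it wraps is plain HTTP and it sketches easily in Go, the language used elsewhere in this dump: POST the credentials to /api/session, read the id out of the JSON response, then replay it in the X-Metabase-Session header, mirroring login and send_request above. The base URL and credentials are placeholders:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// login performs the POST /api/session exchange and returns the session id
// to use as the X-Metabase-Session header on later requests.
func login(base, username, password string) (string, error) {
	body, err := json.Marshal(map[string]string{"username": username, "password": password})
	if err != nil {
		return "", err
	}
	res, err := http.Post(base+"/api/session", "application/json", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	var out struct {
		ID string `json:"id"` // matches the LoginResponse shape above
	}
	if err := json.NewDecoder(res.Body).Decode(&out); err != nil {
		return "", err
	}
	return out.ID, nil
}

func main() {
	base := "http://localhost:3000"                        // placeholder Metabase instance
	id, err := login(base, "user@example.com", "hunter2") // placeholder credentials
	if err != nil {
		panic(err)
	}
	req, err := http.NewRequest("GET", base+"/api/session/properties", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Metabase-Session", id) // authenticated request, as in send_request
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}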
lib.rs
ting)] #![warn(clippy::suspicious_unary_op_formatting)] #![warn(clippy::mut_mutex_lock)] #![warn(clippy::print_literal)] #![warn(clippy::same_item_push)] #![warn(clippy::useless_format)] #![warn(clippy::write_literal)] #![warn(clippy::redundant_closure)] #![warn(clippy::redundant_closure_call)] #![warn(clippy::unnecessary_lazy_evaluations)] #![warn(clippy::partialeq_ne_impl)] #![warn(clippy::redundant_field_names)] #![warn(clippy::transmutes_expressible_as_ptr_casts)] #![warn(clippy::unused_async)] #![warn(clippy::disallowed_methods)] #![warn(clippy::disallowed_macros)] #![warn(clippy::disallowed_types)] #![warn(clippy::from_over_into)] // END LINT CONFIG //! An API client for [Metabase]. //! //! Only the features presently required are implemented. Documentation is //! sparse to avoid duplicating information in the upstream API documentation. //! See: //! //! * [Using the REST API](https://github.com/metabase/metabase/wiki/Using-the-REST-API) //! * [Auto-generated API documentation](https://github.com/metabase/metabase/blob/master/docs/api-documentation.md) //! //! [Metabase]: https://metabase.com #![warn(missing_debug_implementations)] use std::fmt; use std::time::Duration; use reqwest::{IntoUrl, Url}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; /// A Metabase API client. #[derive(Debug)] pub struct Client { inner: reqwest::Client, url: Url, session_id: Option<String>, } impl Client { /// Constructs a new `Client` that will target a Metabase instance at `url`. /// /// `url` must not contain a path nor be a [cannot-be-a-base] URL. /// /// [cannot-be-a-base]: https://url.spec.whatwg.org/#url-cannot-be-a-base-url-flag pub fn new<U>(url: U) -> Result<Self, Error> where U: IntoUrl, { let mut url = url.into_url()?; if url.path() != "/" { return Err(Error::InvalidUrl("base URL cannot have path".into())); } assert!(!url.cannot_be_a_base()); url.path_segments_mut() .expect("cannot-be-a-base checked to be false") .push("api"); Ok(Client { inner: reqwest::Client::new(), url, session_id: None, }) } /// Sets the session ID to include in future requests made by this client. pub fn set_session_id(&mut self, session_id: String) { self.session_id = Some(session_id); } /// Fetches public, global properties. /// /// The underlying API call is `GET /api/session/properties`. pub async fn session_properties(&self) -> Result<SessionPropertiesResponse, reqwest::Error> { let url = self.api_url(&["session", "properties"]); self.send_request(self.inner.get(url)).await } /// Requests a session ID for the username and password named in `request`. /// /// Note that usernames are typically email addresses. To authenticate /// future requests with the returned session ID, call `set_session_id`. /// /// The underlying API call is `POST /api/session`. pub async fn login(&self, request: &LoginRequest) -> Result<LoginResponse, reqwest::Error> { let url = self.api_url(&["session"]); self.send_request(self.inner.post(url).json(request)).await } /// Creates a user and database connection if the Metabase instance has not /// yet been set up. /// /// The request must include the `setup_token` from a /// `SessionPropertiesResponse`. If the setup token returned by /// [`Client::session_properties`] is `None`, the cluster is already set up, /// and this request will fail. /// /// The underlying API call is `POST /api/setup`. 
pub async fn setup(&self, request: &SetupRequest) -> Result<LoginResponse, reqwest::Error> { let url = self.api_url(&["setup"]); self.send_request(self.inner.post(url).json(request)).await } /// Fetches the list of databases known to Metabase. /// /// The underlying API call is `GET /database`. pub async fn databases(&self) -> Result<Vec<Database>, reqwest::Error> { let url = self.api_url(&["database"]); let res: ListWrapper<_> = self.send_request(self.inner.get(url)).await?; Ok(res.data) } /// Fetches metadata about a particular database. /// /// The underlying API call is `GET /database/:id/metadata`. pub async fn database_metadata(&self, id: usize) -> Result<DatabaseMetadata, reqwest::Error> { let url = self.api_url(&["database", &id.to_string(), "metadata"]); self.send_request(self.inner.get(url)).await } fn api_url(&self, endpoint: &[&str]) -> Url { let mut url = self.url.clone(); url.path_segments_mut() .expect("url validated on construction") .extend(endpoint); url } async fn send_request<T>(&self, mut req: reqwest::RequestBuilder) -> Result<T, reqwest::Error> where T: DeserializeOwned, { req = req.timeout(Duration::from_secs(5)); if let Some(session_id) = &self.session_id { req = req.header("X-Metabase-Session", session_id); } let res = req.send().await?.error_for_status()?; res.json().await } } /// A Metabase error. #[derive(Debug)] pub enum Error { /// The provided URL was invalid. InvalidUrl(String), /// The underlying transport mechanism returned an error. Transport(reqwest::Error), } impl From<reqwest::Error> for Error { fn from(e: reqwest::Error) -> Error { Error::Transport(e) } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::InvalidUrl(_) => None, Error::Transport(e) => Some(e), } } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::InvalidUrl(msg) => write!(f, "invalid url: {}", msg), Error::Transport(e) => write!(f, "transport: {}", e), } } } #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] struct ListWrapper<T> { data: Vec<T>, } /// The response to [`Client::session_properties`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] #[serde(rename_all = "kebab-case")] pub struct SessionPropertiesResponse { pub setup_token: Option<String>, } /// The request for [`Client::setup`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupRequest { pub allow_tracking: bool, pub database: SetupDatabase, pub token: String, pub prefs: SetupPrefs, pub user: SetupUser, } /// A database to create as part of a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupDatabase { pub engine: String, pub name: String, pub details: SetupDatabaseDetails, } /// Details for a [`SetupDatabase`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupDatabaseDetails { pub host: String, pub port: usize, pub dbname: String, pub user: String, } /// Preferences for a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupPrefs { pub site_name: String, } /// A user to create as part of a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupUser { pub email: String, pub first_name: String, pub last_name: String, pub password: String, pub site_name: String, } /// The request for [`Client::login`]. 
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct LoginRequest { pub username: String, pub password: String, } /// The response to [`Client::login`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct LoginResponse { pub id: String, } /// A database returned by [`Client::databases`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct Database { pub name: String, pub id: usize, } /// The response to [`Client::database_metadata`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct DatabaseMetadata { pub tables: Vec<Table>, } /// A table that is part of [`DatabaseMetadata`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct Table {
pub name: String, pub schema: String, pub fields: Vec<TableField>, }
random_line_split
transaction.rs
_dao = TransactionDao { conn: &conn }; let trans = tran_dao.list(&since_nd, &until_nd); let account_dao = AccountDao { conn: &conn }; let account = account_dao.list().into_iter().find(|acct| { acct.qualified_name() == expense_name }).unwrap(); // :(:( let mut splits = Vec::new(); for t in trans { for s in t.splits { if (s.account_guid == account.guid) { splits.push( TranSplit { account_guid: s.account_guid, transaction_guid: s.transaction_guid, value_num: s.value_num, memo: s.memo, date: t.post_date, description: t.description.clone(), }); } } } splits } #[derive(Debug)] #[derive(Serialize)] pub struct TranSplit { pub account_guid: String, pub transaction_guid: String, pub value_num: i64, pub memo: String, pub date: NaiveDateTime, pub description: String } pub fn list( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> Vec<Transaction>
#[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotals { summaries: Vec<MonthTotal>, totalSpent: i64, pub acctSums: Vec<MonthlyExpenseGroup> } #[derive(Debug)] #[derive(Serialize)] pub struct AccountSummary { name: String, monthlyTotals: Vec<MonthlyTotal>, } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotal { month: NaiveDate, total: i64 } pub fn monthly_totals<'a>( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> MonthlyTotals { let trans_dao = TransactionDao { conn: &conn }; let account_dao = AccountDao { conn: &conn }; let (since_nd, until_nd) = since_until(since, until, months, year); let trans = trans_dao.list(&since_nd, &until_nd); let mut accounts = account_dao.list(); let mut unfilled_months = expenses_by_month(&trans, &accounts); let mut months = fill_empty_months(&since_nd, &until_nd, &unfilled_months); months.sort_by(|a, b| b.total.cmp(&a.total)); let all_months = months.iter().flat_map(|m| &m.monthlyTotals); let grouped = group_by(all_months.collect::<Vec<_>>(), |m| m.month.clone()); let mut summed = grouped.into_iter().map(|(i, month_summary)| { MonthTotal { month: i, total: month_summary.into_iter().map(|m| m.total).sum() } }).collect::<Vec<_>>(); summed.sort_by(|a, b| parse_nd(&b.month).cmp(&parse_nd(&a.month))); let total_spent = summed.iter().map(|m| m.total).sum(); //let mut acct_sums = months.clone(); MonthlyTotals { summaries: summed, totalSpent: total_spent, acctSums: months.clone() } } // TODO need to understand the type option for 'function overloading' because // the following is not good fn since_until( since_p: Option<String>, until_p: Option<String>, mut months_p: Option<String>, // :(:( year_p: Option<String> ) -> (NaiveDate, NaiveDate) { let until = year_p.as_ref().map(|y| NaiveDate::from_ymd(y.parse::<i32>().unwrap(), 12, 31)) .unwrap_or({ until_p.map(|s| parse_nd(&s)).unwrap_or({ let now = Local::now().naive_local().date(); if (now.month() == 12) { NaiveDate::from_ymd(now.year(), 12, 31) } else { NaiveDate::from_ymd(now.year(), now.month() + 1, 1).pred() } }) }); months_p = year_p.map(|y| "12".to_string()).or(months_p); let since = since_p.map(|s| parse_nd(&s)).unwrap_or({ let months_since = months_p.map(|m| m.parse().unwrap()).unwrap_or(6); // yes I've (sort of) done the following twice, and it's crappy both times let mut curr_year = until.year(); let mut curr_month = until.month(); (0..months_since - 1).for_each(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; }); NaiveDate::from_ymd(curr_year, curr_month, 1) }); (since, until) } fn months_between(since: &NaiveDate, until: &NaiveDate) -> u32 { let mut curr_year = until.year(); let mut curr_month = until.month(); let mut ctr = 0; while curr_year > since.year() || curr_year == since.year() && curr_month > since.month() { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; ctr += 1; } ctr } fn fill_empty_months( since: &NaiveDate, until: &NaiveDate, expenses: &Vec<MonthlyExpenseGroup> ) -> Vec<MonthlyExpenseGroup> { // don't have moment like we do in node:( let mut curr_year = until.year(); let mut curr_month = until.month(); let num_months = months_between(since, until); let mut desired_months = (0..num_months).map(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; NaiveDate::from_ymd(curr_year, curr_month, 1) }).collect::<Vec<_>>(); desired_months.insert(0, NaiveDate::from_ymd(until.year(), until.month(), 1)); let 
mut cloned_expenses = expenses.clone(); (0..cloned_expenses.len()).for_each(|i| { let mut exp = &mut cloned_expenses[i]; (0..num_months+1).for_each(|_j| { let j = _j as usize; let month_str = format_nd(desired_months[j]); let exp_month = exp.monthlyTotals.get(j).map(|mt| mt.clone()); if (exp_month.is_none() || month_str != exp_month.unwrap().month) { exp.monthlyTotals.insert(j, MonthTotal { month: month_str, total: 0 }); } }); }); cloned_expenses } pub struct MonthlyExpense { name: String, date: NaiveDate, amount: i64, memo: String } struct ExpenseSplit { name: String, date: NaiveDateTime, amount: i64, memo: String } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] struct MonthTotal { month: String, total: i64 } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyExpenseGroup { pub name: String, pub total: i64, monthlyTotals: Vec<MonthTotal>, } fn expenses_by_month( transactions: &Vec<Transaction>, accounts: &Vec<Account> ) -> Vec<MonthlyExpenseGroup> { let mut accounts_map = HashMap::new(); for a in accounts { accounts_map.insert(&a.guid, a); } // No need to fold/reduce here like we do in the node version. // That was probably just a mistake there. let mut splits = transactions.iter().flat_map(|tran| { let expenses = tran.splits.iter().filter(|s| accounts_map[&s.account_guid].is_expense()).collect::<Vec<&Split>>(); expenses.iter().map(|e| { ExpenseSplit { name: accounts_map[&e.account_guid].qualified_name(), date: tran.post_date, amount: e.value_num, memo: e.memo.clone() } }).collect::<Vec<_>>() }).collect::<Vec<_>>(); splits.sort_by(|a,b| a.name.cmp(&b.name)); let expense_groups = group_by(splits, |s| s.name.to_string()); let expense_groups_by_month = expense_groups.into_iter().map(|(name, exp_group)| { let mut start = HashMap::<String, Vec<ExpenseSplit>>::new(); let mut exp_splits = group_by(exp_group.into_iter().collect::<Vec<ExpenseSplit>>(), |item| {
{ let (since_nd, until_nd) = since_until(since, until, months, year); let dao = TransactionDao { conn: &conn }; dao.list(&since_nd, &until_nd) }
identifier_body
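Note: the record above splits transaction.rs at the body of `list`, whose whole job is to derive a `(since, until)` date window from optional query parameters before delegating to the DAO. The defaulting rule (until = last day of the current month, since = first of the month N-1 months earlier) is easy to miss in the Rust; here is a minimal Go sketch of the same logic — names and the use of Go's time package are illustrative, the source itself uses Rust/chrono:

package main

import (
	"fmt"
	"time"
)

// monthWindow mirrors the defaulting in transaction.rs's since_until:
// until = last day of the current month, since = first day of the month
// (months-1) months earlier. Illustrative sketch, not the source API.
func monthWindow(now time.Time, months int) (since, until time.Time) {
	// First day of next month minus one day is the last day of this month;
	// time.Date normalizes Month(13) into January of the following year.
	until = time.Date(now.Year(), now.Month()+1, 1, 0, 0, 0, 0, time.UTC).AddDate(0, 0, -1)
	since = time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC).AddDate(0, -(months - 1), 0)
	return since, until
}

func main() {
	s, u := monthWindow(time.Date(2020, 3, 15, 0, 0, 0, 0, time.UTC), 6)
	fmt.Println(s.Format("2006-01-02"), u.Format("2006-01-02")) // 2019-10-01 2020-03-31
}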
transaction.rs
_dao = TransactionDao { conn: &conn }; let trans = tran_dao.list(&since_nd, &until_nd); let account_dao = AccountDao { conn: &conn }; let account = account_dao.list().into_iter().find(|acct| { acct.qualified_name() == expense_name }).unwrap(); // :(:( let mut splits = Vec::new(); for t in trans { for s in t.splits { if (s.account_guid == account.guid) { splits.push( TranSplit { account_guid: s.account_guid, transaction_guid: s.transaction_guid, value_num: s.value_num, memo: s.memo, date: t.post_date, description: t.description.clone(), }); } } } splits } #[derive(Debug)] #[derive(Serialize)] pub struct TranSplit { pub account_guid: String, pub transaction_guid: String, pub value_num: i64, pub memo: String, pub date: NaiveDateTime, pub description: String } pub fn list( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> Vec<Transaction> { let (since_nd, until_nd) = since_until(since, until, months, year); let dao = TransactionDao { conn: &conn }; dao.list(&since_nd, &until_nd) } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotals { summaries: Vec<MonthTotal>, totalSpent: i64, pub acctSums: Vec<MonthlyExpenseGroup> } #[derive(Debug)] #[derive(Serialize)] pub struct AccountSummary { name: String, monthlyTotals: Vec<MonthlyTotal>, } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotal { month: NaiveDate, total: i64 } pub fn monthly_totals<'a>( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> MonthlyTotals { let trans_dao = TransactionDao { conn: &conn }; let account_dao = AccountDao { conn: &conn }; let (since_nd, until_nd) = since_until(since, until, months, year); let trans = trans_dao.list(&since_nd, &until_nd); let mut accounts = account_dao.list(); let mut unfilled_months = expenses_by_month(&trans, &accounts); let mut months = fill_empty_months(&since_nd, &until_nd, &unfilled_months); months.sort_by(|a, b| b.total.cmp(&a.total)); let all_months = months.iter().flat_map(|m| &m.monthlyTotals); let grouped = group_by(all_months.collect::<Vec<_>>(), |m| m.month.clone()); let mut summed = grouped.into_iter().map(|(i, month_summary)| { MonthTotal { month: i, total: month_summary.into_iter().map(|m| m.total).sum() } }).collect::<Vec<_>>(); summed.sort_by(|a, b| parse_nd(&b.month).cmp(&parse_nd(&a.month))); let total_spent = summed.iter().map(|m| m.total).sum(); //let mut acct_sums = months.clone(); MonthlyTotals { summaries: summed, totalSpent: total_spent, acctSums: months.clone() } } // TODO need to understand the type option for 'function overloading' because // the following is not good fn since_until( since_p: Option<String>, until_p: Option<String>, mut months_p: Option<String>, // :(:( year_p: Option<String> ) -> (NaiveDate, NaiveDate) { let until = year_p.as_ref().map(|y| NaiveDate::from_ymd(y.parse::<i32>().unwrap(), 12, 31)) .unwrap_or({ until_p.map(|s| parse_nd(&s)).unwrap_or({ let now = Local::now().naive_local().date(); if (now.month() == 12) { NaiveDate::from_ymd(now.year(), 12, 31) } else { NaiveDate::from_ymd(now.year(), now.month() + 1, 1).pred() } }) }); months_p = year_p.map(|y| "12".to_string()).or(months_p); let since = since_p.map(|s| parse_nd(&s)).unwrap_or({ let months_since = months_p.map(|m| m.parse().unwrap()).unwrap_or(6); // yes I've (sort of) done the following twice, and it's crappy both times let mut curr_year = until.year(); let mut curr_month = until.month(); (0..months_since - 1).for_each(|i| { if curr_month == 1 
{ curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; }); NaiveDate::from_ymd(curr_year, curr_month, 1) }); (since, until) } fn months_between(since: &NaiveDate, until: &NaiveDate) -> u32 { let mut curr_year = until.year(); let mut curr_month = until.month(); let mut ctr = 0; while curr_year > since.year() || curr_year == since.year() && curr_month > since.month() { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; ctr += 1; } ctr } fn fill_empty_months( since: &NaiveDate, until: &NaiveDate, expenses: &Vec<MonthlyExpenseGroup> ) -> Vec<MonthlyExpenseGroup> { // don't have moment like we do in node:( let mut curr_year = until.year(); let mut curr_month = until.month(); let num_months = months_between(since, until); let mut desired_months = (0..num_months).map(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else
; NaiveDate::from_ymd(curr_year, curr_month, 1) }).collect::<Vec<_>>(); desired_months.insert(0, NaiveDate::from_ymd(until.year(), until.month(), 1)); let mut cloned_expenses = expenses.clone(); (0..cloned_expenses.len()).for_each(|i| { let mut exp = &mut cloned_expenses[i]; (0..num_months+1).for_each(|_j| { let j = _j as usize; let month_str = format_nd(desired_months[j]); let exp_month = exp.monthlyTotals.get(j).map(|mt| mt.clone()); if (exp_month.is_none() || month_str != exp_month.unwrap().month) { exp.monthlyTotals.insert(j, MonthTotal { month: month_str, total: 0 }); } }); }); cloned_expenses } pub struct MonthlyExpense { name: String, date: NaiveDate, amount: i64, memo: String } struct ExpenseSplit { name: String, date: NaiveDateTime, amount: i64, memo: String } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] struct MonthTotal { month: String, total: i64 } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyExpenseGroup { pub name: String, pub total: i64, monthlyTotals: Vec<MonthTotal>, } fn expenses_by_month( transactions: &Vec<Transaction>, accounts: &Vec<Account> ) -> Vec<MonthlyExpenseGroup> { let mut accounts_map = HashMap::new(); for a in accounts { accounts_map.insert(&a.guid, a); } // No need to fold/reduce here like we do in the node version. // That was probably just a mistake there. let mut splits = transactions.iter().flat_map(|tran| { let expenses = tran.splits.iter().filter(|s| accounts_map[&s.account_guid].is_expense()).collect::<Vec<&Split>>(); expenses.iter().map(|e| { ExpenseSplit { name: accounts_map[&e.account_guid].qualified_name(), date: tran.post_date, amount: e.value_num, memo: e.memo.clone() } }).collect::<Vec<_>>() }).collect::<Vec<_>>(); splits.sort_by(|a,b| a.name.cmp(&b.name)); let expense_groups = group_by(splits, |s| s.name.to_string()); let expense_groups_by_month = expense_groups.into_iter().map(|(name, exp_group)| { let mut start = HashMap::<String, Vec<ExpenseSplit>>::new(); let mut exp_splits = group_by(exp_group.into_iter().collect::<Vec<ExpenseSplit>>(), |item| {
{ curr_month -= 1; }
conditional_block
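Note: this record's middle is the else-branch of the manual month decrement that transaction.rs repeats in since_until, months_between and fill_empty_months (its own comments call the duplication out). The whole counting loop collapses into one line of arithmetic; a sketch in Go, assuming until is not before since:

package main

import (
	"fmt"
	"time"
)

// monthsBetween collapses the decrement loop in transaction.rs's
// months_between into a closed form: whole-month steps from since to until.
func monthsBetween(since, until time.Time) int {
	return (until.Year()-since.Year())*12 + int(until.Month()) - int(since.Month())
}

func main() {
	since := time.Date(2019, 10, 1, 0, 0, 0, 0, time.UTC)
	until := time.Date(2020, 3, 31, 0, 0, 0, 0, time.UTC)
	fmt.Println(monthsBetween(since, until)) // 5, matching the loop's counter
}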
transaction.rs
_dao = TransactionDao { conn: &conn }; let trans = tran_dao.list(&since_nd, &until_nd); let account_dao = AccountDao { conn: &conn }; let account = account_dao.list().into_iter().find(|acct| { acct.qualified_name() == expense_name }).unwrap(); // :(:( let mut splits = Vec::new(); for t in trans { for s in t.splits { if (s.account_guid == account.guid) { splits.push( TranSplit { account_guid: s.account_guid, transaction_guid: s.transaction_guid, value_num: s.value_num, memo: s.memo, date: t.post_date, description: t.description.clone(), }); } } } splits } #[derive(Debug)] #[derive(Serialize)] pub struct TranSplit { pub account_guid: String, pub transaction_guid: String, pub value_num: i64, pub memo: String, pub date: NaiveDateTime, pub description: String } pub fn list( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> Vec<Transaction> { let (since_nd, until_nd) = since_until(since, until, months, year); let dao = TransactionDao { conn: &conn }; dao.list(&since_nd, &until_nd) } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotals { summaries: Vec<MonthTotal>, totalSpent: i64, pub acctSums: Vec<MonthlyExpenseGroup> } #[derive(Debug)] #[derive(Serialize)] pub struct AccountSummary { name: String, monthlyTotals: Vec<MonthlyTotal>, } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotal { month: NaiveDate, total: i64 } pub fn monthly_totals<'a>( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> MonthlyTotals { let trans_dao = TransactionDao { conn: &conn }; let account_dao = AccountDao { conn: &conn }; let (since_nd, until_nd) = since_until(since, until, months, year); let trans = trans_dao.list(&since_nd, &until_nd); let mut accounts = account_dao.list(); let mut unfilled_months = expenses_by_month(&trans, &accounts); let mut months = fill_empty_months(&since_nd, &until_nd, &unfilled_months); months.sort_by(|a, b| b.total.cmp(&a.total)); let all_months = months.iter().flat_map(|m| &m.monthlyTotals); let grouped = group_by(all_months.collect::<Vec<_>>(), |m| m.month.clone()); let mut summed = grouped.into_iter().map(|(i, month_summary)| { MonthTotal { month: i, total: month_summary.into_iter().map(|m| m.total).sum() } }).collect::<Vec<_>>(); summed.sort_by(|a, b| parse_nd(&b.month).cmp(&parse_nd(&a.month))); let total_spent = summed.iter().map(|m| m.total).sum(); //let mut acct_sums = months.clone(); MonthlyTotals { summaries: summed, totalSpent: total_spent, acctSums: months.clone() } } // TODO need to understand the type option for 'function overloading' because // the following is not good fn since_until( since_p: Option<String>, until_p: Option<String>, mut months_p: Option<String>, // :(:( year_p: Option<String> ) -> (NaiveDate, NaiveDate) { let until = year_p.as_ref().map(|y| NaiveDate::from_ymd(y.parse::<i32>().unwrap(), 12, 31)) .unwrap_or({ until_p.map(|s| parse_nd(&s)).unwrap_or({ let now = Local::now().naive_local().date(); if (now.month() == 12) { NaiveDate::from_ymd(now.year(), 12, 31) } else { NaiveDate::from_ymd(now.year(), now.month() + 1, 1).pred() } }) }); months_p = year_p.map(|y| "12".to_string()).or(months_p); let since = since_p.map(|s| parse_nd(&s)).unwrap_or({ let months_since = months_p.map(|m| m.parse().unwrap()).unwrap_or(6); // yes I've (sort of) done the following twice, and it's crappy both times let mut curr_year = until.year(); let mut curr_month = until.month(); (0..months_since - 1).for_each(|i| { if curr_month == 1 
{ curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; }); NaiveDate::from_ymd(curr_year, curr_month, 1) }); (since, until) } fn months_between(since: &NaiveDate, until: &NaiveDate) -> u32 { let mut curr_year = until.year(); let mut curr_month = until.month(); let mut ctr = 0; while curr_year > since.year() || curr_year == since.year() && curr_month > since.month() { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; ctr += 1; } ctr } fn fill_empty_months( since: &NaiveDate, until: &NaiveDate, expenses: &Vec<MonthlyExpenseGroup> ) -> Vec<MonthlyExpenseGroup> { // don't have moment like we do in node:( let mut curr_year = until.year(); let mut curr_month = until.month(); let num_months = months_between(since, until); let mut desired_months = (0..num_months).map(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; NaiveDate::from_ymd(curr_year, curr_month, 1) }).collect::<Vec<_>>(); desired_months.insert(0, NaiveDate::from_ymd(until.year(), until.month(), 1)); let mut cloned_expenses = expenses.clone(); (0..cloned_expenses.len()).for_each(|i| { let mut exp = &mut cloned_expenses[i]; (0..num_months+1).for_each(|_j| { let j = _j as usize; let month_str = format_nd(desired_months[j]); let exp_month = exp.monthlyTotals.get(j).map(|mt| mt.clone()); if (exp_month.is_none() || month_str != exp_month.unwrap().month) { exp.monthlyTotals.insert(j, MonthTotal { month: month_str, total: 0 }); } }); }); cloned_expenses } pub struct MonthlyExpense { name: String, date: NaiveDate, amount: i64, memo: String } struct ExpenseSplit { name: String, date: NaiveDateTime, amount: i64, memo: String } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] struct MonthTotal { month: String, total: i64 } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] pub struct
{ pub name: String, pub total: i64, monthlyTotals: Vec<MonthTotal>, } fn expenses_by_month( transactions: &Vec<Transaction>, accounts: &Vec<Account> ) -> Vec<MonthlyExpenseGroup> { let mut accounts_map = HashMap::new(); for a in accounts { accounts_map.insert(&a.guid, a); } // No need to fold/reduce here like we do in the node version. // That was probably just a mistake there. let mut splits = transactions.iter().flat_map(|tran| { let expenses = tran.splits.iter().filter(|s| accounts_map[&s.account_guid].is_expense()).collect::<Vec<&Split>>(); expenses.iter().map(|e| { ExpenseSplit { name: accounts_map[&e.account_guid].qualified_name(), date: tran.post_date, amount: e.value_num, memo: e.memo.clone() } }).collect::<Vec<_>>() }).collect::<Vec<_>>(); splits.sort_by(|a,b| a.name.cmp(&b.name)); let expense_groups = group_by(splits, |s| s.name.to_string()); let expense_groups_by_month = expense_groups.into_iter().map(|(name, exp_group)| { let mut start = HashMap::<String, Vec<ExpenseSplit>>::new(); let mut exp_splits = group_by(exp_group.into_iter().collect::<Vec<ExpenseSplit>>(), |item| {
MonthlyExpenseGroup
identifier_name
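Note: the middle here is just the struct name `MonthlyExpenseGroup`, but the surrounding expenses_by_month leans heavily on a `group_by` helper to bucket splits first by account name and then by month. The helper's exact Rust signature is not shown in the record, so the following generic Go equivalent (Go 1.18+) is an assumption about its semantics, not a translation:

package main

import "fmt"

// groupBy buckets items by a key function, in the spirit of the group_by
// helper used throughout transaction.rs.
func groupBy[T any, K comparable](items []T, key func(T) K) map[K][]T {
	out := make(map[K][]T)
	for _, it := range items {
		k := key(it)
		out[k] = append(out[k], it)
	}
	return out
}

func main() {
	words := []string{"rent", "rope", "wine"}
	byFirst := groupBy(words, func(s string) byte { return s[0] })
	fmt.Println(len(byFirst['r'])) // 2
}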
transaction.rs
tran_dao = TransactionDao { conn: &conn }; let trans = tran_dao.list(&since_nd, &until_nd); let account_dao = AccountDao { conn: &conn }; let account = account_dao.list().into_iter().find(|acct| { acct.qualified_name() == expense_name }).unwrap(); // :(:( let mut splits = Vec::new(); for t in trans { for s in t.splits { if (s.account_guid == account.guid) { splits.push( TranSplit { account_guid: s.account_guid, transaction_guid: s.transaction_guid, value_num: s.value_num, memo: s.memo, date: t.post_date, description: t.description.clone(), }); } } } splits } #[derive(Debug)] #[derive(Serialize)] pub struct TranSplit { pub account_guid: String, pub transaction_guid: String, pub value_num: i64, pub memo: String, pub date: NaiveDateTime, pub description: String } pub fn list( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> Vec<Transaction> { let (since_nd, until_nd) = since_until(since, until, months, year); let dao = TransactionDao { conn: &conn }; dao.list(&since_nd, &until_nd) } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotals { summaries: Vec<MonthTotal>, totalSpent: i64, pub acctSums: Vec<MonthlyExpenseGroup> } #[derive(Debug)] #[derive(Serialize)] pub struct AccountSummary { name: String, monthlyTotals: Vec<MonthlyTotal>, } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotal { month: NaiveDate, total: i64 } pub fn monthly_totals<'a>( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> MonthlyTotals { let trans_dao = TransactionDao { conn: &conn }; let account_dao = AccountDao { conn: &conn }; let (since_nd, until_nd) = since_until(since, until, months, year); let trans = trans_dao.list(&since_nd, &until_nd); let mut accounts = account_dao.list(); let mut unfilled_months = expenses_by_month(&trans, &accounts); let mut months = fill_empty_months(&since_nd, &until_nd, &unfilled_months); months.sort_by(|a, b| b.total.cmp(&a.total)); let all_months = months.iter().flat_map(|m| &m.monthlyTotals); let grouped = group_by(all_months.collect::<Vec<_>>(), |m| m.month.clone()); let mut summed = grouped.into_iter().map(|(i, month_summary)| { MonthTotal {
}).collect::<Vec<_>>(); summed.sort_by(|a, b| parse_nd(&b.month).cmp(&parse_nd(&a.month))); let total_spent = summed.iter().map(|m| m.total).sum(); //let mut acct_sums = months.clone(); MonthlyTotals { summaries: summed, totalSpent: total_spent, acctSums: months.clone() } } // TODO need to understand the type option for 'function overloading' because // the following is not good fn since_until( since_p: Option<String>, until_p: Option<String>, mut months_p: Option<String>, // :(:( year_p: Option<String> ) -> (NaiveDate, NaiveDate) { let until = year_p.as_ref().map(|y| NaiveDate::from_ymd(y.parse::<i32>().unwrap(), 12, 31)) .unwrap_or({ until_p.map(|s| parse_nd(&s)).unwrap_or({ let now = Local::now().naive_local().date(); if (now.month() == 12) { NaiveDate::from_ymd(now.year(), 12, 31) } else { NaiveDate::from_ymd(now.year(), now.month() + 1, 1).pred() } }) }); months_p = year_p.map(|y| "12".to_string()).or(months_p); let since = since_p.map(|s| parse_nd(&s)).unwrap_or({ let months_since = months_p.map(|m| m.parse().unwrap()).unwrap_or(6); // yes I've (sort of) done the following twice, and it's crappy both times let mut curr_year = until.year(); let mut curr_month = until.month(); (0..months_since - 1).for_each(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; }); NaiveDate::from_ymd(curr_year, curr_month, 1) }); (since, until) } fn months_between(since: &NaiveDate, until: &NaiveDate) -> u32 { let mut curr_year = until.year(); let mut curr_month = until.month(); let mut ctr = 0; while curr_year > since.year() || curr_year == since.year() && curr_month > since.month() { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; ctr += 1; } ctr } fn fill_empty_months( since: &NaiveDate, until: &NaiveDate, expenses: &Vec<MonthlyExpenseGroup> ) -> Vec<MonthlyExpenseGroup> { // don't have moment like we do in node:( let mut curr_year = until.year(); let mut curr_month = until.month(); let num_months = months_between(since, until); let mut desired_months = (0..num_months).map(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; NaiveDate::from_ymd(curr_year, curr_month, 1) }).collect::<Vec<_>>(); desired_months.insert(0, NaiveDate::from_ymd(until.year(), until.month(), 1)); let mut cloned_expenses = expenses.clone(); (0..cloned_expenses.len()).for_each(|i| { let mut exp = &mut cloned_expenses[i]; (0..num_months+1).for_each(|_j| { let j = _j as usize; let month_str = format_nd(desired_months[j]); let exp_month = exp.monthlyTotals.get(j).map(|mt| mt.clone()); if (exp_month.is_none() || month_str != exp_month.unwrap().month) { exp.monthlyTotals.insert(j, MonthTotal { month: month_str, total: 0 }); } }); }); cloned_expenses } pub struct MonthlyExpense { name: String, date: NaiveDate, amount: i64, memo: String } struct ExpenseSplit { name: String, date: NaiveDateTime, amount: i64, memo: String } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] struct MonthTotal { month: String, total: i64 } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyExpenseGroup { pub name: String, pub total: i64, monthlyTotals: Vec<MonthTotal>, } fn expenses_by_month( transactions: &Vec<Transaction>, accounts: &Vec<Account> ) -> Vec<MonthlyExpenseGroup> { let mut accounts_map = HashMap::new(); for a in accounts { accounts_map.insert(&a.guid, a); } // No need to fold/reduce here like we do in the node version. // That was probably just a mistake there. 
let mut splits = transactions.iter().flat_map(|tran| { let expenses = tran.splits.iter().filter(|s| accounts_map[&s.account_guid].is_expense()).collect::<Vec<&Split>>(); expenses.iter().map(|e| { ExpenseSplit { name: accounts_map[&e.account_guid].qualified_name(), date: tran.post_date, amount: e.value_num, memo: e.memo.clone() } }).collect::<Vec<_>>() }).collect::<Vec<_>>(); splits.sort_by(|a,b| a.name.cmp(&b.name)); let expense_groups = group_by(splits, |s| s.name.to_string()); let expense_groups_by_month = expense_groups.into_iter().map(|(name, exp_group)| { let mut start = HashMap::<String, Vec<ExpenseSplit>>::new(); let mut exp_splits = group_by(exp_group.into_iter().collect::<Vec<ExpenseSplit>>(), |item| {
month: i, total: month_summary.into_iter().map(|m| m.total).sum() }
random_line_split
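Note: the middle of this record sits inside monthly_totals, where every group's per-month totals are flattened, re-grouped by month, and summed into the `summaries` field. The fold is simpler to see in isolation; a sketch, with the struct name borrowed from the source and everything else illustrative:

package main

import "fmt"

// MonthTotal mirrors the struct of the same name in transaction.rs.
type MonthTotal struct {
	Month string
	Total int64
}

// sumByMonth re-groups per-account month totals and sums them per month,
// like the grouped/summed step inside monthly_totals.
func sumByMonth(all []MonthTotal) map[string]int64 {
	sums := make(map[string]int64)
	for _, mt := range all {
		sums[mt.Month] += mt.Total
	}
	return sums
}

func main() {
	fmt.Println(sumByMonth([]MonthTotal{{"2020-01", 5}, {"2020-01", 7}})) // map[2020-01:12]
}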
glyph_brush.rs
{ fn pixel_bounds_custom_layout<'a, S, L>( &mut self, section: S, custom_layout: &L, ) -> Option<Rect<i32>> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].pixel_bounds() } fn glyphs_custom_layout<'a, 'b, S, L>( &'b mut self, section: S, custom_layout: &L, ) -> PositionedGlyphIter<'b, 'font> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].glyphs() } fn fonts(&self) -> &[Font<'font>] { &self.fonts } } impl<'font, H: BuildHasher> GlyphBrush<'font, H> { /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Used to provide custom `GlyphPositioner` logic, if using built-in /// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue) /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where G: GlyphPositioner, S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } let section_hash = self.cache_glyphs(&section, custom_layout); self.section_buffer.push(section_hash); } /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). 
/// /// ```no_run /// # use glyph_brush::*; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.queue(Section { /// text: "Hello glyph_brush", /// ..Section::default() /// }); /// ``` pub fn queue<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { profile_scope!("glyph_brush_queue"); let section = section.into(); let layout = section.layout; self.queue_custom_layout(section, &layout) } #[inline] fn hash<T: Hash>(&self, hashable: &T) -> SectionHash { let mut s = self.section_hasher.build_hasher(); hashable.hash(&mut s); s.finish() } /// Returns the calculate_glyph_cache key for this sections glyphs fn cache_glyphs<L>(&mut self, section: &VariedSection<'_>, layout: &L) -> SectionHash where L: GlyphPositioner, { profile_scope!("glyph_brush_cache_glyphs"); let section_hash = self.hash(&(section, layout)); if self.cache_glyph_positioning { if let Entry::Vacant(entry) = self.calculate_glyph_cache.entry(section_hash) { let geometry = SectionGeometry::from(section); entry.insert(GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }); } } else { let geometry = SectionGeometry::from(section); self.calculate_glyph_cache.insert( section_hash, GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }, ); } section_hash } /// Processes all queued sections, calling texture update logic when necessary & /// returning a `BrushAction`. /// See [`queue`](struct.GlyphBrush.html#method.queue). /// /// Two closures are required: /// * `update_texture` is called when new glyph texture data has been drawn for update in the /// actual texture. /// The arguments are the rect position of the data in the texture & the byte data itself /// which is a single `u8` alpha value per pixel. /// * `to_vertex` maps a single glyph's `GlyphVertex` data into a generic vertex type. The /// mapped vertices are returned in an `Ok(BrushAction::Draw(vertices))` result. /// It's recommended to use a single vertex per glyph quad for best performance. /// /// Trims the cache, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # fn main() -> Result<(), BrushError> { /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// # let update_texture = |_, _| {}; /// # let into_vertex = |_| (); /// glyph_brush.process_queued( /// (1024, 768), /// |rect, tex_data| update_texture(rect, tex_data), /// |vertex_data| into_vertex(vertex_data), /// )? /// # ; /// # Ok(()) /// # } /// ``` pub fn process_queued<V, F1, F2>( &mut self, (screen_w, screen_h): (u32, u32), update_texture: F1, to_vertex: F2, ) -> Result<BrushAction<V>, BrushError> where F1: FnMut(Rect<u32>, &[u8]), F2: Fn(GlyphVertex) -> V, { profile_scope!("glyph_brush_process_queue"); let current_text_state = self.hash(&(&self.section_buffer, screen_w, screen_h)); let result = if !self.cache_glyph_drawing || self.last_draw.text_state != current_text_state { let mut some_text = false; for section_hash in &self.section_buffer { let GlyphedSection { ref glyphs, .. 
} = self.calculate_glyph_cache[section_hash]; for &(ref glyph, _, font_id) in glyphs { self.texture_cache.queue_glyph(font_id.0, glyph.clone()); some_text = true; } } if some_text && self.texture_cache.cache_queued(update_texture).is_err() { let (width, height) = self.texture_cache.dimensions(); return Err(BrushError::TextureTooSmall { suggested: (width * 2, height * 2), }); } let verts: Vec<V> = if some_text { let sections: Vec<_> = self .section_buffer .iter() .map(|hash| &self.calculate_glyph_cache[hash]) .collect(); let mut verts = Vec::with_capacity( sections .iter() .map(|section| section.glyphs.len()) .sum::<usize>(), ); for &GlyphedSection { ref glyphs, bounds, z, } in sections { verts.extend(glyphs.iter().filter_map(|(glyph, color, font_id)| { match self.texture_cache.rect_for(font_id.0, glyph) { Err(err) => { error!("Cache miss?: {:?}, {:?}: {}", font_id, glyph, err);
Ok(None) => None, Ok(Some((tex_coords, pixel_coords))) => { if pixel_coords.min.x as f32 > bounds.max.x || pixel_coords.min.y as f32 > bounds.max.y || bounds.min.x > pixel_coords.max.x as f32 || bounds.min.y > pixel_coords.max.y as f32 { // glyph is totally outside the bounds None } else { Some(to_vertex(GlyphVertex { tex_coords, pixel_coords, bounds, screen_dimensions: (screen_w as f32, screen_h as f32), color: *color, z, })) } } } })); } verts } else { vec![] }; self.last_draw
None }
random_line_split
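Note: the glyph_brush.rs record above shows the crate's central caching idea — cache_glyphs keys calculate_glyph_cache on a hash of (section, layout) and only recomputes on a vacant entry. A compact Go sketch of hash-keyed memoization; FNV-1a stands in for the brush's configurable BuildHasher (an assumption, not the crate's API):

package main

import (
	"fmt"
	"hash/fnv"
)

// sectionHash derives a cache key from a section's content, playing the
// role of GlyphBrush's section_hasher.
func sectionHash(text string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(text))
	return h.Sum64()
}

// cached recomputes the layout only on a cache miss, mirroring the
// Entry::Vacant branch in cache_glyphs.
func cached(cache map[uint64]string, key uint64, compute func() string) string {
	if v, ok := cache[key]; ok {
		return v
	}
	v := compute()
	cache[key] = v
	return v
}

func main() {
	cache := map[uint64]string{}
	k := sectionHash("Hello glyph_brush")
	fmt.Println(cached(cache, k, func() string { return "laid-out glyphs" }))
}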
glyph_brush.rs
{ fn pixel_bounds_custom_layout<'a, S, L>( &mut self, section: S, custom_layout: &L, ) -> Option<Rect<i32>> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].pixel_bounds() } fn glyphs_custom_layout<'a, 'b, S, L>( &'b mut self, section: S, custom_layout: &L, ) -> PositionedGlyphIter<'b, 'font> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].glyphs() } fn fonts(&self) -> &[Font<'font>] { &self.fonts } } impl<'font, H: BuildHasher> GlyphBrush<'font, H> { /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Used to provide custom `GlyphPositioner` logic, if using built-in /// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue) /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where G: GlyphPositioner, S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } let section_hash = self.cache_glyphs(&section, custom_layout); self.section_buffer.push(section_hash); } /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). 
/// /// ```no_run /// # use glyph_brush::*; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.queue(Section { /// text: "Hello glyph_brush", /// ..Section::default() /// }); /// ``` pub fn queue<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { profile_scope!("glyph_brush_queue"); let section = section.into(); let layout = section.layout; self.queue_custom_layout(section, &layout) } #[inline] fn hash<T: Hash>(&self, hashable: &T) -> SectionHash { let mut s = self.section_hasher.build_hasher(); hashable.hash(&mut s); s.finish() } /// Returns the calculate_glyph_cache key for this sections glyphs fn cache_glyphs<L>(&mut self, section: &VariedSection<'_>, layout: &L) -> SectionHash where L: GlyphPositioner, { profile_scope!("glyph_brush_cache_glyphs"); let section_hash = self.hash(&(section, layout)); if self.cache_glyph_positioning { if let Entry::Vacant(entry) = self.calculate_glyph_cache.entry(section_hash) { let geometry = SectionGeometry::from(section); entry.insert(GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }); } } else { let geometry = SectionGeometry::from(section); self.calculate_glyph_cache.insert( section_hash, GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }, ); } section_hash } /// Processes all queued sections, calling texture update logic when necessary & /// returning a `BrushAction`. /// See [`queue`](struct.GlyphBrush.html#method.queue). /// /// Two closures are required: /// * `update_texture` is called when new glyph texture data has been drawn for update in the /// actual texture. /// The arguments are the rect position of the data in the texture & the byte data itself /// which is a single `u8` alpha value per pixel. /// * `to_vertex` maps a single glyph's `GlyphVertex` data into a generic vertex type. The /// mapped vertices are returned in an `Ok(BrushAction::Draw(vertices))` result. /// It's recommended to use a single vertex per glyph quad for best performance. /// /// Trims the cache, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # fn main() -> Result<(), BrushError> { /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// # let update_texture = |_, _| {}; /// # let into_vertex = |_| (); /// glyph_brush.process_queued( /// (1024, 768), /// |rect, tex_data| update_texture(rect, tex_data), /// |vertex_data| into_vertex(vertex_data), /// )? /// # ; /// # Ok(()) /// # } /// ``` pub fn process_queued<V, F1, F2>( &mut self, (screen_w, screen_h): (u32, u32), update_texture: F1, to_vertex: F2, ) -> Result<BrushAction<V>, BrushError> where F1: FnMut(Rect<u32>, &[u8]), F2: Fn(GlyphVertex) -> V, { profile_scope!("glyph_brush_process_queue"); let current_text_state = self.hash(&(&self.section_buffer, screen_w, screen_h)); let result = if !self.cache_glyph_drawing || self.last_draw.text_state != current_text_state { let mut some_text = false; for section_hash in &self.section_buffer { let GlyphedSection { ref glyphs, .. 
} = self.calculate_glyph_cache[section_hash]; for &(ref glyph, _, font_id) in glyphs { self.texture_cache.queue_glyph(font_id.0, glyph.clone()); some_text = true; } } if some_text && self.texture_cache.cache_queued(update_texture).is_err()
let verts: Vec<V> = if some_text { let sections: Vec<_> = self .section_buffer .iter() .map(|hash| &self.calculate_glyph_cache[hash]) .collect(); let mut verts = Vec::with_capacity( sections .iter() .map(|section| section.glyphs.len()) .sum::<usize>(), ); for &GlyphedSection { ref glyphs, bounds, z, } in sections { verts.extend(glyphs.iter().filter_map(|(glyph, color, font_id)| { match self.texture_cache.rect_for(font_id.0, glyph) { Err(err) => { error!("Cache miss?: {:?}, {:?}: {}", font_id, glyph, err); None } Ok(None) => None, Ok(Some((tex_coords, pixel_coords))) => { if pixel_coords.min.x as f32 > bounds.max.x || pixel_coords.min.y as f32 > bounds.max.y || bounds.min.x > pixel_coords.max.x as f32 || bounds.min.y > pixel_coords.max.y as f32 { // glyph is totally outside the bounds None } else { Some(to_vertex(GlyphVertex { tex_coords, pixel_coords, bounds, screen_dimensions: (screen_w as f32, screen_h as f32), color: *color, z, })) } } } })); } verts } else { vec![] }; self.last_draw
{ let (width, height) = self.texture_cache.dimensions(); return Err(BrushError::TextureTooSmall { suggested: (width * 2, height * 2), }); }
conditional_block
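Note: this record's middle is the `BrushError::TextureTooSmall` branch — when queued glyphs don't fit, process_queued bails out with doubled suggested dimensions so the caller can resize the texture and try again. The caller-side retry protocol is implied rather than shown in the source, so this Go sketch of it uses hypothetical stand-ins (cacheQueued, resize) for the real brush and texture calls:

package main

import "fmt"

// errTooSmall stands in for BrushError::TextureTooSmall.
type errTooSmall struct{ w, h uint32 }

func (e errTooSmall) Error() string { return fmt.Sprintf("texture too small, try %dx%d", e.w, e.h) }

// drawWithRetry sketches the intended caller loop: on a too-small error,
// resize to the suggested dimensions and retry.
func drawWithRetry(cacheQueued func(w, h uint32) error, resize func(w, h uint32), w, h uint32) {
	for {
		err := cacheQueued(w, h)
		if err == nil {
			return
		}
		if ts, ok := err.(errTooSmall); ok {
			w, h = ts.w, ts.h
			resize(w, h)
			continue
		}
		panic(err)
	}
}

func main() {
	calls := 0
	drawWithRetry(func(w, h uint32) error {
		calls++
		if w < 1024 {
			return errTooSmall{w * 2, h * 2}
		}
		return nil
	}, func(w, h uint32) {}, 256, 256)
	fmt.Println(calls) // 3 attempts: 256 -> 512 -> 1024
}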
glyph_brush.rs
).build(); /// glyph_brush.queue(Section { /// text: "Hello glyph_brush", /// ..Section::default() /// }); /// ``` pub fn queue<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { profile_scope!("glyph_brush_queue"); let section = section.into(); let layout = section.layout; self.queue_custom_layout(section, &layout) } #[inline] fn hash<T: Hash>(&self, hashable: &T) -> SectionHash { let mut s = self.section_hasher.build_hasher(); hashable.hash(&mut s); s.finish() } /// Returns the calculate_glyph_cache key for this sections glyphs fn cache_glyphs<L>(&mut self, section: &VariedSection<'_>, layout: &L) -> SectionHash where L: GlyphPositioner, { profile_scope!("glyph_brush_cache_glyphs"); let section_hash = self.hash(&(section, layout)); if self.cache_glyph_positioning { if let Entry::Vacant(entry) = self.calculate_glyph_cache.entry(section_hash) { let geometry = SectionGeometry::from(section); entry.insert(GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }); } } else { let geometry = SectionGeometry::from(section); self.calculate_glyph_cache.insert( section_hash, GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }, ); } section_hash } /// Processes all queued sections, calling texture update logic when necessary & /// returning a `BrushAction`. /// See [`queue`](struct.GlyphBrush.html#method.queue). /// /// Two closures are required: /// * `update_texture` is called when new glyph texture data has been drawn for update in the /// actual texture. /// The arguments are the rect position of the data in the texture & the byte data itself /// which is a single `u8` alpha value per pixel. /// * `to_vertex` maps a single glyph's `GlyphVertex` data into a generic vertex type. The /// mapped vertices are returned in an `Ok(BrushAction::Draw(vertices))` result. /// It's recommended to use a single vertex per glyph quad for best performance. /// /// Trims the cache, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # fn main() -> Result<(), BrushError> { /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// # let update_texture = |_, _| {}; /// # let into_vertex = |_| (); /// glyph_brush.process_queued( /// (1024, 768), /// |rect, tex_data| update_texture(rect, tex_data), /// |vertex_data| into_vertex(vertex_data), /// )? /// # ; /// # Ok(()) /// # } /// ``` pub fn process_queued<V, F1, F2>( &mut self, (screen_w, screen_h): (u32, u32), update_texture: F1, to_vertex: F2, ) -> Result<BrushAction<V>, BrushError> where F1: FnMut(Rect<u32>, &[u8]), F2: Fn(GlyphVertex) -> V, { profile_scope!("glyph_brush_process_queue"); let current_text_state = self.hash(&(&self.section_buffer, screen_w, screen_h)); let result = if !self.cache_glyph_drawing || self.last_draw.text_state != current_text_state { let mut some_text = false; for section_hash in &self.section_buffer { let GlyphedSection { ref glyphs, .. 
} = self.calculate_glyph_cache[section_hash]; for &(ref glyph, _, font_id) in glyphs { self.texture_cache.queue_glyph(font_id.0, glyph.clone()); some_text = true; } } if some_text && self.texture_cache.cache_queued(update_texture).is_err() { let (width, height) = self.texture_cache.dimensions(); return Err(BrushError::TextureTooSmall { suggested: (width * 2, height * 2), }); } let verts: Vec<V> = if some_text { let sections: Vec<_> = self .section_buffer .iter() .map(|hash| &self.calculate_glyph_cache[hash]) .collect(); let mut verts = Vec::with_capacity( sections .iter() .map(|section| section.glyphs.len()) .sum::<usize>(), ); for &GlyphedSection { ref glyphs, bounds, z, } in sections { verts.extend(glyphs.iter().filter_map(|(glyph, color, font_id)| { match self.texture_cache.rect_for(font_id.0, glyph) { Err(err) => { error!("Cache miss?: {:?}, {:?}: {}", font_id, glyph, err); None } Ok(None) => None, Ok(Some((tex_coords, pixel_coords))) => { if pixel_coords.min.x as f32 > bounds.max.x || pixel_coords.min.y as f32 > bounds.max.y || bounds.min.x > pixel_coords.max.x as f32 || bounds.min.y > pixel_coords.max.y as f32 { // glyph is totally outside the bounds None } else { Some(to_vertex(GlyphVertex { tex_coords, pixel_coords, bounds, screen_dimensions: (screen_w as f32, screen_h as f32), color: *color, z, })) } } } })); } verts } else { vec![] }; self.last_draw.text_state = current_text_state; BrushAction::Draw(verts) } else { BrushAction::ReDraw }; self.clear_section_buffer(); Ok(result) } /// Rebuilds the logical texture cache with new dimensions. Should be avoided if possible. /// /// # Example /// /// ```no_run /// # use glyph_brush::GlyphBrushBuilder; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.resize_texture(512, 512); /// ``` pub fn resize_texture(&mut self, new_width: u32, new_height: u32) { self.texture_cache .to_builder() .dimensions(new_width, new_height) .rebuild(&mut self.texture_cache); self.last_draw = LastDrawInfo::default(); } /// Returns the logical texture cache pixel dimensions `(width, height)`. pub fn texture_dimensions(&self) -> (u32, u32) { self.texture_cache.dimensions() } fn clear_section_buffer(&mut self) { if self.cache_glyph_positioning { // clear section_buffer & trim calculate_glyph_cache to active sections let active: hashbrown::HashSet<_> = self .section_buffer .drain(..) .chain(self.keep_in_cache.drain()) .collect(); self.calculate_glyph_cache .retain(|key, _| active.contains(key)); } else { self.section_buffer.clear(); self.calculate_glyph_cache.clear(); self.keep_in_cache.clear(); } } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. /// /// # Example /// /// ```no_run /// use glyph_brush::{GlyphBrushBuilder, Section}; /// # fn main() { /// /// // dejavu is built as default `FontId(0)` /// let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// /// // some time later, add another font referenced by a new `FontId` /// let open_sans_italic: &[u8] = include_bytes!("../../fonts/OpenSans-Italic.ttf"); /// let open_sans_italic_id = glyph_brush.add_font_bytes(open_sans_italic); /// # } /// ``` pub fn add_font_bytes<'a: 'font, B: Into<SharedBytes<'a>>>(&mut self, font_data: B) -> FontId
{ self.add_font(Font::from_bytes(font_data.into()).unwrap()) }
identifier_body
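Note: the middle here is the one-line body of add_font_bytes. The design is worth naming: a FontId is nothing but an index into the brush's font list, so the build-time font is always FontId(0) and add_font returns the index of the slot it just pushed. A Go sketch of the same index-as-handle pattern (the rusttype parsing step is elided):

package main

import "fmt"

// FontId is just an index into the brush's font list, as in glyph_brush.
type FontId int

type brush struct{ fonts [][]byte }

// addFont appends and returns the new index, mirroring add_font /
// add_font_bytes.
func (b *brush) addFont(data []byte) FontId {
	b.fonts = append(b.fonts, data)
	return FontId(len(b.fonts) - 1)
}

func main() {
	b := &brush{fonts: [][]byte{{0}}} // FontId(0): the font added at build time
	fmt.Println(b.addFont([]byte{1})) // 1
}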
glyph_brush.rs
= self.section_hasher.build_hasher(); hashable.hash(&mut s); s.finish() } /// Returns the calculate_glyph_cache key for this sections glyphs fn cache_glyphs<L>(&mut self, section: &VariedSection<'_>, layout: &L) -> SectionHash where L: GlyphPositioner, { profile_scope!("glyph_brush_cache_glyphs"); let section_hash = self.hash(&(section, layout)); if self.cache_glyph_positioning { if let Entry::Vacant(entry) = self.calculate_glyph_cache.entry(section_hash) { let geometry = SectionGeometry::from(section); entry.insert(GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }); } } else { let geometry = SectionGeometry::from(section); self.calculate_glyph_cache.insert( section_hash, GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }, ); } section_hash } /// Processes all queued sections, calling texture update logic when necessary & /// returning a `BrushAction`. /// See [`queue`](struct.GlyphBrush.html#method.queue). /// /// Two closures are required: /// * `update_texture` is called when new glyph texture data has been drawn for update in the /// actual texture. /// The arguments are the rect position of the data in the texture & the byte data itself /// which is a single `u8` alpha value per pixel. /// * `to_vertex` maps a single glyph's `GlyphVertex` data into a generic vertex type. The /// mapped vertices are returned in an `Ok(BrushAction::Draw(vertices))` result. /// It's recommended to use a single vertex per glyph quad for best performance. /// /// Trims the cache, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # fn main() -> Result<(), BrushError> { /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// # let update_texture = |_, _| {}; /// # let into_vertex = |_| (); /// glyph_brush.process_queued( /// (1024, 768), /// |rect, tex_data| update_texture(rect, tex_data), /// |vertex_data| into_vertex(vertex_data), /// )? /// # ; /// # Ok(()) /// # } /// ``` pub fn process_queued<V, F1, F2>( &mut self, (screen_w, screen_h): (u32, u32), update_texture: F1, to_vertex: F2, ) -> Result<BrushAction<V>, BrushError> where F1: FnMut(Rect<u32>, &[u8]), F2: Fn(GlyphVertex) -> V, { profile_scope!("glyph_brush_process_queue"); let current_text_state = self.hash(&(&self.section_buffer, screen_w, screen_h)); let result = if !self.cache_glyph_drawing || self.last_draw.text_state != current_text_state { let mut some_text = false; for section_hash in &self.section_buffer { let GlyphedSection { ref glyphs, .. 
} = self.calculate_glyph_cache[section_hash]; for &(ref glyph, _, font_id) in glyphs { self.texture_cache.queue_glyph(font_id.0, glyph.clone()); some_text = true; } } if some_text && self.texture_cache.cache_queued(update_texture).is_err() { let (width, height) = self.texture_cache.dimensions(); return Err(BrushError::TextureTooSmall { suggested: (width * 2, height * 2), }); } let verts: Vec<V> = if some_text { let sections: Vec<_> = self .section_buffer .iter() .map(|hash| &self.calculate_glyph_cache[hash]) .collect(); let mut verts = Vec::with_capacity( sections .iter() .map(|section| section.glyphs.len()) .sum::<usize>(), ); for &GlyphedSection { ref glyphs, bounds, z, } in sections { verts.extend(glyphs.iter().filter_map(|(glyph, color, font_id)| { match self.texture_cache.rect_for(font_id.0, glyph) { Err(err) => { error!("Cache miss?: {:?}, {:?}: {}", font_id, glyph, err); None } Ok(None) => None, Ok(Some((tex_coords, pixel_coords))) => { if pixel_coords.min.x as f32 > bounds.max.x || pixel_coords.min.y as f32 > bounds.max.y || bounds.min.x > pixel_coords.max.x as f32 || bounds.min.y > pixel_coords.max.y as f32 { // glyph is totally outside the bounds None } else { Some(to_vertex(GlyphVertex { tex_coords, pixel_coords, bounds, screen_dimensions: (screen_w as f32, screen_h as f32), color: *color, z, })) } } } })); } verts } else { vec![] }; self.last_draw.text_state = current_text_state; BrushAction::Draw(verts) } else { BrushAction::ReDraw }; self.clear_section_buffer(); Ok(result) } /// Rebuilds the logical texture cache with new dimensions. Should be avoided if possible. /// /// # Example /// /// ```no_run /// # use glyph_brush::GlyphBrushBuilder; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.resize_texture(512, 512); /// ``` pub fn resize_texture(&mut self, new_width: u32, new_height: u32) { self.texture_cache .to_builder() .dimensions(new_width, new_height) .rebuild(&mut self.texture_cache); self.last_draw = LastDrawInfo::default(); } /// Returns the logical texture cache pixel dimensions `(width, height)`. pub fn texture_dimensions(&self) -> (u32, u32) { self.texture_cache.dimensions() } fn clear_section_buffer(&mut self) { if self.cache_glyph_positioning { // clear section_buffer & trim calculate_glyph_cache to active sections let active: hashbrown::HashSet<_> = self .section_buffer .drain(..) .chain(self.keep_in_cache.drain()) .collect(); self.calculate_glyph_cache .retain(|key, _| active.contains(key)); } else { self.section_buffer.clear(); self.calculate_glyph_cache.clear(); self.keep_in_cache.clear(); } } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. 
/// /// # Example /// /// ```no_run /// use glyph_brush::{GlyphBrushBuilder, Section}; /// # fn main() { /// /// // dejavu is built as default `FontId(0)` /// let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// /// // some time later, add another font referenced by a new `FontId` /// let open_sans_italic: &[u8] = include_bytes!("../../fonts/OpenSans-Italic.ttf"); /// let open_sans_italic_id = glyph_brush.add_font_bytes(open_sans_italic); /// # } /// ``` pub fn add_font_bytes<'a: 'font, B: Into<SharedBytes<'a>>>(&mut self, font_data: B) -> FontId { self.add_font(Font::from_bytes(font_data.into()).unwrap()) } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. pub fn add_font<'a: 'font>(&mut self, font_data: Font<'a>) -> FontId { self.fonts.push(font_data); FontId(self.fonts.len() - 1) } /// Retains the section in the cache as if it had been used in the last draw-frame. /// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). pub fn
keep_cached_custom_layout
identifier_name
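Note: the middle is the method name `keep_cached_custom_layout`, which feeds the keep_in_cache set consumed by clear_section_buffer: after each frame the glyph cache is trimmed to the section hashes actually drawn plus the explicitly kept ones. The retain step, sketched generically in Go:

package main

import "fmt"

// retain drops every cache entry whose key is not in the active set,
// like the calculate_glyph_cache.retain call in clear_section_buffer.
func retain[V any](cache map[uint64]V, active map[uint64]bool) {
	for k := range cache {
		if !active[k] {
			delete(cache, k)
		}
	}
}

func main() {
	cache := map[uint64]string{1: "a", 2: "b"}
	retain(cache, map[uint64]bool{1: true})
	fmt.Println(cache) // map[1:a]
}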
main.go
AtWords()) if err != nil { panic(err) } if err := blocksWidget.Write("Latest block height " + networkStatus.Get("result.sync_info.latest_block_height").String() + "\n"); err != nil { panic(err) } // Transaction parsing widget transactionWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := transactionWidget.Write("Transactions will appear as soon as they are confirmed in a block.\n\n"); err != nil { panic(err) } validatorWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := validatorWidget.Write("List available validators.\n\n"); err != nil { panic(err) } peerWidget, err := text.New() if err != nil { panic(err) } if err := peerWidget.Write("0"); err != nil { panic(err) } healthWidget, err := text.New() if err != nil { panic(err) } if err := healthWidget.Write("🔴 no connection"); err != nil { panic(err) } timeWidget, err := text.New() if err != nil { panic(err) } currentTime := time.Now() if err := timeWidget.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } maxBlocksizeWidget, err := text.New() maxBlockSize := gjson.Get(getFromRPC("consensus_params"), "result.consensus_params.block.max_bytes").Int() if err != nil { panic(err) } if err := maxBlocksizeWidget.Write(fmt.Sprintf("%s", byteCountDecimal(maxBlockSize))); err != nil { panic(err) } // system powered widgets go writeTime(ctx, timeWidget, 1*time.Second) // rpc widgets go writePeers(ctx, peerWidget, 1*time.Second) go writeHealth(ctx, healthWidget, 500*time.Millisecond, connectionSignal) // websocket powered widgets go writeValidators(ctx, validatorWidget, connectionSignal) go writeBlocks(ctx, blocksWidget, connectionSignal) go writeTransactions(ctx, transactionWidget, connectionSignal) // blockchain download gauge syncWidget, err := gauge.New( gauge.Height(1), gauge.Color(cell.ColorBlue), gauge.Border(linestyle.Light), gauge.BorderTitle("Blockchain download %"), ) if err != nil { panic(err) } if networkStatus.Get("result.sync_info.catching_up").String() == "false" { if err := syncWidget.Absolute(100, 100); err != nil { panic(err) } } else { if networkStatus.Get("result.node_info.network").String() == "cosmoshub-3" { go syncGauge(ctx, syncWidget, networkStatus.Get("result.sync_info.latest_block_height").Int()) } else { // There is no way to detect maximum height in the network via RPC or websocket yet if err := syncWidget.Absolute(70, 100); err != nil { panic(err) } } } // Draw Dashboard c, err := container.New( t, container.Border(linestyle.Light), container.BorderTitle("PRESS Q or ESC TO QUIT | Network "+networkStatus.Get("result.node_info.network").String()+" Version "+networkStatus.Get("result.node_info.version").String()), container.BorderColor(cell.ColorNumber(2)), container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Health"), container.PlaceWidget(healthWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("System Time"), container.PlaceWidget(timeWidget), ), ), ), container.Right( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Max Block Size"), container.PlaceWidget(maxBlocksizeWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Connected Peers"), 
container.PlaceWidget(peerWidget), ), ), ), ), ), container.Bottom( container.PlaceWidget(syncWidget), ), ), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Validators"), container.PlaceWidget(validatorWidget), ), ), ), container.Bottom( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Latest Blocks"), container.PlaceWidget(blocksWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Latest Confirmed Transactions"), container.PlaceWidget(transactionWidget), ), ), ), ), ) if err != nil { panic(err) } quitter := func(k *terminalapi.Keyboard) { if k.Key == 'q' || k.Key == 'Q' || k.Key == keyboard.KeyEsc { cancel() } } if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil { panic(err) } } func getFromRPC(endpoint string) string { port := *givenPort resp, _ := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(appRPC + ":" + port + "/" + endpoint) return resp.String()
func getTendermintRPC(endpoint string) string { resp, err := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(tendermintRPC + endpoint) if err != nil { panic(err) } return resp.String() } // writeTime writes the current system time to the timeWidget. // Exits when the context expires. func writeTime(ctx context.Context, t *text.Text, delay time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: currentTime := time.Now() t.Reset() if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } case <-ctx.Done(): return } } } // writeHealth writes the status to the healthWidget. // Exits when the context expires. func writeHealth(ctx context.Context, t *text.Text, delay time.Duration, connectionSignal chan string) { reconnect := false health := gjson.Get(getFromRPC("health"), "result") t.Reset() if health.Exists() { t.Write("🟢 good") } else { t.Write("🔴 no connection") } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: health := gjson.Get(getFromRPC("health"), "result") if health.Exists() { t.Write("🟢 good") if reconnect == true { connectionSignal <- "reconnect" connectionSignal <- "reconnect" connectionSignal <- "reconnect" reconnect = false } } else { t.Write("🔴 no connection") if reconnect == false { connectionSignal <- "no_connection" connectionSignal <- "no_connection" connectionSignal <- "no_connection" reconnect = true } } case <-ctx.Done(): return } } } // writePeers writes the connected Peers to the peerWidget. // Exits when the context expires. func writePeers(ctx context.Context, t *text.Text, delay time.Duration) { peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() t.Reset() if peers != "" { t.Write(peers) } if err := t.Write(peers); err != nil { panic(err) } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: t.Reset() peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() if peers != "" { t.Write(peers) } case <-ctx.Done(): return } } } // writeTransactions writes the latest Transactions to the transactionsWidget. // Exits when the context expires. func writeTransactions(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentTx := gjson.Get(message, "result.data.value.TxResult.result.log") currentTime := time.Now() if currentTx.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02 03:04:05 PM")+"\n"+currentTx.String())); err != nil { panic(err
}
random_line_split
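Note: the main.go records follow one pattern throughout — writeTime, writePeers and writeHealth each run a goroutine where a time.Ticker drives periodic widget refreshes and ctx.Done() ends the loop. Distilled into a reusable helper (the helper itself is illustrative; the loop body matches the source):

package main

import (
	"context"
	"fmt"
	"time"
)

// poll distills the loop shared by writeTime, writePeers and writeHealth:
// refresh on every tick, exit when the context is cancelled.
func poll(ctx context.Context, delay time.Duration, update func()) {
	ticker := time.NewTicker(delay)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			update()
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
	defer cancel()
	poll(ctx, 10*time.Millisecond, func() { fmt.Println("tick") })
}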
main.go
Words()) if err != nil { panic(err) } if err := blocksWidget.Write("Latest block height " + networkStatus.Get("result.sync_info.latest_block_height").String() + "\n"); err != nil { panic(err) } // Transaction parsing widget transactionWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := transactionWidget.Write("Transactions will appear as soon as they are confirmed in a block.\n\n"); err != nil { panic(err) } validatorWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := validatorWidget.Write("List available validators.\n\n"); err != nil { panic(err) } peerWidget, err := text.New() if err != nil { panic(err) } if err := peerWidget.Write("0"); err != nil { panic(err) } healthWidget, err := text.New() if err != nil { panic(err) } if err := healthWidget.Write("🔴 no connection"); err != nil { panic(err) } timeWidget, err := text.New() if err != nil { panic(err) } currentTime := time.Now() if err := timeWidget.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } maxBlocksizeWidget, err := text.New() maxBlockSize := gjson.Get(getFromRPC("consensus_params"), "result.consensus_params.block.max_bytes").Int() if err != nil { panic(err) } if err := maxBlocksizeWidget.Write(fmt.Sprintf("%s", byteCountDecimal(maxBlockSize))); err != nil { panic(err) } // system powered widgets go writeTime(ctx, timeWidget, 1*time.Second) // rpc widgets go writePeers(ctx, peerWidget, 1*time.Second) go writeHealth(ctx, healthWidget, 500*time.Millisecond, connectionSignal) // websocket powered widgets go writeValidators(ctx, validatorWidget, connectionSignal) go writeBlocks(ctx, blocksWidget, connectionSignal) go writeTransactions(ctx, transactionWidget, connectionSignal) // blockchain download gauge syncWidget, err := gauge.New( gauge.Height(1), gauge.Color(cell.ColorBlue), gauge.Border(linestyle.Light), gauge.BorderTitle("Blockchain download %"), ) if err != nil { panic(err) } if networkStatus.Get("result.sync_info.catching_up").String() == "false" { if err := syncWidget.Absolute(100, 100); err != nil { panic(err) } } else { if networkStatus.Get("result.node_info.network").String() == "cosmoshub-3" { go syncGauge(ctx, syncWidget, networkStatus.Get("result.sync_info.latest_block_height").Int()) } else { // There is no way to detect maximum height in the network via RPC or websocket yet if err := syncWidget.Absolute(70, 100); err != nil { panic(err) } } } // Draw Dashboard c, err := container.New( t, container.Border(linestyle.Light), container.BorderTitle("PRESS Q or ESC TO QUIT | Network "+networkStatus.Get("result.node_info.network").String()+" Version "+networkStatus.Get("result.node_info.version").String()), container.BorderColor(cell.ColorNumber(2)), container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Health"), container.PlaceWidget(healthWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("System Time"), container.PlaceWidget(timeWidget), ), ), ), container.Right( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Max Block Size"), container.PlaceWidget(maxBlocksizeWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Connected Peers"), 
container.PlaceWidget(peerWidget), ), ), ), ), ), container.Bottom( container.PlaceWidget(syncWidget), ), ), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Validators"), container.PlaceWidget(validatorWidget), ), ), ), container.Bottom( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Latest Blocks"), container.PlaceWidget(blocksWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Latest Confirmed Transactions"), container.PlaceWidget(transactionWidget), ), ), ), ), ) if err != nil { panic(err) } quitter := func(k *terminalapi.Keyboard) { if k.Key == 'q' || k.Key == 'Q' || k.Key == keyboard.KeyEsc { cancel() } } if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil { panic(err) } } func getFromRPC(endpoint string) string { port := *givenPort resp, _ := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(appRPC + ":" + port + "/" + endpoint) return resp.String() } func getTendermintRPC(endpoint string) string { resp, err := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(tendermintRPC + endpoint) if err != nil { panic(err) } return resp.String() } // writeTime writes the current system time to the timeWidget. // Exits when the context expires. func writeTime(ctx context.Context, t *text.Text, delay time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: currentTime := time.Now() t.Reset() if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } case <-ctx.Done(): return } } } // writeHealth writes the status to the healthWidget. // Exits when the context expires. func writeHealth(ctx context.Context, t *text.Text, delay time.Duration, connectionSignal chan string) { reconnect := false health := gjson.Get(getFromRPC("health"), "result") t.Reset() if health.Exists() { t.Write("🟢 good") } else { t.Write("🔴 no connection") } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: health := gjson.Get(getFromRPC("health"), "result") if health.Exists() { t.Write("🟢 good") if reconnect == true { connectionSignal <- "reconnect" connectionSignal <- "reconnect" connectionSignal <- "reconnect" reconnect = false } } else { t.Write("🔴 no connection") if reconnect == false { connectionSignal <- "no_connection" connectionSignal <- "no_connection" connectionSignal <- "no_connection" reconnect = true } } case <-ctx.Done(): return } } } // writePeers writes the connected Peers to the peerWidget. // Exits when the context expires. func writePeers(ctx
ntext, t *text.Text, delay time.Duration) { peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() t.Reset() if peers != "" { if err := t.Write(peers); err != nil { panic(err) } } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: t.Reset() peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() if peers != "" { t.Write(peers) } case <-ctx.Done(): return } } } // writeTransactions writes the latest Transactions to the transactionsWidget. // Exits when the context expires. func writeTransactions(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentTx := gjson.Get(message, "result.data.value.TxResult.result.log") currentTime := time.Now() if currentTx.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02 03:04:05 PM")+"\n"+currentTx.String())); err != nil {
context.Co
identifier_name
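The suffix above is where the dashboard's polling loops live: each widget updater blocks on a ticker and a cancellation signal. A minimal sketch of that select loop, written in Rust with std channels to keep all examples here in one language; `TextWidget` and the hard-coded peer count stand in for the termdash widget and the `net_info` RPC call:

```rust
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::time::Duration;

// Hypothetical stand-in for the termdash text widget used above.
struct TextWidget(String);

impl TextWidget {
    fn reset(&mut self) { self.0.clear(); }
    fn write(&mut self, s: &str) { self.0.push_str(s); }
}

// Redraw on a fixed interval until a cancellation message arrives,
// mirroring the Go ticker + ctx.Done() select loop.
fn write_peers(widget: &mut TextWidget, cancel: Receiver<()>, delay: Duration) {
    loop {
        match cancel.recv_timeout(delay) {
            Err(RecvTimeoutError::Timeout) => {
                widget.reset();
                widget.write("42"); // real code: peer count from the net_info RPC
            }
            _ => return, // cancelled, or the sender side was dropped
        }
    }
}
```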
main.go
blocksWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := blocksWidget.Write("Latest block height " + networkStatus.Get("result.sync_info.latest_block_height").String() + "\n"); err != nil { panic(err) } // Transaction parsing widget transactionWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := transactionWidget.Write("Transactions will appear as soon as they are confirmed in a block.\n\n"); err != nil { panic(err) } validatorWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := validatorWidget.Write("List available validators.\n\n"); err != nil { panic(err) } peerWidget, err := text.New() if err != nil { panic(err) } if err := peerWidget.Write("0"); err != nil { panic(err) } healthWidget, err := text.New() if err != nil { panic(err) } if err := healthWidget.Write("🔴 no connection"); err != nil { panic(err) } timeWidget, err := text.New() if err != nil { panic(err) } currentTime := time.Now() if err := timeWidget.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } maxBlocksizeWidget, err := text.New() maxBlockSize := gjson.Get(getFromRPC("consensus_params"), "result.consensus_params.block.max_bytes").Int() if err != nil { panic(err) } if err := maxBlocksizeWidget.Write(fmt.Sprintf("%s", byteCountDecimal(maxBlockSize))); err != nil { panic(err) } // system powered widgets go writeTime(ctx, timeWidget, 1*time.Second) // rpc widgets go writePeers(ctx, peerWidget, 1*time.Second) go writeHealth(ctx, healthWidget, 500*time.Millisecond, connectionSignal) // websocket powered widgets go writeValidators(ctx, validatorWidget, connectionSignal) go writeBlocks(ctx, blocksWidget, connectionSignal) go writeTransactions(ctx, transactionWidget, connectionSignal) // blockchain download gauge syncWidget, err := gauge.New( gauge.Height(1), gauge.Color(cell.ColorBlue), gauge.Border(linestyle.Light), gauge.BorderTitle("Blockchain download %"), ) if err != nil { panic(err) } if networkStatus.Get("result.sync_info.catching_up").String() == "false" { if err := syncWidget.Absolute(100, 100); err != nil { panic(err) } } else { if networkStatus.Get("result.node_info.network").String() == "cosmoshub-3" { go syncGauge(ctx, syncWidget, networkStatus.Get("result.sync_info.latest_block_height").Int()) } else { // There is no way to detect maximum height in the network via RPC or websocket yet if err := syncWidget.Absolute(70, 100); err != nil { panic(err) } } } // Draw Dashboard c, err := container.New( t, container.Border(linestyle.Light), container.BorderTitle("PRESS Q or ESC TO QUIT | Network "+networkStatus.Get("result.node_info.network").String()+" Version "+networkStatus.Get("result.node_info.version").String()), container.BorderColor(cell.ColorNumber(2)), container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Health"), container.PlaceWidget(healthWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("System Time"), container.PlaceWidget(timeWidget), ), ), ), container.Right( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Max Block Size"), container.PlaceWidget(maxBlocksizeWidget), ), container.Right( 
container.Border(linestyle.Light), container.BorderTitle("Connected Peers"), container.PlaceWidget(peerWidget), ), ), ), ), ), container.Bottom( container.PlaceWidget(syncWidget), ), ), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Validators"), container.PlaceWidget(validatorWidget), ), ), ), container.Bottom( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Latest Blocks"), container.PlaceWidget(blocksWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Latest Confirmed Transactions"), container.PlaceWidget(transactionWidget), ), ), ), ), ) if err != nil { panic(err) } quitter := func(k *terminalapi.Keyboard) { if k.Key == 'q' || k.Key == 'Q' || k.Key == keyboard.KeyEsc { cancel() } } if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil { panic(err) } } func getFromRPC(endpoint string) string { port := *givenPort resp, _ := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(appRPC + ":" + port + "/" + endpoint) return resp.String() } func getTendermintRPC(endpoint string) string { resp, err := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(tendermintRPC + endpoint) if err != nil { panic(err) } return resp.String() } // writeTime writes the current system time to the timeWidget. // Exits when the context expires. func writeTime(ctx context.Context, t *text.Text, delay time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: currentTime := time.Now() t.Reset() if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } case <-ctx.Done(): return } } } // writeHealth writes the status to the healthWidget. // Exits when the context expires. func writeHealth(ctx context.Context, t *text.Text, delay time.Duration, connectionSignal chan string) { reconnect := false health := gjson.Get(getFromRPC("health"), "result") t.Reset() if health.Exists() { t.Write("🟢 good") } else { t.Write("🔴 no connection") } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: health := gjson.Get(getFromRPC("health"), "result") if health.Exists() { t.Write("🟢 good") if reconnect { connectionSignal <- "reconnect" connectionSignal <- "reconnect" connectionSignal <- "reconnect" reconnect = false } } else { t.Write("🔴 no connection") if !reconnect { connectionSignal <- "no_connection" connectionSignal <- "no_connection" connectionSignal <- "no_connection" reconnect = true } } case <-ctx.Done(): return } } } // writePeers writes the connected Peers to the peerWidget. // Exits when the context expires. func writePeers(ctx context.Context, t *text.Text, delay time.Duration) { peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() t.Reset() if peers != "" { if err := t.Write(peers); err != nil { panic(err) } } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: t.Reset() peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() if peers != "" { t.Write(peers) } case <-ctx.Done(): return } } } // writeTransactions writes the latest Transactions to the transactionsWidget. // Exits when the context expires. func writeTransactions(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gow
{ view() connectionSignal := make(chan string) t, err := termbox.New() if err != nil { panic(err) } defer t.Close() flag.Parse() networkInfo := getFromRPC("status") networkStatus := gjson.Parse(networkInfo) if !networkStatus.Exists() { panic("Application not running on localhost:" + fmt.Sprintf("%s", *givenPort)) } ctx, cancel := context.WithCancel(context.Background()) // Blocks parsing widget
identifier_body
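The masked body above is the dashboard entry point: it creates the cancellation context every updater goroutine watches, and the `quitter` callback fires `cancel()` on 'q'/ESC. A sketch of that shutdown wiring in Rust, with an atomic flag in place of Go's context cancellation (all names here are illustrative):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Shared shutdown flag; the render loop checks it once per frame.
static QUIT: AtomicBool = AtomicBool::new(false);

// Keyboard callback: 'q' or 'Q' (plus ESC in the original) requests shutdown.
fn on_key(key: char) {
    if key == 'q' || key == 'Q' {
        QUIT.store(true, Ordering::Relaxed);
    }
}

fn should_quit() -> bool {
    QUIT.load(Ordering::Relaxed)
}
```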
main.go
AtWords()) if err != nil { panic(err) } if err := blocksWidget.Write("Latest block height " + networkStatus.Get("result.sync_info.latest_block_height").String() + "\n"); err != nil { panic(err) } // Transaction parsing widget transactionWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := transactionWidget.Write("Transactions will appear as soon as they are confirmed in a block.\n\n"); err != nil { panic(err) } validatorWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := validatorWidget.Write("List available validators.\n\n"); err != nil { panic(err) } peerWidget, err := text.New() if err != nil { panic(err) } if err := peerWidget.Write("0"); err != nil { panic(err) } healthWidget, err := text.New() if err != nil { panic(err) } if err := healthWidget.Write("🔴 no connection"); err != nil { panic(err) } timeWidget, err := text.New() if err != nil { panic(err) } currentTime := time.Now() if err := timeWidget.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } maxBlocksizeWidget, err := text.New() maxBlockSize := gjson.Get(getFromRPC("consensus_params"), "result.consensus_params.block.max_bytes").Int() if err != nil { panic(err) } if err := maxBlocksizeWidget.Write(fmt.Sprintf("%s", byteCountDecimal(maxBlockSize))); err != nil { panic(err) } // system powered widgets go writeTime(ctx, timeWidget, 1*time.Second) // rpc widgets go writePeers(ctx, peerWidget, 1*time.Second) go writeHealth(ctx, healthWidget, 500*time.Millisecond, connectionSignal) // websocket powered widgets go writeValidators(ctx, validatorWidget, connectionSignal) go writeBlocks(ctx, blocksWidget, connectionSignal) go writeTransactions(ctx, transactionWidget, connectionSignal) // blockchain download gauge syncWidget, err := gauge.New( gauge.Height(1), gauge.Color(cell.ColorBlue), gauge.Border(linestyle.Light), gauge.BorderTitle("Blockchain download %"), ) if err != nil { panic(err) } if networkStatus.Get("result.sync_info.catching_up").String() == "false" { if err := syncWidget.Absolute(100, 100); err != nil { panic(err) } } else { if networkStatus.Get("result.node_info.network").String() == "cosmoshub-3" { go syncGauge(ctx, syncWidget, networkStatus.Get("result.sync_info.latest_block_height").Int()) } else { // There is no way to detect maximum height in the network via RPC or websocket yet if err := syncWidget.Absolute(70, 100); err != nil { panic(err) } } } // Draw Dashboard c, err := container.New( t, container.Border(linestyle.Light), container.BorderTitle("PRESS Q or ESC TO QUIT | Network "+networkStatus.Get("result.node_info.network").String()+" Version "+networkStatus.Get("result.node_info.version").String()), container.BorderColor(cell.ColorNumber(2)), container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Health"), container.PlaceWidget(healthWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("System Time"), container.PlaceWidget(timeWidget), ), ), ), container.Right( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Max Block Size"), container.PlaceWidget(maxBlocksizeWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Connected Peers"), 
container.PlaceWidget(peerWidget), ), ), ), ), ), container.Bottom( container.PlaceWidget(syncWidget), ), ), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Validators"), container.PlaceWidget(validatorWidget), ), ), ), container.Bottom( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Latest Blocks"), container.PlaceWidget(blocksWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Latest Confirmed Transactions"), container.PlaceWidget(transactionWidget), ), ), ), ), ) if err != nil { panic(err) } quitter := func(k *terminalapi.Keyboard) { if k.Key == 'q' || k.Key == 'Q' || k.Key == keyboard.KeyEsc { cancel() } } if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil { panic(err) } } func getFromRPC(endpoint string) string { port := *givenPort resp, _ := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(appRPC + ":" + port + "/" + endpoint) return resp.String() } func getTendermintRPC(endpoint string) string { resp, err := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(tendermintRPC + endpoint) if err != nil {
return resp.String() } // writeTime writes the current system time to the timeWidget. // Exits when the context expires. func writeTime(ctx context.Context, t *text.Text, delay time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: currentTime := time.Now() t.Reset() if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } case <-ctx.Done(): return } } } // writeHealth writes the status to the healthWidget. // Exits when the context expires. func writeHealth(ctx context.Context, t *text.Text, delay time.Duration, connectionSignal chan string) { reconnect := false health := gjson.Get(getFromRPC("health"), "result") t.Reset() if health.Exists() { t.Write("🟢 good") } else { t.Write("🔴 no connection") } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: health := gjson.Get(getFromRPC("health"), "result") if health.Exists() { t.Write("🟢 good") if reconnect { connectionSignal <- "reconnect" connectionSignal <- "reconnect" connectionSignal <- "reconnect" reconnect = false } } else { t.Write("🔴 no connection") if !reconnect { connectionSignal <- "no_connection" connectionSignal <- "no_connection" connectionSignal <- "no_connection" reconnect = true } } case <-ctx.Done(): return } } } // writePeers writes the connected Peers to the peerWidget. // Exits when the context expires. func writePeers(ctx context.Context, t *text.Text, delay time.Duration) { peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() t.Reset() if peers != "" { if err := t.Write(peers); err != nil { panic(err) } } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: t.Reset() peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() if peers != "" { t.Write(peers) } case <-ctx.Done(): return } } } // writeTransactions writes the latest Transactions to the transactionsWidget. // Exits when the context expires. func writeTransactions(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentTx := gjson.Get(message, "result.data.value.TxResult.result.log") currentTime := time.Now() if currentTx.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02 03:04:05 PM")+"\n"+currentTx.String())); err != nil { panic
panic(err) }
conditional_block
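The masked conditional here is a plain error branch, but the subtler control flow in this file is `writeHealth`, which sends the same signal three times because exactly three websocket consumers share one channel. A sketch of the more conventional fan-out shape, one sender per consumer (channel names are hypothetical):

```rust
use std::sync::mpsc::{channel, Sender};

// One sender per consumer: no need to know how many copies of a signal
// to queue on a single shared channel.
fn signal_all(consumers: &[Sender<&'static str>], msg: &'static str) {
    for tx in consumers {
        let _ = tx.send(msg); // a consumer that already exited is fine to skip
    }
}

fn main() {
    let (tx_blocks, _rx_blocks) = channel();
    let (tx_txs, _rx_txs) = channel();
    let (tx_validators, _rx_validators) = channel();
    signal_all(&[tx_blocks, tx_txs, tx_validators], "reconnect");
}
```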
shared.rs
]; /// the root of AsRef, Borrow and Deref. fn get_slice(&self) -> &[u8]; } //////////////////// // public methods // //////////////////// impl Nbstr { #[cfg(feature="unstable")] /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. pub const MAX_LENGTH: usize = MAX_LENGTH; /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. /// /// This method will get deprecated once associated consts become stable. pub fn max_length() -> usize { MAX_LENGTH } // keeping all public methods under one impl gives cleaner rustdoc /// Create a Nbstr from a borrowed str with a limited lifetime. /// If the str is short enough it will be stored inside the struct itself and not boxed. pub fn from_str(s: &str) -> Self { Self::try_stack(s).unwrap_or_else(|| s.to_owned().into() ) } } //////////////// //Constructors// //////////////// impl Default for Nbstr { fn default() -> Self { let s = unsafe{ slice::from_raw_parts(1 as *const u8, 0) };//pointer must be nonzero Self::with_pointer(LITERAL, unsafe{ mem::transmute(s) }) } } impl From<&'static str> for Nbstr { fn from(s: &'static str) -> Self { Self::with_pointer(LITERAL, s) } } impl Nbstr { fn try_stack(s: &str) -> Option<Self> { // guard first: a bare `len as u8` would wrap for lengths that are a multiple of 256 if s.len() > MAX_STACK as usize { return None; } match s.len() as u8 { // Cannot have stack str with length 0, as variant might be NonZero 0 => Some(Self::default()), 1...MAX_STACK => { let mut z = Self::new(s.len() as u8); for (d, s) in z.data().iter_mut().zip(s.bytes()) { *d = s; } Some(z) }, _ => None, }} } impl From<Box<str>> for Nbstr { fn from(s: Box<str>) -> Self { // Don't try stack; users might turn it back into a box later let z = if s.is_empty() {Self::default()}// Make it clear we don't own any memory. else {Self::with_pointer(BOX, &s)}; mem::forget(s); return z; } } impl From<String> for Nbstr { fn from(s: String) -> Self { if s.capacity() != s.len() {// into_boxed will reallocate if let Some(inline) = Self::try_stack(&s) { return inline;// and drop s } } return Self::from(s.into_boxed_str()); } } impl From<Cow<'static, str>> for Nbstr { fn from(cow: Cow<'static, str>) -> Self {match cow { Cow::Owned(owned) => Self::from(owned), Cow::Borrowed(borrowed) => Self::from(borrowed), }} } impl Clone for Nbstr { fn clone(&self) -> Self { if self.variant() == BOX {// try stack Nbstr::from_str(self.deref()) } else {// copy the un-copyable unsafe{ ptr::read(self) } } } fn clone_from(&mut self, from: &Self) { // keep existing box if possible if self.variant() == BOX && self.len() == from.len() { unsafe{ ptr::copy_nonoverlapping( from.as_ptr(), mem::transmute(self.as_ptr()), self.len() )}; } else { *self = from.clone(); } } } /////////// //Getters// /////////// impl AsRef<[u8]> for Nbstr { fn as_ref(&self) -> &[u8] { self.get_slice() } } impl AsRef<str> for Nbstr { fn as_ref(&self) -> &str { let bytes: &[u8] = self.as_ref(); unsafe{ Str::from_utf8_unchecked( bytes )} } } impl Deref for Nbstr { type Target = str; fn deref(&self) -> &Self::Target { self.as_ref() } } impl Borrow<[u8]> for Nbstr { fn borrow(&self) -> &[u8] { self.as_ref() } } impl Borrow<str> for Nbstr { fn borrow(&self) -> &str { self.as_ref() } } ///////////////// //Common traits// ///////////////// impl hash::Hash for Nbstr { fn hash<H:hash::Hasher>(&self, h: &mut H) { self.deref().hash(h); } } impl fmt::Display for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self.deref(), fmtr) } } impl PartialOrd for Nbstr { fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { self.deref().partial_cmp(rhs.deref()) } }
impl Ord for Nbstr { fn cmp(&self, rhs: &Self) -> Ordering { self.deref().cmp(rhs.deref()) } } impl PartialEq for Nbstr { fn eq(&self, rhs: &Self) -> bool { self.deref() == rhs.deref() } } impl Eq for Nbstr {} /// Displays how the string is stored by prepending "stack: ", "literal: " or "boxed: ". impl fmt::Debug for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { write!(fmtr, "{}: {}", match self.variant() { 1...MAX_STACK => "stack", LITERAL => "literal", BOX => "boxed", _ => unreachable!("Unknown variant of Nbstr: {}", self.variant()) }, self.deref()) } } /////////////// //Destructors// /////////////// /// Returns Some if z contains a Box pub fn take_box(z: &mut Nbstr) -> Option<Box<str>> { if z.variant() == BOX { // I asked on #rust, and transmuting from & to mut is apparently undefined behaviour. // Is it really in this case? let s: *mut str = unsafe{ mem::transmute(z.get_slice()) }; // Cannot just assign default; then rust tries to drop the previous value! // .. which then calls this function. mem::forget(mem::replace(z, Nbstr::default())); Some(unsafe{ Box::from_raw(s) }) } else { None } } impl From<Nbstr> for Box<str> { fn from(mut z: Nbstr) -> Box<str> { take_box(&mut z).unwrap_or_else(|| z.deref().to_owned().into_boxed_str() ) } } impl From<Nbstr> for String { fn from(mut z: Nbstr) -> String { take_box(&mut z) .map(|b| b.into_string() ) .unwrap_or_else(|| z.deref().to_owned() ) } } impl From<Nbstr> for Cow<'static, str> { fn from(mut z: Nbstr) -> Cow<'static, str> { take_box(&mut z) .map(|b| Cow::from(b.into_string()) ) .unwrap_or_else(|| if z.variant() == LITERAL { let s: &'static str = unsafe{ mem::transmute(z.deref()) }; Cow::from(s) } else { Cow::from(z.deref().to_owned()) } ) } } #[cfg(not(test))]// Bugs in drop might cause stack overflow in surprising places. // The tests below should catch said bugs. impl Drop for Nbstr { fn drop(&mut self) { let _ = take_box(self); } } ////////////////////////////////////////////////////////////////////// // Tests that need private access or test code that uses cfg!(test) // ////////////////////////////////////////////////////////////////////// #[cfg(test)] mod tests { use nbstr::{Nbstr, MAX_STACK}; use super::*; use std::ops::Deref; use std::str as Str; use std::{mem,slice}; const STR: &'static str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; #[test] fn literal() { let mut z = Nbstr::from(STR); assert_eq!(z.deref().len(), STR.len()); assert_eq!(z.deref().as_ptr(), STR.as_ptr()); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), None); } #[test] fn stack() { let s = "abc"; let mut z = Nbstr::from_str(s); assert_eq!(z.deref().len(), s.len()); assert_eq!(z.deref().as_ptr() as usize, z.data().as_ptr() as usize); assert_eq!(z.deref(), s); assert_eq!(take_box(&mut z), None); } #[test]
fn boxed() { let b: Box<str> = STR.to_string().into_boxed_str(); let len = b.len(); let ptr = b.as_ptr();
random_line_split
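shared.rs implements a small-string type with three storage strategies: bytes inline on the stack, a borrowed `&'static str`, or a `Box<str>`. A simplified model of those variants as a plain enum — bulkier than Nbstr's packed layout, but it makes the dispatch behind `get_slice`/`Deref` visible (the inline capacity of 15 is illustrative):

```rust
// Simplified model of Nbstr's three variants; the real type packs these
// into one compact layout instead of a discriminated enum.
enum SmallStr {
    Literal(&'static str),
    Stack { len: u8, buf: [u8; 15] },
    Boxed(Box<str>),
}

impl SmallStr {
    fn as_str(&self) -> &str {
        match self {
            SmallStr::Literal(s) => s,
            // Sound as long as buf[..len] is only ever filled from a &str.
            SmallStr::Stack { len, buf } => {
                std::str::from_utf8(&buf[..*len as usize]).unwrap()
            }
            SmallStr::Boxed(b) => b,
        }
    }
}
```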
shared.rs
use std::{mem,slice,ptr, fmt,hash}; use std::borrow::{Borrow,Cow}; /// Protected methods used by the impls below. pub trait Protected { /// create new of this variant with possibly uninitialized data fn new(u8) -> Self; /// store this str, which is either &'static or boxed fn with_pointer(u8, &str) -> Self; fn variant(&self) -> u8; /// get the area of self where (length,pointer)|inline is. fn data(&mut self) -> &mut [u8]; /// the root of AsRef, Borrow and Deref. fn get_slice(&self) -> &[u8]; } //////////////////// // public methods // //////////////////// impl Nbstr { #[cfg(feature="unstable")] /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. pub const MAX_LENGTH: usize = MAX_LENGTH; /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. /// /// This method will get deprecated once associated consts become stable. pub fn max_length() -> usize { MAX_LENGTH } // keeping all public methods under one impl gives cleaner rustdoc /// Create a Nbstr from a borrowed str with a limited lifetime. /// If the str is short enough it will be stored inside the struct itself and not boxed. pub fn from_str(s: &str) -> Self { Self::try_stack(s).unwrap_or_else(|| s.to_owned().into() ) } } //////////////// //Constructors// //////////////// impl Default for Nbstr { fn default() -> Self { let s = unsafe{ slice::from_raw_parts(1 as *const u8, 0) };//pointer must be nonzero Self::with_pointer(LITERAL, unsafe{ mem::transmute(s) }) } } impl From<&'static str> for Nbstr { fn from(s: &'static str) -> Self { Self::with_pointer(LITERAL, s) } } impl Nbstr { fn try_stack(s: &str) -> Option<Self> { // guard first: a bare `len as u8` would wrap for lengths that are a multiple of 256 if s.len() > MAX_STACK as usize { return None; } match s.len() as u8 { // Cannot have stack str with length 0, as variant might be NonZero 0 => Some(Self::default()), 1...MAX_STACK => { let mut z = Self::new(s.len() as u8); for (d, s) in z.data().iter_mut().zip(s.bytes()) { *d = s; } Some(z) }, _ => None, }} } impl From<Box<str>> for Nbstr { fn from(s: Box<str>) -> Self { // Don't try stack; users might turn it back into a box later let z = if s.is_empty() {Self::default()}// Make it clear we don't own any memory.
else {Self::with_pointer(BOX, &s)}; mem::forget(s); return z; } } impl From<String> for Nbstr { fn from(s: String) -> Self { if s.capacity() != s.len() {// into_boxed will reallocate if let Some(inline) = Self::try_stack(&s) { return inline;// and drop s } } return Self::from(s.into_boxed_str()); } } impl From<Cow<'static, str>> for Nbstr { fn from(cow: Cow<'static, str>) -> Self {match cow { Cow::Owned(owned) => Self::from(owned), Cow::Borrowed(borrowed) => Self::from(borrowed), }} } impl Clone for Nbstr { fn clone(&self) -> Self { if self.variant() == BOX {// try stack Nbstr::from_str(self.deref()) } else {// copy the un-copyable unsafe{ ptr::read(self) } } } fn clone_from(&mut self, from: &Self) { // keep existing box if possible if self.variant() == BOX && self.len() == from.len() { unsafe{ ptr::copy_nonoverlapping( from.as_ptr(), mem::transmute(self.as_ptr()), self.len() )}; } else { *self = from.clone(); } } } /////////// //Getters// /////////// impl AsRef<[u8]> for Nbstr { fn as_ref(&self) -> &[u8] { self.get_slice() } } impl AsRef<str> for Nbstr { fn as_ref(&self) -> &str { let bytes: &[u8] = self.as_ref(); unsafe{ Str::from_utf8_unchecked( bytes )} } } impl Deref for Nbstr { type Target = str; fn deref(&self) -> &Self::Target { self.as_ref() } } impl Borrow<[u8]> for Nbstr { fn borrow(&self) -> &[u8] { self.as_ref() } } impl Borrow<str> for Nbstr { fn borrow(&self) -> &str { self.as_ref() } } ///////////////// //Common traits// ///////////////// impl hash::Hash for Nbstr { fn hash<H:hash::Hasher>(&self, h: &mut H) { self.deref().hash(h); } } impl fmt::Display for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self.deref(), fmtr) } } impl PartialOrd for Nbstr { fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { self.deref().partial_cmp(rhs.deref()) } } impl Ord for Nbstr { fn cmp(&self, rhs: &Self) -> Ordering { self.deref().cmp(rhs.deref()) } } impl PartialEq for Nbstr { fn eq(&self, rhs: &Self) -> bool { self.deref() == rhs.deref() } } impl Eq for Nbstr {} /// Displays how the string is stored by prepending "stack: ", "literal: " or "boxed: ". impl fmt::Debug for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { write!(fmtr, "{}: {}", match self.variant() { 1...MAX_STACK => "stack", LITERAL => "literal", BOX => "boxed", _ => unreachable!("Unknown variant of Nbstr: {}", self.variant()) }, self.deref()) } } /////////////// //Destructors// /////////////// /// Returns Some if z contains a Box pub fn take_box(z: &mut Nbstr) -> Option<Box<str>> {
impl From<Nbstr> for Box<str> { fn from(mut z: Nbstr) -> Box<str> { take_box(&mut z).unwrap_or_else(|| z.deref().to_owned().into_boxed_str() ) } } impl From<Nbstr> for String { fn from(mut z: Nbstr) -> String { take_box(&mut z) .map(|b| b.into_string() ) .unwrap_or_else(|| z.deref().to_owned() ) } } impl From<Nbstr> for Cow<'static, str> { fn from(mut z: Nbstr) -> Cow<'static, str> { take_box(&mut z) .map(|b| Cow::from(b.into_string()) ) .unwrap_or_else(|| if z.variant() == LITERAL { let s: &'static str = unsafe{ mem::transmute(z.deref()) }; Cow::from(s) } else { Cow::from(z.deref().to_owned()) } ) } } #[cfg(not(test))]// Bugs in drop might cause stack overflow in surprising places. // The tests below should catch said bugs. impl Drop for Nbstr { fn drop(&mut self) { let _ = take_box(self); } } ////////////////////////////////////////////////////////////////////// // Tests that need private access or test code that uses cfg!(test) // ////////////////////////////////////////////////////////////////////// #[cfg(test)] mod tests { use nbstr::{Nbstr, MAX_STACK}; use super::*; use std::ops::Deref; use std::str as Str; use std::{mem,slice}; const STR: &'static str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; #[test] fn literal() { let mut z = Nbstr::from(STR); assert_eq!(z.deref().len(), STR.len()); assert_eq!(z.deref().as_ptr(), STR.as_ptr()); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), None); } #[test
if z.variant() == BOX { // I asked on #rust, and transmuting from & to mut is apparently undefined behaviour. // Is it really in this case? let s: *mut str = unsafe{ mem::transmute(z.get_slice()) }; // Cannot just assign default; then rust tries to drop the previous value! // .. which then calls this function. mem::forget(mem::replace(z, Nbstr::default())); Some(unsafe{ Box::from_raw(s) }) } else { None } }
identifier_body
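The masked body is `take_box`, which extracts the owned `Box<str>` by transmuting the slice to a raw pointer, swapping in a default value, and forgetting the replaced value so `Drop` cannot recurse into this very function. Current std expresses that raw-pointer ownership round trip directly; a sketch of the same idea:

```rust
// Box::into_raw / Box::from_raw perform the ownership round trip that
// `take_box` does with transmute + mem::forget.
fn round_trip(s: Box<str>) -> Box<str> {
    let raw: *mut str = Box::into_raw(s); // ownership now lives in the raw pointer
    // ... the pointer could be stashed in a packed struct here ...
    unsafe { Box::from_raw(raw) } // reclaim ownership exactly once
}
```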
shared.rs
use std::{mem,slice,ptr, fmt,hash}; use std::borrow::{Borrow,Cow}; /// Protected methods used by the impls below. pub trait Protected { /// create new of this variant with possibly uninitialized data fn new(u8) -> Self; /// store this str, which is either &'static or boxed fn with_pointer(u8, &str) -> Self; fn variant(&self) -> u8; /// get the area of self where (length,pointer)|inline is. fn data(&mut self) -> &mut [u8]; /// the root of AsRef, Borrow and Deref. fn get_slice(&self) -> &[u8]; } //////////////////// // public methods // //////////////////// impl Nbstr { #[cfg(feature="unstable")] /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. pub const MAX_LENGTH: usize = MAX_LENGTH; /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. /// /// This method will get deprecated once associated consts become stable. pub fn max_length() -> usize { MAX_LENGTH } // keeping all public methods under one impl gives cleaner rustdoc /// Create a Nbstr from a borrowed str with a limited lifetime. /// If the str is short enough it will be stored inside the struct itself and not boxed. pub fn from_str(s: &str) -> Self { Self::try_stack(s).unwrap_or_else(|| s.to_owned().into() ) } } //////////////// //Constructors// //////////////// impl Default for Nbstr { fn default() -> Self { let s = unsafe{ slice::from_raw_parts(1 as *const u8, 0) };//pointer must be nonzero Self::with_pointer(LITERAL, unsafe{ mem::transmute(s) }) } } impl From<&'static str> for Nbstr { fn f
s: &'static str) -> Self { Self::with_pointer(LITERAL, s) } } impl Nbstr { fn try_stack(s: &str) -> Option<Self> { // guard first: a bare `len as u8` would wrap for lengths that are a multiple of 256 if s.len() > MAX_STACK as usize { return None; } match s.len() as u8 { // Cannot have stack str with length 0, as variant might be NonZero 0 => Some(Self::default()), 1...MAX_STACK => { let mut z = Self::new(s.len() as u8); for (d, s) in z.data().iter_mut().zip(s.bytes()) { *d = s; } Some(z) }, _ => None, }} } impl From<Box<str>> for Nbstr { fn from(s: Box<str>) -> Self { // Don't try stack; users might turn it back into a box later let z = if s.is_empty() {Self::default()}// Make it clear we don't own any memory. else {Self::with_pointer(BOX, &s)}; mem::forget(s); return z; } } impl From<String> for Nbstr { fn from(s: String) -> Self { if s.capacity() != s.len() {// into_boxed will reallocate if let Some(inline) = Self::try_stack(&s) { return inline;// and drop s } } return Self::from(s.into_boxed_str()); } } impl From<Cow<'static, str>> for Nbstr { fn from(cow: Cow<'static, str>) -> Self {match cow { Cow::Owned(owned) => Self::from(owned), Cow::Borrowed(borrowed) => Self::from(borrowed), }} } impl Clone for Nbstr { fn clone(&self) -> Self { if self.variant() == BOX {// try stack Nbstr::from_str(self.deref()) } else {// copy the un-copyable unsafe{ ptr::read(self) } } } fn clone_from(&mut self, from: &Self) { // keep existing box if possible if self.variant() == BOX && self.len() == from.len() { unsafe{ ptr::copy_nonoverlapping( from.as_ptr(), mem::transmute(self.as_ptr()), self.len() )}; } else { *self = from.clone(); } } } /////////// //Getters// /////////// impl AsRef<[u8]> for Nbstr { fn as_ref(&self) -> &[u8] { self.get_slice() } } impl AsRef<str> for Nbstr { fn as_ref(&self) -> &str { let bytes: &[u8] = self.as_ref(); unsafe{ Str::from_utf8_unchecked( bytes )} } } impl Deref for Nbstr { type Target = str; fn deref(&self) -> &Self::Target { self.as_ref() } } impl Borrow<[u8]> for Nbstr { fn borrow(&self) -> &[u8] { self.as_ref() } } impl Borrow<str> for Nbstr { fn borrow(&self) -> &str { self.as_ref() } } ///////////////// //Common traits// ///////////////// impl hash::Hash for Nbstr { fn hash<H:hash::Hasher>(&self, h: &mut H) { self.deref().hash(h); } } impl fmt::Display for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self.deref(), fmtr) } } impl PartialOrd for Nbstr { fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { self.deref().partial_cmp(rhs.deref()) } } impl Ord for Nbstr { fn cmp(&self, rhs: &Self) -> Ordering { self.deref().cmp(rhs.deref()) } } impl PartialEq for Nbstr { fn eq(&self, rhs: &Self) -> bool { self.deref() == rhs.deref() } } impl Eq for Nbstr {} /// Displays how the string is stored by prepending "stack: ", "literal: " or "boxed: ". impl fmt::Debug for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { write!(fmtr, "{}: {}", match self.variant() { 1...MAX_STACK => "stack", LITERAL => "literal", BOX => "boxed", _ => unreachable!("Unknown variant of Nbstr: {}", self.variant()) }, self.deref()) } } /////////////// //Destructors// /////////////// /// Returns Some if z contains a Box pub fn take_box(z: &mut Nbstr) -> Option<Box<str>> { if z.variant() == BOX { // I asked on #rust, and transmuting from & to mut is apparently undefined behaviour. // Is it really in this case? let s: *mut str = unsafe{ mem::transmute(z.get_slice()) }; // Cannot just assign default; then rust tries to drop the previous value! // .. which then calls this function.
mem::forget(mem::replace(z, Nbstr::default())); Some(unsafe{ Box::from_raw(s) }) } else { None } } impl From<Nbstr> for Box<str> { fn from(mut z: Nbstr) -> Box<str> { take_box(&mut z).unwrap_or_else(|| z.deref().to_owned().into_boxed_str() ) } } impl From<Nbstr> for String { fn from(mut z: Nbstr) -> String { take_box(&mut z) .map(|b| b.into_string() ) .unwrap_or_else(|| z.deref().to_owned() ) } } impl From<Nbstr> for Cow<'static, str> { fn from(mut z: Nbstr) -> Cow<'static, str> { take_box(&mut z) .map(|b| Cow::from(b.into_string()) ) .unwrap_or_else(|| if z.variant() == LITERAL { let s: &'static str = unsafe{ mem::transmute(z.deref()) }; Cow::from(s) } else { Cow::from(z.deref().to_owned()) } ) } } #[cfg(not(test))]// Bugs in drop might cause stack overflow in surprising places. // The tests below should catch said bugs. impl Drop for Nbstr { fn drop(&mut self) { let _ = take_box(self); } } ////////////////////////////////////////////////////////////////////// // Tests that need private access or test code that uses cfg!(test) // ////////////////////////////////////////////////////////////////////// #[cfg(test)] mod tests { use nbstr::{Nbstr, MAX_STACK}; use super::*; use std::ops::Deref; use std::str as Str; use std::{mem,slice}; const STR: &'static str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; #[test] fn literal() { let mut z = Nbstr::from(STR); assert_eq!(z.deref().len(), STR.len()); assert_eq!(z.deref().as_ptr(), STR.as_ptr()); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), None); } #[test
rom(
identifier_name
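A side note on the `1...MAX_STACK` arms used throughout this file: `...` is the old inclusive-range pattern syntax, written `..=` today. The length dispatch from `try_stack` in current syntax (the `MAX_STACK` value is illustrative; note that a bare `len as u8` wraps for lengths that are a multiple of 256, so the cast has to be guarded):

```rust
const MAX_STACK: u8 = 15; // illustrative; the real value depends on compile-time features

fn storage_for(len: usize) -> &'static str {
    if len > MAX_STACK as usize {
        return "boxed on the heap"; // also avoids the wrapping `as u8` cast below
    }
    match len as u8 {
        0 => "empty (shared literal)",
        1..=MAX_STACK => "inline on the stack",
        _ => unreachable!(), // excluded by the guard above
    }
}
```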
packfile.rs
{ entry.encode_to(&mut buf)?; } // footer buf.extend_from_slice(&sha1::Sha1::digest(&buf[..])); original_buf.unsplit(buf); Ok(()) } } #[derive(Debug)] pub struct Commit<'a> { pub tree: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20], but sha-1 returns a GenericArray // pub parent: [u8; 20], pub author: CommitUserInfo<'a>, pub committer: CommitUserInfo<'a>, // pub gpgsig: &str, pub message: &'a str, } impl Commit<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { let mut tree_hex = [0_u8; 20 * 2]; hex::encode_to_slice(self.tree, &mut tree_hex)?; out.write_str("tree ")?; out.extend_from_slice(&tree_hex); out.write_char('\n')?; writeln!(out, "author {}", self.author.encode())?; writeln!(out, "committer {}", self.committer.encode())?; write!(out, "\n{}", self.message)?; Ok(()) } #[must_use] pub fn size(&self) -> usize { let mut len = 0; len += "tree ".len() + (self.tree.len() * 2) + "\n".len(); len += "author ".len() + self.author.size() + "\n".len(); len += "committer ".len() + self.committer.size() + "\n".len(); len += "\n".len() + self.message.len(); len } } #[derive(Copy, Clone, Debug)] pub struct CommitUserInfo<'a> { pub name: &'a str, pub email: &'a str, pub time: chrono::DateTime<chrono::Utc>, } impl CommitUserInfo<'_> { fn encode(&self) -> String { // TODO: remove `format!`, `format_args!`? format!( "{} <{}> {} +0000", self.name, self.email, self.time.timestamp() ) } #[must_use] pub fn size(&self) -> usize { let timestamp_len = itoa::Buffer::new().format(self.time.timestamp()).len(); self.name.len() + "< ".len() + self.email.len() + "> ".len() + timestamp_len
} #[derive(Debug)] pub enum TreeItemKind { File, Directory, } impl TreeItemKind { #[must_use] pub const fn mode(&self) -> &'static str { match self { Self::File => "100644", Self::Directory => "40000", } } } #[derive(Debug)] pub struct TreeItem<'a> { pub kind: TreeItemKind, pub name: &'a str, pub hash: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20] - but we have to deal with GenericArrays } // `[mode] [name]\0[hash]` impl TreeItem<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { out.write_str(self.kind.mode())?; write!(out, " {}\0", self.name)?; out.extend_from_slice(&self.hash); Ok(()) } #[must_use] pub fn size(&self) -> usize { self.kind.mode().len() + " ".len() + self.name.len() + "\0".len() + self.hash.len() } } #[derive(Debug)] pub enum PackFileEntry<'a> { // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3 | gzip -dc // commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c // parent c2a862612a14346ae95234f26efae1ee69b5b7a9 // author Jordan Doyle <[email protected]> 1630244577 +0100 // committer Jordan Doyle <[email protected]> 1630244577 +0100 // gpgsig -----BEGIN PGP SIGNATURE----- // // iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt // xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2 // tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ // tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6 // omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX // fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E // UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO // FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G // hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG // QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE // Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf // bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y= // =fXoH // -----END PGP SIGNATURE----- // // test Commit(Commit<'a>), // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc // tree 20940000 .cargo���CYy��Ve�������100644 .gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/�� // kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�!�S&N�100644 test�G2Y�gN�b9vj?��Ut� Tree(Vec<TreeItem<'a>>), // jordan@Jordans-MacBook-Pro-2 objects % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e
+ " +0000".len() }
random_line_split
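`CommitUserInfo::encode` above produces the author/committer line of a git commit object: `<name> <<email>> <unix-seconds> <offset>`, with the offset hardcoded to UTC. A sketch of the same template in isolation (the name and email in the usage comment are placeholders):

```rust
// Matches the `{} <{}> {} +0000` template used by `encode`.
fn author_line(name: &str, email: &str, unix_seconds: i64) -> String {
    format!("{} <{}> {} +0000", name, email, unix_seconds)
}

// author_line("Example Dev", "dev@example.com", 1630244577)
//   == "Example Dev <dev@example.com> 1630244577 +0000"
```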
packfile.rs
a013f96a3550374e3 | gzip -dc // commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c // parent c2a862612a14346ae95234f26efae1ee69b5b7a9 // author Jordan Doyle <[email protected]> 1630244577 +0100 // committer Jordan Doyle <[email protected]> 1630244577 +0100 // gpgsig -----BEGIN PGP SIGNATURE----- // // iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt // xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2 // tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ // tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6 // omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX // fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E // UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO // FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G // hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG // QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE // Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf // bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y= // =fXoH // -----END PGP SIGNATURE----- // // test Commit(Commit<'a>), // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc // tree 20940000 .cargo���CYy��Ve�������100644 .gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/�� // kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�!�S&N�100644 test�G2Y�gN�b9vj?��Ut� Tree(Vec<TreeItem<'a>>), // jordan@Jordans-MacBook-Pro-2 objects % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3| gzip -dc // blob 23try and find me in .git Blob(&'a [u8]), // Tag, // OfsDelta, // RefDelta, } impl PackFileEntry<'_> { fn write_header(&self, buf: &mut BytesMut) { let mut size = self.uncompressed_size(); // write header { let mut val = 0b1000_0000_u8; val |= match self { Self::Commit(_) => 0b001, Self::Tree(_) => 0b010, Self::Blob(_) => 0b011, // Self::Tag => 0b100, // Self::OfsDelta => 0b110, // Self::RefDelta => 0b111, } << 4; // pack the 4 LSBs of the size into the header #[allow(clippy::cast_possible_truncation)] // value is masked { val |= (size & 0b1111) as u8; } size >>= 4; buf.put_u8(val); } // write size bytes while size != 0 { // read 7 LSBs from the `size` and push them off for the next iteration #[allow(clippy::cast_possible_truncation)] // value is masked let mut val = (size & 0b111_1111) as u8; size >>= 7; if size != 0 { // MSB set to 1 implies there's more size bytes to come, otherwise // the data starts after this byte val |= 1 << 7; } buf.put_u8(val); } } pub fn encode_to(&self, original_out: &mut BytesMut) -> Result<(), anyhow::Error> { self.write_header(original_out); // TODO: this needs space reserving for it // todo is there a way to stream through the zlibencoder so we don't have to // have this intermediate bytesmut and vec? 
let mut out = BytesMut::new(); let size = self.uncompressed_size(); original_out.reserve(size); // the data ends up getting compressed but we'll need at least this many bytes out.reserve(size); match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => { for item in items { item.encode_to(&mut out)?; } } Self::Blob(data) => { out.extend_from_slice(data); } } debug_assert_eq!(out.len(), size); let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(&out)?; let compressed_data = e.finish()?; original_out.extend_from_slice(&compressed_data); Ok(()) } #[must_use] pub fn uncompressed_size(&self) -> usize { match self { Self::Commit(commit) => commit.size(), Self::Tree(items) => items.iter().map(TreeItem::size).sum(), Self::Blob(data) => data.len(), } } // wen const generics for RustCrypto? :-( pub fn hash( &self, ) -> Result<GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, anyhow::Error> { let size = self.uncompressed_size(); let file_prefix = match self { Self::Commit(_) => "commit", Self::Tree(_) => "tree", Self::Blob(_) => "blob", }; let size_len = itoa::Buffer::new().format(size).len(); let mut out = BytesMut::with_capacity(file_prefix.len() + " ".len() + size_len + "\n".len() + size); write!(out, "{} {}\0", file_prefix, size)?; match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => {
for item in items { item.encod
conditional_block
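The header written by `write_header` packs a 3-bit object type and the uncompressed size into a little-endian base-128 varint: four size bits in the first byte, seven per continuation byte, with the MSB flagging that more bytes follow. A standalone sketch of that encoding (note: the code above appears to set the continuation bit on the first byte unconditionally, which promises a follow-up byte even when the size fits in four bits; this sketch sets it only when one actually follows):

```rust
// 3-bit object type (commit=1, tree=2, blob=3) plus size as a
// little-endian base-128 varint, as used in git pack entry headers.
fn encode_header(obj_type: u8, mut size: usize) -> Vec<u8> {
    let mut out = Vec::new();
    let mut byte = ((obj_type & 0b111) << 4) | (size & 0b1111) as u8;
    size >>= 4;
    while size != 0 {
        out.push(byte | 0b1000_0000); // continuation bit: more size bytes follow
        byte = (size & 0b111_1111) as u8;
        size >>= 7;
    }
    out.push(byte);
    out
}

// encode_header(1, 1068) == vec![0x9C, 0x42]
// (matches the 1068-byte commit shown in the comments above)
```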
packfile.rs
pub fn encode_to(&self, original_buf: &mut BytesMut) -> Result<(), anyhow::Error> { let mut buf = original_buf.split_off(original_buf.len()); buf.reserve(Self::header_size() + Self::footer_size()); // header buf.extend_from_slice(b"PACK"); // magic header buf.put_u32(2); // version buf.put_u32(self.entries.len().try_into()?); // number of entries in the packfile // body for entry in &self.entries { entry.encode_to(&mut buf)?; } // footer buf.extend_from_slice(&sha1::Sha1::digest(&buf[..])); original_buf.unsplit(buf); Ok(()) } } #[derive(Debug)] pub struct Commit<'a> { pub tree: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20], but sha-1 returns a GenericArray // pub parent: [u8; 20], pub author: CommitUserInfo<'a>, pub committer: CommitUserInfo<'a>, // pub gpgsig: &str, pub message: &'a str, } impl Commit<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { let mut tree_hex = [0_u8; 20 * 2]; hex::encode_to_slice(self.tree, &mut tree_hex)?; out.write_str("tree ")?; out.extend_from_slice(&tree_hex); out.write_char('\n')?; writeln!(out, "author {}", self.author.encode())?; writeln!(out, "committer {}", self.committer.encode())?; write!(out, "\n{}", self.message)?; Ok(()) } #[must_use] pub fn size(&self) -> usize { let mut len = 0; len += "tree ".len() + (self.tree.len() * 2) + "\n".len(); len += "author ".len() + self.author.size() + "\n".len(); len += "committer ".len() + self.committer.size() + "\n".len(); len += "\n".len() + self.message.len(); len } } #[derive(Copy, Clone, Debug)] pub struct CommitUserInfo<'a> { pub name: &'a str, pub email: &'a str, pub time: chrono::DateTime<chrono::Utc>, } impl CommitUserInfo<'_> { fn encode(&self) -> String { // TODO: remove `format!`, `format_args!`? format!( "{} <{}> {} +0000", self.name, self.email, self.time.timestamp() ) } #[must_use] pub fn size(&self) -> usize { let timestamp_len = itoa::Buffer::new().format(self.time.timestamp()).len(); self.name.len() + "< ".len() + self.email.len() + "> ".len() + timestamp_len + " +0000".len() } } #[derive(Debug)] pub enum TreeItemKind { File, Directory, } impl TreeItemKind { #[must_use] pub const fn mode(&self) -> &'static str { match self { Self::File => "100644", Self::Directory => "40000", } } } #[derive(Debug)] pub struct TreeItem<'a> { pub kind: TreeItemKind, pub name: &'a str, pub hash: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20] - but we have to deal with GenericArrays } // `[mode] [name]\0[hash]` impl TreeItem<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { out.write_str(self.kind.mode())?; write!(out, " {}\0", self.name)?; out.extend_from_slice(&self.hash); Ok(()) } #[must_use] pub fn size(&self) -> usize { self.kind.mode().len() + " ".len() + self.name.len() + "\0".len() + self.hash.len() } } #[derive(Debug)] pub enum PackFileEntry<'a> { // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3 | gzip -dc // commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c // parent c2a862612a14346ae95234f26efae1ee69b5b7a9 // author Jordan Doyle <[email protected]> 1630244577 +0100 // committer Jordan Doyle <[email protected]> 1630244577 +0100 // gpgsig -----BEGIN PGP SIGNATURE----- // // iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt // xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2 // tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ // 
tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6 // omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX // fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E // UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO // FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G // hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG // QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE // Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf // bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y= // =fXoH // -----END PGP SIGNATURE----- // // test Commit(Commit<'a>), // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc // tree 20940000 .cargo���CYy��Ve�������100644 .gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/�� // kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�
{ 20 }
identifier_body
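The masked body here is the 20-byte SHA-1 footer length; the related `hash` method builds the classic git object preimage — `<type> <size>\0` followed by the raw body — and SHA-1s it. A sketch using the RustCrypto `Digest` API this file already depends on (exact trait and conversion details vary across sha1 crate versions, so treat this as an assumption):

```rust
use sha1::{Digest, Sha1};

// Git object id: SHA-1 over "<type> <size>\0" + body.
fn object_id(kind: &str, body: &[u8]) -> [u8; 20] {
    let mut h = Sha1::new();
    h.update(format!("{} {}\0", kind, body.len()).as_bytes());
    h.update(body);
    h.finalize().into()
}
```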
packfile.rs
{ entry.encode_to(&mut buf)?; } // footer buf.extend_from_slice(&sha1::Sha1::digest(&buf[..])); original_buf.unsplit(buf); Ok(()) } } #[derive(Debug)] pub struct Commit<'a> { pub tree: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20], but sha-1 returns a GenericArray // pub parent: [u8; 20], pub author: CommitUserInfo<'a>, pub committer: CommitUserInfo<'a>, // pub gpgsig: &str, pub message: &'a str, } impl Commit<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { let mut tree_hex = [0_u8; 20 * 2]; hex::encode_to_slice(self.tree, &mut tree_hex)?; out.write_str("tree ")?; out.extend_from_slice(&tree_hex); out.write_char('\n')?; writeln!(out, "author {}", self.author.encode())?; writeln!(out, "committer {}", self.committer.encode())?; write!(out, "\n{}", self.message)?; Ok(()) } #[must_use] pub fn size(&self) -> usize { let mut len = 0; len += "tree ".len() + (self.tree.len() * 2) + "\n".len(); len += "author ".len() + self.author.size() + "\n".len(); len += "committer ".len() + self.committer.size() + "\n".len(); len += "\n".len() + self.message.len(); len } } #[derive(Copy, Clone, Debug)] pub struct CommitUserInfo<'a> { pub name: &'a str, pub email: &'a str, pub time: chrono::DateTime<chrono::Utc>, } impl CommitUserInfo<'_> { fn encode(&self) -> String { // TODO: remove `format!`, `format_args!`? format!( "{} <{}> {} +0000", self.name, self.email, self.time.timestamp() ) } #[must_use] pub fn size(&self) -> usize { let timestamp_len = itoa::Buffer::new().format(self.time.timestamp()).len(); self.name.len() + "< ".len() + self.email.len() + "> ".len() + timestamp_len + " +0000".len() } } #[derive(Debug)] pub enum TreeItemKind { File, Directory, } impl TreeItemKind { #[must_use] pub const fn mode(&self) -> &'static str { match self { Self::File => "100644", Self::Directory => "40000", } } } #[derive(Debug)] pub struct TreeItem<'a> { pub kind: TreeItemKind, pub name: &'a str, pub hash: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20] - but we have to deal with GenericArrays } // `[mode] [name]\0[hash]` impl TreeItem<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { out.write_str(self.kind.mode())?; write!(out, " {}\0", self.name)?; out.extend_from_slice(&self.hash); Ok(()) } #[must_use] pub fn
(&self) -> usize { self.kind.mode().len() + " ".len() + self.name.len() + "\0".len() + self.hash.len() } } #[derive(Debug)] pub enum PackFileEntry<'a> { // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3 | gzip -dc // commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c // parent c2a862612a14346ae95234f26efae1ee69b5b7a9 // author Jordan Doyle <[email protected]> 1630244577 +0100 // committer Jordan Doyle <[email protected]> 1630244577 +0100 // gpgsig -----BEGIN PGP SIGNATURE----- // // iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt // xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2 // tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ // tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6 // omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX // fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E // UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO // FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G // hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG // QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE // Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf // bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y= // =fXoH // -----END PGP SIGNATURE----- // // test Commit(Commit<'a>), // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc // tree 20940000 .cargo���CYy��Ve�������100644 .gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/�� // kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�!�S&N�100644 test�G2Y�gN�b9vj?��Ut� Tree(Vec<TreeItem<'a>>), // jordan@Jordans-MacBook-Pro-2 objects % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a355037
size
identifier_name
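The masked identifier is `size`; together with `encode_to` it pins down the tree-entry wire format `<mode> <name>\0<20-byte raw hash>`, and `size()` reports exactly that byte count. The same encoding in isolation:

```rust
// One git tree entry: mode, space, name, NUL, then the raw (not hex) hash.
fn tree_entry(mode: &str, name: &str, hash: &[u8; 20]) -> Vec<u8> {
    let mut out = Vec::with_capacity(mode.len() + 1 + name.len() + 1 + hash.len());
    out.extend_from_slice(mode.as_bytes());
    out.push(b' ');
    out.extend_from_slice(name.as_bytes());
    out.push(0);
    out.extend_from_slice(hash);
    out
}
```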
poll.rs
file descriptor is ready. /// /// [`read`]: tcp/struct.TcpStream.html#method.read /// [`register`]: #method.register /// [`reregister`]: #method.reregister /// /// # Examples /// /// A basic example -- establishing a `TcpStream` connection. /// /// ```no_run /// # extern crate mio; /// # extern crate mio_pool; /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Events, Poll, Token}; /// use mio::net::TcpStream; /// /// use std::net::{TcpListener, SocketAddr}; /// /// // Bind a server socket to connect to. /// let addr: SocketAddr = "127.0.0.1:0".parse()?; /// let server = TcpListener::bind(&addr)?; /// /// // Construct a new `Poll` handle as well as the `Events` we'll store into /// let poll = Poll::new()?; /// let mut events = Events::with_capacity(1024); /// /// // Connect the stream /// let stream = TcpStream::connect(&server.local_addr()?)?; /// /// // Register the stream with `Poll` /// poll.register(&stream, Token(0))?; /// /// // Wait for the socket to become ready. This has to happen in a loop to /// // handle spurious wakeups. /// loop { /// poll.poll(&mut events, None)?; /// /// for Token(t) in &events { /// if t == 0 { /// // The socket connected (probably; it could be a spurious wakeup) /// return Ok(()); /// } /// } /// } /// # Ok(()) /// # } /// #
/// # }
/// ```
///
/// # Exclusive access
///
/// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file
/// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued
/// for a given descriptor, no more events will be issued for that descriptor until it has been
/// re-registered using [`reregister`].
pub struct Poll(RawFd);

/// Associates an event with a file descriptor.
///
/// `Token` is a wrapper around `usize`, and is used as an argument to
/// [`Poll::register`] and [`Poll::reregister`].
///
/// See [`Poll`] for more documentation on polling. You will likely want to use something like
/// [`slab`] for creating and managing these.
///
/// [`Poll`]: struct.Poll.html
/// [`Poll::register`]: struct.Poll.html#method.register
/// [`Poll::reregister`]: struct.Poll.html#method.reregister
/// [`slab`]: https://crates.io/crates/slab
pub struct Token(pub usize);

/// A collection of readiness events.
///
/// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received
/// since the last poll. Usually, a single `Events` instance is created at the same time as a
/// [`Poll`] and reused on each call to [`Poll::poll`].
///
/// See [`Poll`] for more documentation on polling.
///
/// [`Poll::poll`]: struct.Poll.html#method.poll
/// [`Poll`]: struct.Poll.html
pub struct Events {
    all: Vec<epoll::EpollEvent>,
    /// How many of the events in `.all` are filled with responses to the last `poll()`?
    current: usize,
}

impl Events {
    /// Return a new `Events` capable of holding up to `capacity` events.
    pub fn with_capacity(capacity: usize) -> Events {
        let mut events = Vec::new();
        events.resize(capacity, epoll::EpollEvent::empty());
        Events {
            all: events,
            current: 0,
        }
    }
}

fn nix_to_io_err(e: nix::Error) -> io::Error {
    match e {
        nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32),
        nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e),
        nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e),
        nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e),
    }
}

impl Poll {
    /// Return a new `Poll` handle.
    ///
    /// This function will make a syscall to the operating system to create the system selector. If
    /// this syscall fails, `Poll::new` will return with the error.
    ///
    /// See [struct] level docs for more details.
    ///
    /// [struct]: struct.Poll.html
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::error::Error;
    /// # fn try_main() -> Result<(), Box<Error>> {
    /// use mio_pool::poll::{Poll, Events};
    /// use std::time::Duration;
    ///
    /// let poll = match Poll::new() {
    ///     Ok(poll) => poll,
    ///     Err(e) => panic!("failed to create Poll instance; err={:?}", e),
    /// };
    ///
    /// // Create a structure to receive polled events
    /// let mut events = Events::with_capacity(1024);
    ///
    /// // Wait for events, but none will be received because no `Evented`
    /// // handles have been registered with this `Poll` instance.
    /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?;
    /// assert_eq!(n, 0);
    /// # Ok(())
    /// # }
    /// #
    /// # fn main() {
    /// #     try_main().unwrap();
    /// # }
    /// ```
    pub fn new() -> io::Result<Self> {
        epoll::epoll_create1(epoll::EpollCreateFlags::empty())
            .map(Poll)
            .map_err(nix_to_io_err)
    }

    fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> {
        let mut event = epoll::EpollEvent::new(
            epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT,
            t.0 as u64,
        );
        epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err)
    }

    /// Register a file descriptor with this `Poll` instance.
    ///
    /// Once registered, the `Poll` instance monitors the given descriptor for readiness state
    /// changes. When it notices a state change, it will return a readiness event for the handle
    /// the next time [`poll`] is called.
    ///
    /// See the [`struct`] docs for a high level overview.
    ///
    /// `token` is a user-defined value that is associated with the given `file`. When [`poll`]
    /// returns an event for `file`, this token is included. This allows the caller to map the
    /// event back to its descriptor. The token associated with a file descriptor can be changed at
    /// any time by calling [`reregister`].
    pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
        self.ctl(file, t, epoll::EpollOp::EpollCtlAdd)
    }

    /// Re-register a file descriptor with this `Poll` instance.
    ///
    /// When you re-register a file descriptor, you can change the details of the registration.
    /// Specifically, you can update the `token` specified in previous `register` and `reregister`
    /// calls.
    ///
    /// See the [`register`] documentation for details about the function
    /// arguments and see the [`struct`] docs for a high level overview of
    /// polling.
    ///
    /// [`struct`]: #
    /// [`register`]: #method.register
    pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> {
        self.ctl(file, t, epoll::EpollOp::EpollCtlMod)
    }

    /// Deregister a file descriptor from this `Poll` instance.
    ///
    /// When you deregister a file descriptor, it will no longer be monitored for readiness events,
    /// and it will no longer produce events from `poll`.
    pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> {
        epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None)
            .map_err(nix_to_io_err)
    }

    /// Wait for events on file descriptors associated with this `Poll` instance.
    ///
    /// Blocks the current thread and waits for events for any of the file descriptors that are
    /// registered with this `Poll` instance. The function blocks until either at least one
    /// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means
    /// that `poll` blocks until a readiness event has been received.
    ///
    /// The supplied `events` will be cleared and newly received readiness events will be pushed
    /// onto the end. At
/// # fn main() { /// # try_main().unwrap();
random_line_split
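The docs in this row stress the oneshot contract: a registered descriptor goes quiet after its first event until it is re-registered. A hedged, `no_run`-style sketch of a handling loop built only from the APIs shown above (the socket setup mirrors the docs' own example; the accept-handling is an illustrative assumption, and the loop runs forever as written):

```rust
// Sketch: because registrations use EPOLLONESHOT, a descriptor is muted
// after its first event and must be re-armed with `reregister`.
use mio_pool::poll::{Events, Poll, Token};
use std::net::TcpListener;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let server = TcpListener::bind("127.0.0.1:0")?;
    let poll = Poll::new()?;
    let mut events = Events::with_capacity(64);

    poll.register(&server, Token(0))?;
    loop {
        poll.poll(&mut events, None)?;
        for Token(t) in &events {
            if t == 0 {
                let (_conn, _addr) = server.accept()?;
                // Without this, the listener never fires again.
                poll.reregister(&server, Token(0))?;
            }
        }
    }
}
```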
poll.rs
wrapper around `usize`, and is used as an argument to /// [`Poll::register`] and [`Poll::reregister`]. /// /// See [`Poll`] for more documentation on polling. You will likely want to use something like /// [`slab`] for creating and managing these. /// /// [`Poll`]: struct.Poll.html /// [`Poll::register`]: struct.Poll.html#method.register /// [`Poll::reregister`]: struct.Poll.html#method.reregister /// [`slab`]: https://crates.io/crates/slab pub struct Token(pub usize); /// A collection of readiness events. /// /// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received /// since the last poll. Usually, a single `Events` instance is created at the same time as a /// [`Poll`] and reused on each call to [`Poll::poll`]. /// /// See [`Poll`] for more documentation on polling. /// /// [`Poll::poll`]: struct.Poll.html#method.poll /// [`Poll`]: struct.Poll.html pub struct Events { all: Vec<epoll::EpollEvent>, /// How many of the events in `.all` are filled with responses to the last `poll()`? current: usize, } impl Events { /// Return a new `Events` capable of holding up to `capacity` events. pub fn with_capacity(capacity: usize) -> Events { let mut events = Vec::new(); events.resize(capacity, epoll::EpollEvent::empty()); Events { all: events, current: 0, } } } fn nix_to_io_err(e: nix::Error) -> io::Error { match e { nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32), nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e), } } impl Poll { /// Return a new `Poll` handle. /// /// This function will make a syscall to the operating system to create the system selector. If /// this syscall fails, `Poll::new` will return with the error. /// /// See [struct] level docs for more details. /// /// [struct]: struct.Poll.html /// /// # Examples /// /// ``` /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Poll, Events}; /// use std::time::Duration; /// /// let poll = match Poll::new() { /// Ok(poll) => poll, /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), /// }; /// /// // Create a structure to receive polled events /// let mut events = Events::with_capacity(1024); /// /// // Wait for events, but none will be received because no `Evented` /// // handles have been registered with this `Poll` instance. /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?; /// assert_eq!(n, 0); /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` pub fn new() -> io::Result<Self> { epoll::epoll_create1(epoll::EpollCreateFlags::empty()) .map(Poll) .map_err(nix_to_io_err) } fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> { let mut event = epoll::EpollEvent::new( epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT, t.0 as u64, ); epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err) } /// Register a file descriptor with this `Poll` instance. /// /// Once registered, the `Poll` instance monitors the given descriptor for readiness state /// changes. When it notices a state change, it will return a readiness event for the handle /// the next time [`poll`] is called. /// /// See the [`struct`] docs for a high level overview. /// /// `token` is user-defined value that is associated with the given `file`. 
When [`poll`] /// returns an event for `file`, this token is included. This allows the caller to map the /// event back to its descriptor. The token associated with a file descriptor can be changed at /// any time by calling [`reregister`]. pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlAdd) } /// Re-register a file descriptor with this `Poll` instance. /// /// When you re-register a file descriptor, you can change the details of the registration. /// Specifically, you can update the `token` specified in previous `register` and `reregister` /// calls. /// /// See the [`register`] documentation for details about the function /// arguments and see the [`struct`] docs for a high level overview of /// polling. /// /// [`struct`]: # /// [`register`]: #method.register pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlMod) } /// Deregister a file descriptor from this `Poll` instance. /// /// When you deregister a file descriptor, it will no longer be modified for readiness events, /// and it will no longer produce events from `poll`. pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> { epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None) .map_err(nix_to_io_err) } /// Wait for events on file descriptors associated with this `Poll` instance. /// /// Blocks the current thread and waits for events for any of the file descriptors that are /// registered with this `Poll` instance. The function blocks until either at least one /// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means /// that `poll` blocks until a readiness event has been received. /// /// The supplied `events` will be cleared and newly received readiness events will be pushed /// onto the end. At most `events.capacity()` events will be returned. If there are further /// pending readiness events, they are returned on the next call to `poll`. /// /// Note that once an event has been issued for a given `token` (or rather, for the token's /// file descriptor), no further events will be issued for that descriptor until it has been /// re-registered. Note also that the `timeout` is rounded up to the system clock granularity /// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun /// by a small amount. /// /// `poll` returns the number of events that have been pushed into `events`, or `Err` when an /// error has been encountered with the system selector. /// /// See the [struct] level documentation for a higher level discussion of polling. /// /// [struct]: # pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> { let timeout = match timeout { None => -1, Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize, }; events.current = epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?; Ok(events.current) } } /// [`Events`] iterator. /// /// This struct is created by the `into_iter` method on [`Events`]. 
/// /// [`Events`]: struct.Events.html pub struct EventsIterator<'a> { events: &'a Events, at: usize, } impl<'a> IntoIterator for &'a Events { type IntoIter = EventsIterator<'a>; type Item = Token; fn into_iter(self) -> Self::IntoIter { EventsIterator { events: self, at: 0, } } } impl<'a> Iterator for EventsIterator<'a> { type Item = Token; fn next(&mut self) -> Option<Self::Item> { let at = &mut self.at; if *at >= self.events.current
{
    // events beyond `current` are stale results from an earlier poll
    return None;
}
conditional_block
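`poll` converts its `Option<Duration>` timeout into the integer `epoll_wait` expects: `None` maps to -1 (block indefinitely), `Some(d)` to whole milliseconds. A small std-only sketch of the same arithmetic (the helper name is illustrative, not part of the crate):

```rust
use std::time::Duration;

// Mirrors the conversion inside `poll`; note that sub-millisecond
// remainders are truncated, so very short timeouts round down to 0 ms.
fn to_epoll_timeout(timeout: Option<Duration>) -> isize {
    match timeout {
        None => -1,
        Some(d) => (d.as_secs() * 1000 + u64::from(d.subsec_nanos()) / 1_000_000) as isize,
    }
}

fn main() {
    assert_eq!(to_epoll_timeout(None), -1);
    assert_eq!(to_epoll_timeout(Some(Duration::from_millis(1500))), 1500);
    // 999,999 ns truncates to 0 ms.
    assert_eq!(to_epoll_timeout(Some(Duration::new(0, 999_999))), 0);
}
```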
poll.rs
/// } /// } /// } /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` /// /// # Exclusive access /// /// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file /// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued /// for a given descriptor, not more events will be issued for that descriptor until it has been /// re-registered using [`reregister`]. pub struct Poll(RawFd); /// Associates an event with a file descriptor. /// /// `Token` is a wrapper around `usize`, and is used as an argument to /// [`Poll::register`] and [`Poll::reregister`]. /// /// See [`Poll`] for more documentation on polling. You will likely want to use something like /// [`slab`] for creating and managing these. /// /// [`Poll`]: struct.Poll.html /// [`Poll::register`]: struct.Poll.html#method.register /// [`Poll::reregister`]: struct.Poll.html#method.reregister /// [`slab`]: https://crates.io/crates/slab pub struct Token(pub usize); /// A collection of readiness events. /// /// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received /// since the last poll. Usually, a single `Events` instance is created at the same time as a /// [`Poll`] and reused on each call to [`Poll::poll`]. /// /// See [`Poll`] for more documentation on polling. /// /// [`Poll::poll`]: struct.Poll.html#method.poll /// [`Poll`]: struct.Poll.html pub struct Events { all: Vec<epoll::EpollEvent>, /// How many of the events in `.all` are filled with responses to the last `poll()`? current: usize, } impl Events { /// Return a new `Events` capable of holding up to `capacity` events. pub fn with_capacity(capacity: usize) -> Events { let mut events = Vec::new(); events.resize(capacity, epoll::EpollEvent::empty()); Events { all: events, current: 0, } } } fn nix_to_io_err(e: nix::Error) -> io::Error { match e { nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32), nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e), } } impl Poll { /// Return a new `Poll` handle. /// /// This function will make a syscall to the operating system to create the system selector. If /// this syscall fails, `Poll::new` will return with the error. /// /// See [struct] level docs for more details. /// /// [struct]: struct.Poll.html /// /// # Examples /// /// ``` /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Poll, Events}; /// use std::time::Duration; /// /// let poll = match Poll::new() { /// Ok(poll) => poll, /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), /// }; /// /// // Create a structure to receive polled events /// let mut events = Events::with_capacity(1024); /// /// // Wait for events, but none will be received because no `Evented` /// // handles have been registered with this `Poll` instance. 
/// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?; /// assert_eq!(n, 0); /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` pub fn new() -> io::Result<Self> { epoll::epoll_create1(epoll::EpollCreateFlags::empty()) .map(Poll) .map_err(nix_to_io_err) } fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> { let mut event = epoll::EpollEvent::new( epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT, t.0 as u64, ); epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err) } /// Register a file descriptor with this `Poll` instance. /// /// Once registered, the `Poll` instance monitors the given descriptor for readiness state /// changes. When it notices a state change, it will return a readiness event for the handle /// the next time [`poll`] is called. /// /// See the [`struct`] docs for a high level overview. /// /// `token` is user-defined value that is associated with the given `file`. When [`poll`] /// returns an event for `file`, this token is included. This allows the caller to map the /// event back to its descriptor. The token associated with a file descriptor can be changed at /// any time by calling [`reregister`]. pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlAdd) } /// Re-register a file descriptor with this `Poll` instance. /// /// When you re-register a file descriptor, you can change the details of the registration. /// Specifically, you can update the `token` specified in previous `register` and `reregister` /// calls. /// /// See the [`register`] documentation for details about the function /// arguments and see the [`struct`] docs for a high level overview of /// polling. /// /// [`struct`]: # /// [`register`]: #method.register pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlMod) } /// Deregister a file descriptor from this `Poll` instance. /// /// When you deregister a file descriptor, it will no longer be modified for readiness events, /// and it will no longer produce events from `poll`. pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> { epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None) .map_err(nix_to_io_err) } /// Wait for events on file descriptors associated with this `Poll` instance. /// /// Blocks the current thread and waits for events for any of the file descriptors that are /// registered with this `Poll` instance. The function blocks until either at least one /// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means /// that `poll` blocks until a readiness event has been received. /// /// The supplied `events` will be cleared and newly received readiness events will be pushed /// onto the end. At most `events.capacity()` events will be returned. If there are further /// pending readiness events, they are returned on the next call to `poll`. /// /// Note that once an event has been issued for a given `token` (or rather, for the token's /// file descriptor), no further events will be issued for that descriptor until it has been /// re-registered. Note also that the `timeout` is rounded up to the system clock granularity /// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun /// by a small amount. 
/// /// `poll` returns the number of events that have been pushed into `events`, or `Err` when an /// error has been encountered with the system selector. /// /// See the [struct] level documentation for a higher level discussion of polling. /// /// [struct]: # pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> { let timeout = match timeout { None => -1, Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize, }; events.current = epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?; Ok(events.current) } } /// [`Events`] iterator. /// /// This struct is created by the `into_iter` method on [`Events`]. /// /// [`Events`]: struct.Events.html pub struct
EventsIterator
identifier_name
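`nix_to_io_err` in the rows above translates nix's error variants into `io::Error`: system errors keep their raw errno, everything else is wrapped with a best-fit `ErrorKind`. A hedged, std-only sketch of that core idea (the errno value 11, EAGAIN on Linux, is only an illustrative assumption):

```rust
use std::io;

fn main() {
    // System errors carry a raw errno that io::Error can represent directly.
    let sys = io::Error::from_raw_os_error(11);
    assert_eq!(sys.raw_os_error(), Some(11));

    // Non-errno failures are wrapped with an appropriate ErrorKind instead.
    let other = io::Error::new(io::ErrorKind::InvalidInput, "invalid path");
    assert_eq!(other.kind(), io::ErrorKind::InvalidInput);
    assert!(other.raw_os_error().is_none());
}
```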
poll.rs
Poll`] for more documentation on polling. You will likely want to use something like /// [`slab`] for creating and managing these. /// /// [`Poll`]: struct.Poll.html /// [`Poll::register`]: struct.Poll.html#method.register /// [`Poll::reregister`]: struct.Poll.html#method.reregister /// [`slab`]: https://crates.io/crates/slab pub struct Token(pub usize); /// A collection of readiness events. /// /// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received /// since the last poll. Usually, a single `Events` instance is created at the same time as a /// [`Poll`] and reused on each call to [`Poll::poll`]. /// /// See [`Poll`] for more documentation on polling. /// /// [`Poll::poll`]: struct.Poll.html#method.poll /// [`Poll`]: struct.Poll.html pub struct Events { all: Vec<epoll::EpollEvent>, /// How many of the events in `.all` are filled with responses to the last `poll()`? current: usize, } impl Events { /// Return a new `Events` capable of holding up to `capacity` events. pub fn with_capacity(capacity: usize) -> Events { let mut events = Vec::new(); events.resize(capacity, epoll::EpollEvent::empty()); Events { all: events, current: 0, } } } fn nix_to_io_err(e: nix::Error) -> io::Error { match e { nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32), nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e), } } impl Poll { /// Return a new `Poll` handle. /// /// This function will make a syscall to the operating system to create the system selector. If /// this syscall fails, `Poll::new` will return with the error. /// /// See [struct] level docs for more details. /// /// [struct]: struct.Poll.html /// /// # Examples /// /// ``` /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Poll, Events}; /// use std::time::Duration; /// /// let poll = match Poll::new() { /// Ok(poll) => poll, /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), /// }; /// /// // Create a structure to receive polled events /// let mut events = Events::with_capacity(1024); /// /// // Wait for events, but none will be received because no `Evented` /// // handles have been registered with this `Poll` instance. /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?; /// assert_eq!(n, 0); /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` pub fn new() -> io::Result<Self> { epoll::epoll_create1(epoll::EpollCreateFlags::empty()) .map(Poll) .map_err(nix_to_io_err) } fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> { let mut event = epoll::EpollEvent::new( epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT, t.0 as u64, ); epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err) } /// Register a file descriptor with this `Poll` instance. /// /// Once registered, the `Poll` instance monitors the given descriptor for readiness state /// changes. When it notices a state change, it will return a readiness event for the handle /// the next time [`poll`] is called. /// /// See the [`struct`] docs for a high level overview. /// /// `token` is user-defined value that is associated with the given `file`. When [`poll`] /// returns an event for `file`, this token is included. 
This allows the caller to map the /// event back to its descriptor. The token associated with a file descriptor can be changed at /// any time by calling [`reregister`]. pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlAdd) } /// Re-register a file descriptor with this `Poll` instance. /// /// When you re-register a file descriptor, you can change the details of the registration. /// Specifically, you can update the `token` specified in previous `register` and `reregister` /// calls. /// /// See the [`register`] documentation for details about the function /// arguments and see the [`struct`] docs for a high level overview of /// polling. /// /// [`struct`]: # /// [`register`]: #method.register pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlMod) } /// Deregister a file descriptor from this `Poll` instance. /// /// When you deregister a file descriptor, it will no longer be modified for readiness events, /// and it will no longer produce events from `poll`. pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> { epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None) .map_err(nix_to_io_err) } /// Wait for events on file descriptors associated with this `Poll` instance. /// /// Blocks the current thread and waits for events for any of the file descriptors that are /// registered with this `Poll` instance. The function blocks until either at least one /// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means /// that `poll` blocks until a readiness event has been received. /// /// The supplied `events` will be cleared and newly received readiness events will be pushed /// onto the end. At most `events.capacity()` events will be returned. If there are further /// pending readiness events, they are returned on the next call to `poll`. /// /// Note that once an event has been issued for a given `token` (or rather, for the token's /// file descriptor), no further events will be issued for that descriptor until it has been /// re-registered. Note also that the `timeout` is rounded up to the system clock granularity /// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun /// by a small amount. /// /// `poll` returns the number of events that have been pushed into `events`, or `Err` when an /// error has been encountered with the system selector. /// /// See the [struct] level documentation for a higher level discussion of polling. /// /// [struct]: # pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> { let timeout = match timeout { None => -1, Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize, }; events.current = epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?; Ok(events.current) } } /// [`Events`] iterator. /// /// This struct is created by the `into_iter` method on [`Events`]. /// /// [`Events`]: struct.Events.html pub struct EventsIterator<'a> { events: &'a Events, at: usize, } impl<'a> IntoIterator for &'a Events { type IntoIter = EventsIterator<'a>; type Item = Token; fn into_iter(self) -> Self::IntoIter { EventsIterator { events: self, at: 0, } } } impl<'a> Iterator for EventsIterator<'a> { type Item = Token; fn next(&mut self) -> Option<Self::Item>
{
    let at = &mut self.at;
    if *at >= self.events.current {
        // events beyond `current` are stale results from an earlier poll
        return None;
    }
    self.events.all.get(*at).map(|e| {
        *at += 1;
        Token(e.data() as usize)
    })
}
identifier_body
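The `EventsIterator` rows all hinge on the `current` watermark: only the first `current` slots of `all` hold results from the last poll, and iteration stops there. A self-contained sketch of that shape, with `u64` values standing in for `epoll::EpollEvent` (an assumption made for brevity):

```rust
// Fixed buffer plus a watermark: `current` says how many slots are fresh.
struct Events {
    all: Vec<u64>,
    current: usize,
}

struct EventsIterator<'a> {
    events: &'a Events,
    at: usize,
}

impl<'a> IntoIterator for &'a Events {
    type Item = u64;
    type IntoIter = EventsIterator<'a>;
    fn into_iter(self) -> Self::IntoIter {
        EventsIterator { events: self, at: 0 }
    }
}

impl<'a> Iterator for EventsIterator<'a> {
    type Item = u64;
    fn next(&mut self) -> Option<u64> {
        if self.at >= self.events.current {
            return None; // slots past `current` are stale
        }
        let v = self.events.all[self.at];
        self.at += 1;
        Some(v)
    }
}

fn main() {
    let ev = Events { all: vec![7, 8, 9, 0], current: 2 };
    let got: Vec<u64> = (&ev).into_iter().collect();
    assert_eq!(got, vec![7, 8]); // the stale tail is never yielded
}
```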