| Column | Dtype | Stats |
| --- | --- | --- |
| file_name | large_string | lengths 4–140 |
| prefix | large_string | lengths 0–39k |
| suffix | large_string | lengths 0–36.1k |
| middle | large_string | lengths 0–29.4k |
| fim_type | large_string | 4 classes |
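The rows that follow are raw fill-in-the-middle (FIM) examples. As a minimal sketch of how they fit together (assuming the card accompanies a dataset loadable with the Hugging Face `datasets` library; the repository id below is a placeholder, not the real name), concatenating `prefix + middle + suffix` restores the original source file:

```python
from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset name.
ds = load_dataset("user/code-fim-examples", split="train")

row = ds[0]
# Each row splits one source file into three spans around the masked region;
# joining them in order reconstructs the original file text.
original = row["prefix"] + row["middle"] + row["suffix"]

print(row["file_name"], row["fim_type"], len(original))
```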
laptop.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: laptop.proto package pc import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" timestamp "github.com/golang/protobuf/ptypes/timestamp" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type Laptop struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Brand string `protobuf:"bytes,2,opt,name=brand,proto3" json:"brand,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` Cpu *CPU `protobuf:"bytes,4,opt,name=cpu,proto3" json:"cpu,omitempty"` Ram *Memory `protobuf:"bytes,5,opt,name=ram,proto3" json:"ram,omitempty"` Gpus []*GPU `protobuf:"bytes,6,rep,name=gpus,proto3" json:"gpus,omitempty"` Storages []*Storage `protobuf:"bytes,7,rep,name=storages,proto3" json:"storages,omitempty"` Screen *Screen `protobuf:"bytes,8,opt,name=screen,proto3" json:"screen,omitempty"` Keyboard *Keyboard `protobuf:"bytes,9,opt,name=keyboard,proto3" json:"keyboard,omitempty"` // Types that are valid to be assigned to Weight: // *Laptop_WeightKg // *Laptop_WeightLb Weight isLaptop_Weight `protobuf_oneof:"weight"` PriceUsd float64 `protobuf:"fixed64,12,opt,name=price_usd,json=priceUsd,proto3" json:"price_usd,omitempty"` ReleaseYear uint32 `protobuf:"varint,13,opt,name=release_year,json=releaseYear,proto3" json:"release_year,omitempty"` UpdatedAt *timestamp.Timestamp `protobuf:"bytes,14,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Laptop) Reset() { *m = Laptop{} } func (m *Laptop) String() string { return proto.CompactTextString(m) } func (*Laptop) ProtoMessage() {} func (*Laptop) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{0} } func (m *Laptop) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Laptop.Unmarshal(m, b) } func (m *Laptop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Laptop.Marshal(b, m, deterministic) } func (m *Laptop) XXX_Merge(src proto.Message) { xxx_messageInfo_Laptop.Merge(m, src) } func (m *Laptop) XXX_Size() int { return xxx_messageInfo_Laptop.Size(m) } func (m *Laptop) XXX_DiscardUnknown() { xxx_messageInfo_Laptop.DiscardUnknown(m) } var xxx_messageInfo_Laptop proto.InternalMessageInfo func (m *Laptop) GetId() string { if m != nil { return m.ID } return "" } func (m *Laptop) GetBrand() string { if m != nil { return m.Brand } return "" } func (m *Laptop) GetName() string { if m != nil { return m.Name } return "" } func (m *Laptop) GetCpu() *CPU { if m != nil { return m.Cpu } return nil } func (m *Laptop) GetRam() *Memory { if m != nil { return m.Ram } return nil } func (m *Laptop) GetGpus() []*GPU { if m != nil { return m.Gpus } return nil } func (m *Laptop) GetStorages() []*Storage { if m != nil { return m.Storages } return nil } func (m *Laptop) GetScreen() *Screen { if m != nil { return m.Screen } return nil } func (m *Laptop) 
GetKeyboard() *Keyboard { if m != nil { return m.Keyboard } return nil } type isLaptop_Weight interface { isLaptop_Weight() } type Laptop_WeightKg struct { WeightKg float64 `protobuf:"fixed64,10,opt,name=weight_kg,json=weightKg,proto3,oneof"` } type Laptop_WeightLb struct { WeightLb float64 `protobuf:"fixed64,11,opt,name=weight_lb,json=weightLb,proto3,oneof"` } func (*Laptop_WeightKg) isLaptop_Weight() {} func (*Laptop_WeightLb) isLaptop_Weight() {} func (m *Laptop) GetWeight() isLaptop_Weight { if m != nil { return m.Weight } return nil } func (m *Laptop) GetWeightKg() float64 { if x, ok := m.GetWeight().(*Laptop_WeightKg); ok { return x.WeightKg } return 0 } func (m *Laptop) GetWeightLb() float64 { if x, ok := m.GetWeight().(*Laptop_WeightLb); ok { return x.WeightLb } return 0 } func (m *Laptop) GetPriceUsd() float64 { if m != nil { return m.PriceUsd } return 0 } func (m *Laptop) GetReleaseYear() uint32 { if m != nil { return m.ReleaseYear } return 0 } func (m *Laptop) GetUpdatedAt() *timestamp.Timestamp { if m != nil { return m.UpdatedAt } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*Laptop) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Laptop_WeightKg)(nil), (*Laptop_WeightLb)(nil), } } type CreateLaptopRequest struct { Laptop *Laptop `protobuf:"bytes,1,opt,name=laptop,proto3" json:"laptop,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopRequest) Reset() { *m = CreateLaptopRequest{} } func (m *CreateLaptopRequest) String() string { return proto.CompactTextString(m) } func (*CreateLaptopRequest) ProtoMessage() {} func (*CreateLaptopRequest) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{1} } func (m *CreateLaptopRequest) XXX_Unmarshal(b []byte) error
func (m *CreateLaptopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopRequest.Marshal(b, m, deterministic) } func (m *CreateLaptopRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopRequest.Merge(m, src) } func (m *CreateLaptopRequest) XXX_Size() int { return xxx_messageInfo_CreateLaptopRequest.Size(m) } func (m *CreateLaptopRequest) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopRequest.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopRequest proto.InternalMessageInfo func (m *CreateLaptopRequest) GetLaptop() *Laptop { if m != nil { return m.Laptop } return nil } type CreateLaptopResponse struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopResponse) Reset() { *m = CreateLaptopResponse{} } func (m *CreateLaptopResponse) String() string { return proto.CompactTextString(m) } func (*CreateLaptopResponse) ProtoMessage() {} func (*CreateLaptopResponse) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{2} } func (m *CreateLaptopResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateLaptopResponse.Unmarshal(m, b) } func (m *CreateLaptopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopResponse.Marshal(b, m, deterministic) } func (m *CreateLaptopResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopResponse.Merge(m, src) } func (m *CreateLaptopResponse) XXX_Size() int { return xxx_messageInfo_CreateLaptopResponse.Size(m) } func (m *CreateLaptopResponse) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopResponse.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopResponse proto.InternalMessageInfo func (m *CreateLaptopResponse) GetId() string { if m != nil { return m.ID } return "" } func init() { proto.RegisterType((*Laptop)(nil), "pc.Laptop") proto.RegisterType((*CreateLaptopRequest)(nil), "pc.CreateLaptopRequest") proto.RegisterType((*CreateLaptopResponse)(nil), "pc.CreateLaptopResponse") } func init() { proto.RegisterFile("laptop.proto", fileDescriptor_28a7e4886f546705) } var fileDescriptor_28a7e4886f546705 = []byte{ // 459 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x6f, 0xd3, 0x30, 0x14, 0xc7, 0x49, 0xd3, 0x76, 0xe9, 0x6b, 0x5a, 0x24, 0x33, 0x09, 0xd3, 0x81, 0x08, 0x3d, 0x40, 0x4e, 0xa9, 0x34, 0x4e, 0x3b, 0x6e, 0x3b, 0x80, 0xb4, 0x21, 0x4d, 0x2e, 0x3d, 0xc0, 0xa5, 0x72, 0x92, 0x47, 0x88, 0xd6, 0xd4, 0xc6, 0x76, 0x40, 0xfd, 0x37, 0xf9, 0x8b, 0x50, 0x6c, 0x67, 0xec, 0xc7, 0xcd, 0xef, 0xf3, 0xfd, 0x5a, 0xef, 0x27, 0xc4, 0x3b, 0x2e, 0x8d, 0x90, 0x99, 0x54, 0xc2, 0x08, 0x32, 0x90, 0xc5, 0xe2, 0xb9, 0x54, 0xa2, 0x40, 0xad, 0x85, 0x72, 0x70, 0x11, 0x37, 0xd8, 0x08, 0x75, 0xf0, 0xd1, 0x4c, 0x1b, 0xa1, 0x78, 0x85, 0xbd, 0xa8, 0x0b, 0x85, 0xb8, 0xf7, 0xd1, 0xfc, 0x16, 0x0f, 0xb9, 0xe0, 0xaa, 0xf4, 0xf1, 0xdb, 0x4a, 0x88, 0x6a, 0x87, 0x2b, 0x1b, 0xe5, 0xed, 0x8f, 0x95, 0xa9, 0x1b, 0xd4, 0x86, 0x37, 0x3e, 0xe1, 0xf2, 0x6f, 0x08, 0xe3, 0x6b, 0x5b, 0x01, 0x99, 0xc3, 0xa0, 0x2e, 0x69, 0x90, 0x04, 0xe9, 0x84, 0x0d, 0xea, 0x92, 0x1c, 0xc3, 0x28, 0x57, 0x7c, 0x5f, 0xd2, 0x81, 0x45, 0x2e, 0x20, 0x04, 0x86, 0x7b, 0xde, 0x20, 0x0d, 0x2d, 0xb4, 0x6f, 0xf2, 0x0a, 0xc2, 0x42, 0xb6, 0x74, 0x98, 0x04, 0xe9, 0xf4, 0xf4, 0x28, 0x93, 0x45, 0x76, 0x79, 0xb3, 0x61, 0x1d, 0x23, 0xaf, 0x21, 0x54, 0xbc, 0xa1, 
0x23, 0x2b, 0x41, 0x27, 0x7d, 0xb1, 0xcd, 0xb0, 0x0e, 0x93, 0x13, 0x18, 0x56, 0xb2, 0xd5, 0x74, 0x9c, 0x84, 0xfd, 0xcf, 0x4f, 0x37, 0x1b, 0x66, 0x21, 0xf9, 0x00, 0x91, 0x6f, 0x55, 0xd3, 0x23, 0x6b, 0x98, 0x76, 0x86, 0xb5, 0x63, 0xec, 0x4e, 0x24, 0x4b, 0x18, 0xbb, 0x21, 0xd0, 0xe8, 0x7f, 0x9a, 0xb5, 0x25, 0xcc, 0x2b, 0x24, 0x85, 0xa8, 0x1f, 0x0d, 0x9d, 0x58, 0x57, 0xdc, 0xb9, 0xae, 0x3c, 0x63, 0x77, 0x2a, 0x79, 0x03, 0x93, 0x3f, 0x58, 0x57, 0x3f, 0xcd, 0xf6, 0xb6, 0xa2, 0x90, 0x04, 0x69, 0xf0, 0xf9, 0x19, 0x8b, 0x1c, 0xba, 0xaa, 0xee, 0xc9, 0xbb, 0x9c, 0x4e, 0x1f, 0xca, 0xd7, 0x39, 0x39, 0x81, 0x89, 0x54, 0x75, 0x81, 0xdb, 0x56, 0x97, 0x34, 0xee, 0x64, 0x16, 0x59, 0xb0, 0xd1, 0x25, 0x79, 0x07, 0xb1, 0xc2, 0x1d, 0x72, 0x8d, 0xdb, 0x03, 0x72, 0x45, 0x67, 0x49, 0x90, 0xce, 0xd8, 0xd4, 0xb3, 0x6f, 0xc8, 0x15, 0x39, 0x03, 0x68, 0x65, 0xc9, 0x0d, 0x96, 0x5b, 0x6e, 0xe8, 0xdc, 0x56, 0xba, 0xc8, 0xdc, 0x16, 0xb3, 0x7e, 0x8b, 0xd9, 0xd7, 0x7e, 0x8b, 0x6c, 0xe2, 0xdd, 0xe7, 0xe6, 0x22, 0x82, 0xb1, 0x2b, 0x63, 0x79, 0x06, 0x2f, 0x2e, 0x15, 0x72, 0x83, 0x6e, 0xb3, 0x0c, 0x7f, 0xb5, 0xa8, 0x4d, 0x37, 0x27, 0x77, 0x6c, 0x76, 0xc9, 0x7e, 0x4e, 0xde, 0xe2, 0x95, 0xe5, 0x7b, 0x38, 0x7e, 0xf8, 0x55, 0x4b, 0xb1, 0xd7, 0xf8, 0xf8, 0x38, 0x4e, 0x19, 0xcc, 0x9c, 0x63, 0x8d, 0xea, 0x77, 0x5d, 0x20, 0x39, 0x87, 0xf8, 0xfe, 0x47, 0xf2, 0xd2, 0x9e, 0xc1, 0xd3, 0x2a, 0x16, 0xf4, 0xa9, 0xe0, 0x72, 0x5c, 0x8c, 0xbe, 0x87, 0x2b, 0x59, 0xe4, 0x63, 0xdb, 0xe6, 0xc7, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x25, 0xab, 0x41, 0x1a, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // LaptopServiceClient is the client API for LaptopService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type LaptopServiceClient interface { CreateLaptop(ctx context.Context, in *CreateLaptopRequest, opts ...grpc.CallOption) (*CreateLaptopResponse, error) } type laptopServiceClient struct { cc grpc.ClientConnInterface } func NewLaptopServiceClient(cc grpc.ClientConnInterface) LaptopServiceClient { return &laptopServiceClient{cc} } func (c *laptopServiceClient) CreateLaptop(ctx context.Context, in *CreateLaptopRequest, opts ...grpc.CallOption) (*CreateLaptopResponse, error) { out := new(CreateLaptopResponse) err := c.cc.Invoke(ctx, "/pc.LaptopService/CreateLaptop", in, out, opts...) if err != nil { return nil, err } return out, nil } // LaptopServiceServer is the server API for LaptopService service. type LaptopServiceServer interface { CreateLaptop(context.Context, *CreateLaptopRequest) (*CreateLaptopResponse, error) } // UnimplementedLaptopServiceServer can be embedded to have forward compatible implementations. 
type UnimplementedLaptopServiceServer struct { } func (*UnimplementedLaptopServiceServer) CreateLaptop(ctx context.Context, req *CreateLaptopRequest) (*CreateLaptopResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateLaptop not implemented") } func RegisterLaptopServiceServer(s *grpc.Server, srv LaptopServiceServer) { s.RegisterService(&_LaptopService_serviceDesc, srv) } func _LaptopService_CreateLaptop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateLaptopRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LaptopServiceServer).CreateLaptop(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pc.LaptopService/CreateLaptop", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LaptopServiceServer).CreateLaptop(ctx, req.(*CreateLaptopRequest)) } return interceptor(ctx, in, info, handler) } var _LaptopService_serviceDesc = grpc.ServiceDesc{ ServiceName: "pc.LaptopService", HandlerType: (*LaptopServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CreateLaptop", Handler: _LaptopService_CreateLaptop_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "laptop.proto", }
{ return xxx_messageInfo_CreateLaptopRequest.Unmarshal(m, b) }
identifier_body
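In the row above the `middle` span is a function body (`fim_type = identifier_body`): the prefix ends at the `XXX_Unmarshal` signature, the suffix resumes with `XXX_Marshal`, and the masked region is the body to be filled in. A sketch of turning such a row into an infilling prompt, assuming a model trained with prefix-suffix-middle ordering and `<fim_prefix>/<fim_suffix>/<fim_middle>` sentinel tokens (the sentinel spellings vary by model and are an assumption here):

```python
def build_psm_prompt(row: dict) -> str:
    # Prefix-Suffix-Middle ordering; sentinel token names are model-specific
    # and used here for illustration only.
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>"
    )

# The training target for this row would then be row["middle"],
# i.e. the body of XXX_Unmarshal shown above.
```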
renderer.rs
use gpukit::wgpu; use std::sync::Arc; pub struct Renderer { context: Arc<gpukit::Context>, pipeline: wgpu::RenderPipeline, vertex_buffer: gpukit::Buffer<Vertex>, index_buffer: gpukit::Buffer<u32>, bind_group: gpukit::BindGroup, screen_uniforms: UniformBuffer<ScreenUniforms>, texture_bind_group: gpukit::BindGroup, texture_version: Option<u64>, texture: gpukit::Texture<gpukit::format::R8Unorm>, } #[derive(gpukit::Bindings)] struct Bindings<'a> { #[uniform(binding = 0)] screen_uniforms: &'a gpukit::Buffer<ScreenUniforms>, #[sampler(binding = 1, filtering)] sampler: &'a wgpu::Sampler, } #[derive(gpukit::Bindings)] struct TextureBindings<'a> { #[texture(binding = 0)] texture: &'a gpukit::TextureView<gpukit::format::R8Unorm>, } #[repr(C)] #[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] struct ScreenUniforms { width_in_points: f32, height_in_points: f32, pixels_per_point: f32, _padding: u32, } struct UniformBuffer<T: bytemuck::Pod> { buffer: gpukit::Buffer<T>, value: T, } impl<T: bytemuck::Pod> std::ops::Deref for UniformBuffer<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.value } } impl<T: bytemuck::Pod> std::ops::DerefMut for UniformBuffer<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.value } } impl<T: bytemuck::Pod> UniformBuffer<T> { pub fn new(context: &gpukit::Context, value: T) -> Self { let buffer = context .build_buffer() .with_usage(wgpu::BufferUsages::UNIFORM) .init_with_data(std::slice::from_ref(&value)); UniformBuffer { buffer, value } } fn update(&self, context: &gpukit::Context) { self.buffer .update(context, std::slice::from_ref(&self.value)); } } #[repr(transparent)] #[derive(Debug, Copy, Clone)] struct Vertex(egui::paint::Vertex); // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. unsafe impl bytemuck::Pod for Vertex {} unsafe impl bytemuck::Zeroable for Vertex {} impl Vertex { // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. 
fn cast_slice(vertices: &[egui::paint::Vertex]) -> &[Vertex] { let ptr = vertices.as_ptr() as *const Vertex; let len = vertices.len(); unsafe { std::slice::from_raw_parts(ptr, len) } } } pub struct RendererDescriptor { pub context: Arc<gpukit::Context>, pub target_format: wgpu::TextureFormat, // Size of the screen: [width, height] pub size: [u32; 2], // Number of pixels per point pub pixels_per_point: f32, } impl Renderer { pub fn new(desc: RendererDescriptor) -> anyhow::Result<Renderer> { let RendererDescriptor { context, target_format, size, pixels_per_point, } = desc; let vertex_buffer = Self::create_vertex_buffer(&context, 0); let index_buffer = Self::create_index_buffer(&context, 0); let uniforms = UniformBuffer::new( &context, ScreenUniforms { width_in_points: size[0] as f32 / pixels_per_point, height_in_points: size[1] as f32 / pixels_per_point, pixels_per_point, _padding: 0, }, ); let sampler = context.device.create_sampler(&wgpu::SamplerDescriptor { label: None, address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge, mag_filter: wgpu::FilterMode::Nearest, min_filter: wgpu::FilterMode::Linear, ..Default::default() }); let bind_group = gpukit::BindGroup::new( &context, &Bindings { screen_uniforms: &uniforms.buffer, sampler: &sampler, }, ); let texture = context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data([1, 1], &[0]); let texture_bind_group = gpukit::BindGroup::new( &context, &TextureBindings { texture: &texture.create_view(), }, ); let vertex = context .build_shader("gukit_egui vertex shader") .init_from_glsl(include_str!("shader.vert"), gpukit::ShaderStage::Vertex)?; let fragment = context .build_shader("gukit_egui fragment shader") .init_from_glsl(include_str!("shader.frag"), gpukit::ShaderStage::Fragment)?; let pipeline = context.create_render_pipeline(gpukit::RenderPipelineDescriptor { label: Some("gpukit_egui renderer"), vertex: vertex.entry("main"), fragment: fragment.entry("main"), vertex_buffers: &[gpukit::vertex_buffer_layout![ // Position 0 => Float32x2, // Texture Coordinates 1 => Float32x2, // Color 2 => Uint32, ]], bind_group_layouts: &[&bind_group.layout, &texture_bind_group.layout], color_targets: &[wgpu::ColorTargetState { format: target_format, blend: Some(wgpu::BlendState { color: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::One, dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha, operation: wgpu::BlendOperation::Add, }, alpha: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::OneMinusDstAlpha, dst_factor: wgpu::BlendFactor::One, operation: wgpu::BlendOperation::Add, }, }), write_mask: wgpu::ColorWrites::all(), }], depth_stencil: None, primitive: wgpu::PrimitiveState { topology: wgpu::PrimitiveTopology::TriangleList, ..Default::default() }, })?; Ok(Renderer { context, pipeline, vertex_buffer, index_buffer, bind_group, screen_uniforms: uniforms, texture_bind_group, texture_version: None, texture, }) } fn create_vertex_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<Vertex> { context .build_buffer() .with_usage(wgpu::BufferUsages::VERTEX) .init_with_capacity(len) } fn create_index_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<u32> { context .build_buffer() .with_usage(wgpu::BufferUsages::INDEX) .init_with_capacity(len) } pub fn set_size(&mut self, size: [u32; 2], scale_factor: f32) { self.screen_uniforms.width_in_points = size[0] as f32 / scale_factor; self.screen_uniforms.height_in_points = 
size[1] as f32 / scale_factor; self.screen_uniforms.pixels_per_point = scale_factor; self.screen_uniforms.update(&self.context) } pub fn render<'encoder>( &'encoder mut self, rpass: &mut wgpu::RenderPass<'encoder>, meshes: &[egui::ClippedMesh], texture: &egui::Texture, ) { use gpukit::RenderPassExt; let offsets = self.update_buffers(meshes); self.update_texture(texture); rpass.set_pipeline(&self.pipeline); rpass.set_vertex_buffer(0, *self.vertex_buffer.slice(..)); rpass.set_index_buffer_ext(self.index_buffer.slice(..)); rpass.set_bind_group(0, &self.bind_group, &[]); rpass.set_bind_group(1, &self.texture_bind_group, &[]); for (egui::ClippedMesh(rect, mesh), offset) in meshes.iter().zip(offsets) { if Self::set_scissor_region(rpass, &self.screen_uniforms, *rect) { let index_range = offset.index..offset.index + mesh.indices.len() as u32; rpass.draw_indexed(index_range, offset.vertex as i32, 0..1); } } } fn set_scissor_region( rpass: &mut wgpu::RenderPass, screen: &ScreenUniforms, rect: egui::Rect, ) -> bool { let left = rect.left() * screen.pixels_per_point; let right = rect.right() * screen.pixels_per_point; let top = rect.top() * screen.pixels_per_point; let bottom = rect.bottom() * screen.pixels_per_point; let screen_width = screen.width_in_points * screen.pixels_per_point; let screen_height = screen.height_in_points * screen.pixels_per_point; let left = left.clamp(0.0, screen_width); let top = top.clamp(0.0, screen_height); let right = right.clamp(left, screen_width); let bottom = bottom.clamp(top, screen_height); let left = left.round() as u32; let top = top.round() as u32; let right = right.round() as u32; let bottom = bottom.round() as u32; let width = right - left; let height = bottom - top; if width == 0 || height == 0 { false } else { rpass.set_scissor_rect(left, top, width, height); true } } fn update_buffers(&mut self, meshes: &[egui::ClippedMesh]) -> Vec<BufferOffset> { let mut offsets = Vec::with_capacity(meshes.len()); // Find out how many vertices/indices we need to render let mut vertex_count = 0; let mut index_count = 0; for egui::ClippedMesh(_, mesh) in meshes { offsets.push(BufferOffset { vertex: vertex_count, index: index_count, }); vertex_count += align_to_power_of_two(mesh.vertices.len() as u32, BUFFER_ALIGNMENT); index_count += align_to_power_of_two(mesh.indices.len() as u32, BUFFER_ALIGNMENT); } // Allocate space for the vertices/indices if vertex_count as usize > self.vertex_buffer.len()
if index_count as usize > self.index_buffer.len() { self.index_buffer = Self::create_index_buffer(&self.context, index_count as usize); } // Write vertices/indices to their respective buffers for (egui::ClippedMesh(_, mesh), offset) in meshes.iter().zip(&offsets) { let vertex_slice = Vertex::cast_slice(&mesh.vertices); self.vertex_buffer .write(&self.context, offset.vertex as usize, vertex_slice); self.index_buffer .write(&self.context, offset.index as usize, &mesh.indices); } offsets } fn update_texture(&mut self, texture: &egui::Texture) { if self.texture_version != Some(texture.version) { self.texture_version = Some(texture.version); self.texture = self .context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data( [texture.width as u32, texture.height as u32], &texture.pixels, ); self.texture_bind_group.update( &self.context, &TextureBindings { texture: &self.texture.create_view(), }, ); } } } struct BufferOffset { vertex: u32, index: u32, } const BUFFER_ALIGNMENT: u32 = wgpu::COPY_BUFFER_ALIGNMENT.next_power_of_two() as u32; const fn align_to_power_of_two(x: u32, power: u32) -> u32 { (x + (power - 1)) & !(power - 1) }
{ self.vertex_buffer = Self::create_vertex_buffer(&self.context, vertex_count as usize); }
conditional_block
renderer.rs
use gpukit::wgpu; use std::sync::Arc; pub struct Renderer { context: Arc<gpukit::Context>, pipeline: wgpu::RenderPipeline, vertex_buffer: gpukit::Buffer<Vertex>, index_buffer: gpukit::Buffer<u32>, bind_group: gpukit::BindGroup, screen_uniforms: UniformBuffer<ScreenUniforms>, texture_bind_group: gpukit::BindGroup, texture_version: Option<u64>, texture: gpukit::Texture<gpukit::format::R8Unorm>, } #[derive(gpukit::Bindings)] struct Bindings<'a> { #[uniform(binding = 0)] screen_uniforms: &'a gpukit::Buffer<ScreenUniforms>, #[sampler(binding = 1, filtering)] sampler: &'a wgpu::Sampler, } #[derive(gpukit::Bindings)] struct TextureBindings<'a> { #[texture(binding = 0)] texture: &'a gpukit::TextureView<gpukit::format::R8Unorm>, } #[repr(C)] #[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] struct ScreenUniforms { width_in_points: f32, height_in_points: f32, pixels_per_point: f32, _padding: u32, } struct UniformBuffer<T: bytemuck::Pod> { buffer: gpukit::Buffer<T>, value: T, } impl<T: bytemuck::Pod> std::ops::Deref for UniformBuffer<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.value } } impl<T: bytemuck::Pod> std::ops::DerefMut for UniformBuffer<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.value } } impl<T: bytemuck::Pod> UniformBuffer<T> { pub fn new(context: &gpukit::Context, value: T) -> Self { let buffer = context .build_buffer() .with_usage(wgpu::BufferUsages::UNIFORM) .init_with_data(std::slice::from_ref(&value)); UniformBuffer { buffer, value } } fn update(&self, context: &gpukit::Context) { self.buffer .update(context, std::slice::from_ref(&self.value)); } } #[repr(transparent)] #[derive(Debug, Copy, Clone)] struct Vertex(egui::paint::Vertex); // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. unsafe impl bytemuck::Pod for Vertex {} unsafe impl bytemuck::Zeroable for Vertex {} impl Vertex { // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. 
fn cast_slice(vertices: &[egui::paint::Vertex]) -> &[Vertex] { let ptr = vertices.as_ptr() as *const Vertex; let len = vertices.len(); unsafe { std::slice::from_raw_parts(ptr, len) } } } pub struct RendererDescriptor { pub context: Arc<gpukit::Context>, pub target_format: wgpu::TextureFormat, // Size of the screen: [width, height] pub size: [u32; 2], // Number of pixels per point pub pixels_per_point: f32, } impl Renderer { pub fn new(desc: RendererDescriptor) -> anyhow::Result<Renderer> { let RendererDescriptor { context, target_format, size, pixels_per_point, } = desc; let vertex_buffer = Self::create_vertex_buffer(&context, 0); let index_buffer = Self::create_index_buffer(&context, 0); let uniforms = UniformBuffer::new( &context, ScreenUniforms { width_in_points: size[0] as f32 / pixels_per_point, height_in_points: size[1] as f32 / pixels_per_point, pixels_per_point, _padding: 0, }, ); let sampler = context.device.create_sampler(&wgpu::SamplerDescriptor { label: None, address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge, mag_filter: wgpu::FilterMode::Nearest, min_filter: wgpu::FilterMode::Linear, ..Default::default() }); let bind_group = gpukit::BindGroup::new( &context, &Bindings { screen_uniforms: &uniforms.buffer, sampler: &sampler, }, ); let texture = context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data([1, 1], &[0]); let texture_bind_group = gpukit::BindGroup::new( &context, &TextureBindings { texture: &texture.create_view(), }, ); let vertex = context .build_shader("gukit_egui vertex shader") .init_from_glsl(include_str!("shader.vert"), gpukit::ShaderStage::Vertex)?; let fragment = context .build_shader("gukit_egui fragment shader") .init_from_glsl(include_str!("shader.frag"), gpukit::ShaderStage::Fragment)?; let pipeline = context.create_render_pipeline(gpukit::RenderPipelineDescriptor { label: Some("gpukit_egui renderer"), vertex: vertex.entry("main"), fragment: fragment.entry("main"), vertex_buffers: &[gpukit::vertex_buffer_layout![ // Position 0 => Float32x2, // Texture Coordinates 1 => Float32x2, // Color
color_targets: &[wgpu::ColorTargetState { format: target_format, blend: Some(wgpu::BlendState { color: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::One, dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha, operation: wgpu::BlendOperation::Add, }, alpha: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::OneMinusDstAlpha, dst_factor: wgpu::BlendFactor::One, operation: wgpu::BlendOperation::Add, }, }), write_mask: wgpu::ColorWrites::all(), }], depth_stencil: None, primitive: wgpu::PrimitiveState { topology: wgpu::PrimitiveTopology::TriangleList, ..Default::default() }, })?; Ok(Renderer { context, pipeline, vertex_buffer, index_buffer, bind_group, screen_uniforms: uniforms, texture_bind_group, texture_version: None, texture, }) } fn create_vertex_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<Vertex> { context .build_buffer() .with_usage(wgpu::BufferUsages::VERTEX) .init_with_capacity(len) } fn create_index_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<u32> { context .build_buffer() .with_usage(wgpu::BufferUsages::INDEX) .init_with_capacity(len) } pub fn set_size(&mut self, size: [u32; 2], scale_factor: f32) { self.screen_uniforms.width_in_points = size[0] as f32 / scale_factor; self.screen_uniforms.height_in_points = size[1] as f32 / scale_factor; self.screen_uniforms.pixels_per_point = scale_factor; self.screen_uniforms.update(&self.context) } pub fn render<'encoder>( &'encoder mut self, rpass: &mut wgpu::RenderPass<'encoder>, meshes: &[egui::ClippedMesh], texture: &egui::Texture, ) { use gpukit::RenderPassExt; let offsets = self.update_buffers(meshes); self.update_texture(texture); rpass.set_pipeline(&self.pipeline); rpass.set_vertex_buffer(0, *self.vertex_buffer.slice(..)); rpass.set_index_buffer_ext(self.index_buffer.slice(..)); rpass.set_bind_group(0, &self.bind_group, &[]); rpass.set_bind_group(1, &self.texture_bind_group, &[]); for (egui::ClippedMesh(rect, mesh), offset) in meshes.iter().zip(offsets) { if Self::set_scissor_region(rpass, &self.screen_uniforms, *rect) { let index_range = offset.index..offset.index + mesh.indices.len() as u32; rpass.draw_indexed(index_range, offset.vertex as i32, 0..1); } } } fn set_scissor_region( rpass: &mut wgpu::RenderPass, screen: &ScreenUniforms, rect: egui::Rect, ) -> bool { let left = rect.left() * screen.pixels_per_point; let right = rect.right() * screen.pixels_per_point; let top = rect.top() * screen.pixels_per_point; let bottom = rect.bottom() * screen.pixels_per_point; let screen_width = screen.width_in_points * screen.pixels_per_point; let screen_height = screen.height_in_points * screen.pixels_per_point; let left = left.clamp(0.0, screen_width); let top = top.clamp(0.0, screen_height); let right = right.clamp(left, screen_width); let bottom = bottom.clamp(top, screen_height); let left = left.round() as u32; let top = top.round() as u32; let right = right.round() as u32; let bottom = bottom.round() as u32; let width = right - left; let height = bottom - top; if width == 0 || height == 0 { false } else { rpass.set_scissor_rect(left, top, width, height); true } } fn update_buffers(&mut self, meshes: &[egui::ClippedMesh]) -> Vec<BufferOffset> { let mut offsets = Vec::with_capacity(meshes.len()); // Find out how many vertices/indices we need to render let mut vertex_count = 0; let mut index_count = 0; for egui::ClippedMesh(_, mesh) in meshes { offsets.push(BufferOffset { vertex: vertex_count, index: index_count, }); vertex_count += align_to_power_of_two(mesh.vertices.len() as u32, BUFFER_ALIGNMENT); 
index_count += align_to_power_of_two(mesh.indices.len() as u32, BUFFER_ALIGNMENT); } // Allocate space for the vertices/indices if vertex_count as usize > self.vertex_buffer.len() { self.vertex_buffer = Self::create_vertex_buffer(&self.context, vertex_count as usize); } if index_count as usize > self.index_buffer.len() { self.index_buffer = Self::create_index_buffer(&self.context, index_count as usize); } // Write vertices/indices to their respective buffers for (egui::ClippedMesh(_, mesh), offset) in meshes.iter().zip(&offsets) { let vertex_slice = Vertex::cast_slice(&mesh.vertices); self.vertex_buffer .write(&self.context, offset.vertex as usize, vertex_slice); self.index_buffer .write(&self.context, offset.index as usize, &mesh.indices); } offsets } fn update_texture(&mut self, texture: &egui::Texture) { if self.texture_version != Some(texture.version) { self.texture_version = Some(texture.version); self.texture = self .context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data( [texture.width as u32, texture.height as u32], &texture.pixels, ); self.texture_bind_group.update( &self.context, &TextureBindings { texture: &self.texture.create_view(), }, ); } } } struct BufferOffset { vertex: u32, index: u32, } const BUFFER_ALIGNMENT: u32 = wgpu::COPY_BUFFER_ALIGNMENT.next_power_of_two() as u32; const fn align_to_power_of_two(x: u32, power: u32) -> u32 { (x + (power - 1)) & !(power - 1) }
2 => Uint32, ]], bind_group_layouts: &[&bind_group.layout, &texture_bind_group.layout],
random_line_split
renderer.rs
use gpukit::wgpu; use std::sync::Arc; pub struct Renderer { context: Arc<gpukit::Context>, pipeline: wgpu::RenderPipeline, vertex_buffer: gpukit::Buffer<Vertex>, index_buffer: gpukit::Buffer<u32>, bind_group: gpukit::BindGroup, screen_uniforms: UniformBuffer<ScreenUniforms>, texture_bind_group: gpukit::BindGroup, texture_version: Option<u64>, texture: gpukit::Texture<gpukit::format::R8Unorm>, } #[derive(gpukit::Bindings)] struct Bindings<'a> { #[uniform(binding = 0)] screen_uniforms: &'a gpukit::Buffer<ScreenUniforms>, #[sampler(binding = 1, filtering)] sampler: &'a wgpu::Sampler, } #[derive(gpukit::Bindings)] struct TextureBindings<'a> { #[texture(binding = 0)] texture: &'a gpukit::TextureView<gpukit::format::R8Unorm>, } #[repr(C)] #[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] struct ScreenUniforms { width_in_points: f32, height_in_points: f32, pixels_per_point: f32, _padding: u32, } struct UniformBuffer<T: bytemuck::Pod> { buffer: gpukit::Buffer<T>, value: T, } impl<T: bytemuck::Pod> std::ops::Deref for UniformBuffer<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.value } } impl<T: bytemuck::Pod> std::ops::DerefMut for UniformBuffer<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.value } } impl<T: bytemuck::Pod> UniformBuffer<T> { pub fn
(context: &gpukit::Context, value: T) -> Self { let buffer = context .build_buffer() .with_usage(wgpu::BufferUsages::UNIFORM) .init_with_data(std::slice::from_ref(&value)); UniformBuffer { buffer, value } } fn update(&self, context: &gpukit::Context) { self.buffer .update(context, std::slice::from_ref(&self.value)); } } #[repr(transparent)] #[derive(Debug, Copy, Clone)] struct Vertex(egui::paint::Vertex); // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. unsafe impl bytemuck::Pod for Vertex {} unsafe impl bytemuck::Zeroable for Vertex {} impl Vertex { // SAFETY: `egui::paint::Vertex` is `#[repr(C)]`. fn cast_slice(vertices: &[egui::paint::Vertex]) -> &[Vertex] { let ptr = vertices.as_ptr() as *const Vertex; let len = vertices.len(); unsafe { std::slice::from_raw_parts(ptr, len) } } } pub struct RendererDescriptor { pub context: Arc<gpukit::Context>, pub target_format: wgpu::TextureFormat, // Size of the screen: [width, height] pub size: [u32; 2], // Number of pixels per point pub pixels_per_point: f32, } impl Renderer { pub fn new(desc: RendererDescriptor) -> anyhow::Result<Renderer> { let RendererDescriptor { context, target_format, size, pixels_per_point, } = desc; let vertex_buffer = Self::create_vertex_buffer(&context, 0); let index_buffer = Self::create_index_buffer(&context, 0); let uniforms = UniformBuffer::new( &context, ScreenUniforms { width_in_points: size[0] as f32 / pixels_per_point, height_in_points: size[1] as f32 / pixels_per_point, pixels_per_point, _padding: 0, }, ); let sampler = context.device.create_sampler(&wgpu::SamplerDescriptor { label: None, address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge, mag_filter: wgpu::FilterMode::Nearest, min_filter: wgpu::FilterMode::Linear, ..Default::default() }); let bind_group = gpukit::BindGroup::new( &context, &Bindings { screen_uniforms: &uniforms.buffer, sampler: &sampler, }, ); let texture = context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data([1, 1], &[0]); let texture_bind_group = gpukit::BindGroup::new( &context, &TextureBindings { texture: &texture.create_view(), }, ); let vertex = context .build_shader("gukit_egui vertex shader") .init_from_glsl(include_str!("shader.vert"), gpukit::ShaderStage::Vertex)?; let fragment = context .build_shader("gukit_egui fragment shader") .init_from_glsl(include_str!("shader.frag"), gpukit::ShaderStage::Fragment)?; let pipeline = context.create_render_pipeline(gpukit::RenderPipelineDescriptor { label: Some("gpukit_egui renderer"), vertex: vertex.entry("main"), fragment: fragment.entry("main"), vertex_buffers: &[gpukit::vertex_buffer_layout![ // Position 0 => Float32x2, // Texture Coordinates 1 => Float32x2, // Color 2 => Uint32, ]], bind_group_layouts: &[&bind_group.layout, &texture_bind_group.layout], color_targets: &[wgpu::ColorTargetState { format: target_format, blend: Some(wgpu::BlendState { color: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::One, dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha, operation: wgpu::BlendOperation::Add, }, alpha: wgpu::BlendComponent { src_factor: wgpu::BlendFactor::OneMinusDstAlpha, dst_factor: wgpu::BlendFactor::One, operation: wgpu::BlendOperation::Add, }, }), write_mask: wgpu::ColorWrites::all(), }], depth_stencil: None, primitive: wgpu::PrimitiveState { topology: wgpu::PrimitiveTopology::TriangleList, ..Default::default() }, })?; Ok(Renderer { context, pipeline, vertex_buffer, index_buffer, bind_group, 
screen_uniforms: uniforms, texture_bind_group, texture_version: None, texture, }) } fn create_vertex_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<Vertex> { context .build_buffer() .with_usage(wgpu::BufferUsages::VERTEX) .init_with_capacity(len) } fn create_index_buffer(context: &gpukit::Context, len: usize) -> gpukit::Buffer<u32> { context .build_buffer() .with_usage(wgpu::BufferUsages::INDEX) .init_with_capacity(len) } pub fn set_size(&mut self, size: [u32; 2], scale_factor: f32) { self.screen_uniforms.width_in_points = size[0] as f32 / scale_factor; self.screen_uniforms.height_in_points = size[1] as f32 / scale_factor; self.screen_uniforms.pixels_per_point = scale_factor; self.screen_uniforms.update(&self.context) } pub fn render<'encoder>( &'encoder mut self, rpass: &mut wgpu::RenderPass<'encoder>, meshes: &[egui::ClippedMesh], texture: &egui::Texture, ) { use gpukit::RenderPassExt; let offsets = self.update_buffers(meshes); self.update_texture(texture); rpass.set_pipeline(&self.pipeline); rpass.set_vertex_buffer(0, *self.vertex_buffer.slice(..)); rpass.set_index_buffer_ext(self.index_buffer.slice(..)); rpass.set_bind_group(0, &self.bind_group, &[]); rpass.set_bind_group(1, &self.texture_bind_group, &[]); for (egui::ClippedMesh(rect, mesh), offset) in meshes.iter().zip(offsets) { if Self::set_scissor_region(rpass, &self.screen_uniforms, *rect) { let index_range = offset.index..offset.index + mesh.indices.len() as u32; rpass.draw_indexed(index_range, offset.vertex as i32, 0..1); } } } fn set_scissor_region( rpass: &mut wgpu::RenderPass, screen: &ScreenUniforms, rect: egui::Rect, ) -> bool { let left = rect.left() * screen.pixels_per_point; let right = rect.right() * screen.pixels_per_point; let top = rect.top() * screen.pixels_per_point; let bottom = rect.bottom() * screen.pixels_per_point; let screen_width = screen.width_in_points * screen.pixels_per_point; let screen_height = screen.height_in_points * screen.pixels_per_point; let left = left.clamp(0.0, screen_width); let top = top.clamp(0.0, screen_height); let right = right.clamp(left, screen_width); let bottom = bottom.clamp(top, screen_height); let left = left.round() as u32; let top = top.round() as u32; let right = right.round() as u32; let bottom = bottom.round() as u32; let width = right - left; let height = bottom - top; if width == 0 || height == 0 { false } else { rpass.set_scissor_rect(left, top, width, height); true } } fn update_buffers(&mut self, meshes: &[egui::ClippedMesh]) -> Vec<BufferOffset> { let mut offsets = Vec::with_capacity(meshes.len()); // Find out how many vertices/indices we need to render let mut vertex_count = 0; let mut index_count = 0; for egui::ClippedMesh(_, mesh) in meshes { offsets.push(BufferOffset { vertex: vertex_count, index: index_count, }); vertex_count += align_to_power_of_two(mesh.vertices.len() as u32, BUFFER_ALIGNMENT); index_count += align_to_power_of_two(mesh.indices.len() as u32, BUFFER_ALIGNMENT); } // Allocate space for the vertices/indices if vertex_count as usize > self.vertex_buffer.len() { self.vertex_buffer = Self::create_vertex_buffer(&self.context, vertex_count as usize); } if index_count as usize > self.index_buffer.len() { self.index_buffer = Self::create_index_buffer(&self.context, index_count as usize); } // Write vertices/indices to their respective buffers for (egui::ClippedMesh(_, mesh), offset) in meshes.iter().zip(&offsets) { let vertex_slice = Vertex::cast_slice(&mesh.vertices); self.vertex_buffer .write(&self.context, offset.vertex as usize, 
vertex_slice); self.index_buffer .write(&self.context, offset.index as usize, &mesh.indices); } offsets } fn update_texture(&mut self, texture: &egui::Texture) { if self.texture_version != Some(texture.version) { self.texture_version = Some(texture.version); self.texture = self .context .build_texture() .with_usage(wgpu::TextureUsages::TEXTURE_BINDING) .init_with_data( [texture.width as u32, texture.height as u32], &texture.pixels, ); self.texture_bind_group.update( &self.context, &TextureBindings { texture: &self.texture.create_view(), }, ); } } } struct BufferOffset { vertex: u32, index: u32, } const BUFFER_ALIGNMENT: u32 = wgpu::COPY_BUFFER_ALIGNMENT.next_power_of_two() as u32; const fn align_to_power_of_two(x: u32, power: u32) -> u32 { (x + (power - 1)) & !(power - 1) }
new
identifier_name
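By this point all four `fim_type` classes named in the schema have appeared: identifier_body, conditional_block, random_line_split, and identifier_name. Continuing the hypothetical `ds` object from the loading sketch above, the classes can be tallied or filtered like this:

```python
from collections import Counter

# Distribution of completion types across the split.
print(Counter(ds["fim_type"]))

# Keep only the examples where the masked span is a bare identifier.
names_only = ds.filter(lambda r: r["fim_type"] == "identifier_name")
```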
exec.rs
use crate::guest; use crate::guest::{Crash, Guest}; use crate::utils::cli::{App, Arg, OptVal}; use crate::utils::free_ipv4_port; use crate::Config; use core::c::to_prog; use core::prog::Prog; use core::target::Target; use executor::transfer::{async_recv_result, async_send}; use executor::{ExecResult, Reason}; use std::env::temp_dir; use std::path::PathBuf; use std::process::exit; use tokio::fs::write; use tokio::io::AsyncReadExt; use tokio::net::{TcpListener, TcpStream}; use tokio::process::Child; use tokio::sync::oneshot; use tokio::time::{delay_for, timeout, Duration}; // config for executor #[derive(Debug, Clone, Deserialize)] pub struct ExecutorConf { pub path: PathBuf, pub host_ip: Option<String>, pub concurrency: bool, pub memleak_check: bool, pub script_mode: bool, } impl ExecutorConf { pub fn check(&self) { if !self.path.is_file() { eprintln!( "Config Error: executor executable file {} is invalid", self.path.display() ); exit(exitcode::CONFIG) } if let Some(ip) = &self.host_ip { use std::net::ToSocketAddrs; let addr = format!("{}:8080", ip); if let Err(e) = addr.to_socket_addrs() { eprintln!( "Config Error: invalid host ip `{}`: {}", self.host_ip.as_ref().unwrap(), e ); exit(exitcode::CONFIG) } } } } pub struct Executor { inner: ExecutorImpl, } enum ExecutorImpl { Linux(LinuxExecutor), Scripy(ScriptExecutor), } impl Executor { pub fn new(cfg: &Config) -> Self { let inner = if cfg.executor.script_mode { ExecutorImpl::Scripy(ScriptExecutor::new(cfg)) } else { ExecutorImpl::Linux(LinuxExecutor::new(cfg)) }; Self { inner } } pub async fn
(&mut self) { match self.inner { ExecutorImpl::Linux(ref mut e) => e.start().await, ExecutorImpl::Scripy(ref mut e) => e.start().await, } } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { match self.inner { ExecutorImpl::Linux(ref mut e) => e.exec(p).await, ExecutorImpl::Scripy(ref mut e) => e.exec(p, t).await, } } } struct ScriptExecutor { path_on_host: PathBuf, guest: Guest, } impl ScriptExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); Self { path_on_host: cfg.executor.path.clone(), guest, } } pub async fn start(&mut self) { self.guest.boot().await; } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { let p_text = to_prog(p, t); let tmp = temp_dir().join("HEALER_test_case_v1-1-1.c"); if let Err(e) = write(&tmp, &p_text).await { eprintln!( "Failed to write test case to tmp dir \"{}\": {}", tmp.display(), e ); exit(1); } let guest_case_file = self.guest.copy(&tmp).await; let mut executor = App::new(self.path_on_host.to_str().unwrap()); executor.arg(Arg::new_flag(guest_case_file.to_str().unwrap())); let mut exec_handle = self.guest.run_cmd(&executor).await; match timeout(Duration::new(15, 0), &mut exec_handle).await { Err(_) => Ok(ExecResult::Failed(Reason("Time out".to_string()))), Ok(_) => { let mut stdout = exec_handle.stdout.take().unwrap(); let mut output = String::new(); stdout.read_to_string(&mut output).await.unwrap(); self.parse_exec_result(output).await } } } pub async fn parse_exec_result(&mut self, out: String) -> Result<ExecResult, Option<Crash>> { let mut result_line = String::new(); for l in out.lines() { if l.contains("HEALER_EXEC_RESULT") { result_line = l.to_string(); } } if !result_line.is_empty() { let out = out.replace(&result_line, ""); if result_line.contains("success") { return Ok(ExecResult::Ok(Default::default())); } else if result_line.contains("failed") { return Ok(ExecResult::Failed(Reason(out))); } else if result_line.contains("crashed") { return Err(Some(Crash { inner: out })); } } if !self.guest.is_alive().await { Err(Some(Crash { inner: out })) } else { Ok(ExecResult::Ok(Default::default())) } } } struct LinuxExecutor { guest: Guest, port: u16, exec_handle: Option<Child>, conn: Option<TcpStream>, concurrency: bool, memleak_check: bool, executor_bin_path: PathBuf, target_path: PathBuf, host_ip: String, } impl LinuxExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); let port = free_ipv4_port() .unwrap_or_else(|| exits!(exitcode::TEMPFAIL, "No Free port for executor driver")); let host_ip = cfg .executor .host_ip .as_ref() .map(String::from) .unwrap_or_else(|| String::from(guest::LINUX_QEMU_HOST_IP_ADDR)); Self { guest, port, exec_handle: None, conn: None, concurrency: cfg.executor.concurrency, memleak_check: cfg.executor.memleak_check, executor_bin_path: cfg.executor.path.clone(), target_path: PathBuf::from(&cfg.fots_bin), host_ip, } } pub async fn start(&mut self) { // handle should be set to kill on drop self.exec_handle = None; self.guest.boot().await; self.start_executer().await } pub async fn start_executer(&mut self) { use tokio::io::ErrorKind::*; self.exec_handle = None; let target = self.guest.copy(&self.target_path).await; let (tx, rx) = oneshot::channel(); let mut retry = 0; let mut listener; loop { let host_addr = format!("{}:{}", self.host_ip, self.port); listener = match TcpListener::bind(&host_addr).await { Ok(l) => l, Err(e) => { if e.kind() == AddrInUse && retry != 5 { self.port = free_ipv4_port().unwrap(); retry += 
1; continue; } else { eprintln!("Fail to listen on {}: {}", host_addr, e); exit(1); } } }; break; } let host_addr = listener.local_addr().unwrap(); tokio::spawn(async move { match listener.accept().await { Ok((conn, _addr)) => { tx.send(conn).unwrap(); } Err(e) => { eprintln!("Executor driver: fail to get client: {}", e); exit(exitcode::OSERR); } } }); let mut executor = App::new(self.executor_bin_path.to_str().unwrap()); executor .arg(Arg::new_opt("-t", OptVal::normal(target.to_str().unwrap()))) .arg(Arg::new_opt( "-a", OptVal::normal(&format!( "{}:{}", guest::LINUX_QEMU_USER_NET_HOST_IP_ADDR, self.port )), )); if self.memleak_check { executor.arg(Arg::new_flag("-m")); } if self.concurrency { executor.arg(Arg::new_flag("-c")); } self.exec_handle = Some(self.guest.run_cmd(&executor).await); self.conn = match timeout(Duration::new(32, 0), rx).await { Err(_) => { self.exec_handle = None; eprintln!("Time out: wait executor connection {}", host_addr); exit(1) } Ok(conn) => Some(conn.unwrap()), }; } pub async fn exec(&mut self, p: &Prog) -> Result<ExecResult, Option<Crash>> { // send must be success assert!(self.conn.is_some()); if let Err(e) = timeout( Duration::new(15, 0), async_send(p, self.conn.as_mut().unwrap()), ) .await { info!("Prog send blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } // async_send(p, self.conn.as_mut().unwrap()).await.unwrap(); let ret = { match timeout( Duration::new(15, 0), async_recv_result(self.conn.as_mut().unwrap()), ) .await { Err(e) => { info!("Prog recv blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } Ok(ret) => ret, } }; match ret { Ok(result) => { self.guest.clear().await; if let ExecResult::Failed(ref reason) = result { let rea = reason.to_string(); if rea.contains("CRASH-MEMLEAK") { return Err(Some(Crash { inner: rea })); } } return Ok(result); } Err(_) => { let mut crashed: bool; let mut retry: u8 = 0; loop { crashed = !self.guest.is_alive().await; if crashed || retry == 10 { break; } else { retry += 1; delay_for(Duration::from_millis(500)).await; } } if crashed { return Err(self.guest.try_collect_crash().await); } else { let mut handle = self.exec_handle.take().unwrap(); let mut stdout = handle.stdout.take().unwrap(); let mut stderr = handle.stderr.take().unwrap(); handle.await.unwrap_or_else(|e| { exits!(exitcode::OSERR, "Fail to wait executor handle:{}", e) }); let mut err = Vec::new(); stderr.read_to_end(&mut err).await.unwrap(); let mut out = Vec::new(); stdout.read_to_end(&mut out).await.unwrap(); warn!( "Executor: Connection lost. STDOUT:{}. STDERR: {}", String::from_utf8(out).unwrap(), String::from_utf8(err).unwrap() ); self.start_executer().await; } } } // Caused by internal err Ok(ExecResult::Ok(Vec::new())) } }
start
identifier_name
exec.rs
use crate::guest; use crate::guest::{Crash, Guest}; use crate::utils::cli::{App, Arg, OptVal}; use crate::utils::free_ipv4_port; use crate::Config; use core::c::to_prog; use core::prog::Prog; use core::target::Target; use executor::transfer::{async_recv_result, async_send}; use executor::{ExecResult, Reason}; use std::env::temp_dir; use std::path::PathBuf; use std::process::exit; use tokio::fs::write; use tokio::io::AsyncReadExt; use tokio::net::{TcpListener, TcpStream}; use tokio::process::Child; use tokio::sync::oneshot; use tokio::time::{delay_for, timeout, Duration}; // config for executor #[derive(Debug, Clone, Deserialize)] pub struct ExecutorConf { pub path: PathBuf, pub host_ip: Option<String>, pub concurrency: bool, pub memleak_check: bool, pub script_mode: bool, } impl ExecutorConf { pub fn check(&self) { if !self.path.is_file() { eprintln!( "Config Error: executor executable file {} is invalid", self.path.display() ); exit(exitcode::CONFIG) } if let Some(ip) = &self.host_ip { use std::net::ToSocketAddrs; let addr = format!("{}:8080", ip); if let Err(e) = addr.to_socket_addrs() { eprintln!( "Config Error: invalid host ip `{}`: {}", self.host_ip.as_ref().unwrap(), e ); exit(exitcode::CONFIG) } } } } pub struct Executor { inner: ExecutorImpl, } enum ExecutorImpl { Linux(LinuxExecutor), Scripy(ScriptExecutor), } impl Executor { pub fn new(cfg: &Config) -> Self { let inner = if cfg.executor.script_mode { ExecutorImpl::Scripy(ScriptExecutor::new(cfg)) } else { ExecutorImpl::Linux(LinuxExecutor::new(cfg)) }; Self { inner } } pub async fn start(&mut self) { match self.inner { ExecutorImpl::Linux(ref mut e) => e.start().await, ExecutorImpl::Scripy(ref mut e) => e.start().await, } } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { match self.inner { ExecutorImpl::Linux(ref mut e) => e.exec(p).await, ExecutorImpl::Scripy(ref mut e) => e.exec(p, t).await, } } } struct ScriptExecutor { path_on_host: PathBuf, guest: Guest, } impl ScriptExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); Self { path_on_host: cfg.executor.path.clone(), guest, } } pub async fn start(&mut self) { self.guest.boot().await; } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { let p_text = to_prog(p, t); let tmp = temp_dir().join("HEALER_test_case_v1-1-1.c"); if let Err(e) = write(&tmp, &p_text).await { eprintln!( "Failed to write test case to tmp dir \"{}\": {}", tmp.display(), e ); exit(1); } let guest_case_file = self.guest.copy(&tmp).await; let mut executor = App::new(self.path_on_host.to_str().unwrap()); executor.arg(Arg::new_flag(guest_case_file.to_str().unwrap())); let mut exec_handle = self.guest.run_cmd(&executor).await; match timeout(Duration::new(15, 0), &mut exec_handle).await { Err(_) => Ok(ExecResult::Failed(Reason("Time out".to_string()))), Ok(_) => { let mut stdout = exec_handle.stdout.take().unwrap(); let mut output = String::new(); stdout.read_to_string(&mut output).await.unwrap(); self.parse_exec_result(output).await } } } pub async fn parse_exec_result(&mut self, out: String) -> Result<ExecResult, Option<Crash>> { let mut result_line = String::new(); for l in out.lines() { if l.contains("HEALER_EXEC_RESULT") { result_line = l.to_string(); } } if !result_line.is_empty() { let out = out.replace(&result_line, ""); if result_line.contains("success") { return Ok(ExecResult::Ok(Default::default())); } else if result_line.contains("failed") { return Ok(ExecResult::Failed(Reason(out))); } else if 
result_line.contains("crashed") { return Err(Some(Crash { inner: out })); } } if !self.guest.is_alive().await { Err(Some(Crash { inner: out })) } else { Ok(ExecResult::Ok(Default::default())) } } } struct LinuxExecutor { guest: Guest, port: u16, exec_handle: Option<Child>, conn: Option<TcpStream>, concurrency: bool, memleak_check: bool, executor_bin_path: PathBuf, target_path: PathBuf, host_ip: String, } impl LinuxExecutor { pub fn new(cfg: &Config) -> Self
pub async fn start(&mut self) { // handle should be set to kill on drop self.exec_handle = None; self.guest.boot().await; self.start_executer().await } pub async fn start_executer(&mut self) { use tokio::io::ErrorKind::*; self.exec_handle = None; let target = self.guest.copy(&self.target_path).await; let (tx, rx) = oneshot::channel(); let mut retry = 0; let mut listener; loop { let host_addr = format!("{}:{}", self.host_ip, self.port); listener = match TcpListener::bind(&host_addr).await { Ok(l) => l, Err(e) => { if e.kind() == AddrInUse && retry != 5 { self.port = free_ipv4_port().unwrap(); retry += 1; continue; } else { eprintln!("Fail to listen on {}: {}", host_addr, e); exit(1); } } }; break; } let host_addr = listener.local_addr().unwrap(); tokio::spawn(async move { match listener.accept().await { Ok((conn, _addr)) => { tx.send(conn).unwrap(); } Err(e) => { eprintln!("Executor driver: fail to get client: {}", e); exit(exitcode::OSERR); } } }); let mut executor = App::new(self.executor_bin_path.to_str().unwrap()); executor .arg(Arg::new_opt("-t", OptVal::normal(target.to_str().unwrap()))) .arg(Arg::new_opt( "-a", OptVal::normal(&format!( "{}:{}", guest::LINUX_QEMU_USER_NET_HOST_IP_ADDR, self.port )), )); if self.memleak_check { executor.arg(Arg::new_flag("-m")); } if self.concurrency { executor.arg(Arg::new_flag("-c")); } self.exec_handle = Some(self.guest.run_cmd(&executor).await); self.conn = match timeout(Duration::new(32, 0), rx).await { Err(_) => { self.exec_handle = None; eprintln!("Time out: wait executor connection {}", host_addr); exit(1) } Ok(conn) => Some(conn.unwrap()), }; } pub async fn exec(&mut self, p: &Prog) -> Result<ExecResult, Option<Crash>> { // send must be success assert!(self.conn.is_some()); if let Err(e) = timeout( Duration::new(15, 0), async_send(p, self.conn.as_mut().unwrap()), ) .await { info!("Prog send blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } // async_send(p, self.conn.as_mut().unwrap()).await.unwrap(); let ret = { match timeout( Duration::new(15, 0), async_recv_result(self.conn.as_mut().unwrap()), ) .await { Err(e) => { info!("Prog recv blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } Ok(ret) => ret, } }; match ret { Ok(result) => { self.guest.clear().await; if let ExecResult::Failed(ref reason) = result { let rea = reason.to_string(); if rea.contains("CRASH-MEMLEAK") { return Err(Some(Crash { inner: rea })); } } return Ok(result); } Err(_) => { let mut crashed: bool; let mut retry: u8 = 0; loop { crashed = !self.guest.is_alive().await; if crashed || retry == 10 { break; } else { retry += 1; delay_for(Duration::from_millis(500)).await; } } if crashed { return Err(self.guest.try_collect_crash().await); } else { let mut handle = self.exec_handle.take().unwrap(); let mut stdout = handle.stdout.take().unwrap(); let mut stderr = handle.stderr.take().unwrap(); handle.await.unwrap_or_else(|e| { exits!(exitcode::OSERR, "Fail to wait executor handle:{}", e) }); let mut err = Vec::new(); stderr.read_to_end(&mut err).await.unwrap(); let mut out = Vec::new(); stdout.read_to_end(&mut out).await.unwrap(); warn!( "Executor: Connection lost. STDOUT:{}. STDERR: {}", String::from_utf8(out).unwrap(), String::from_utf8(err).unwrap() ); self.start_executer().await; } } } // Caused by internal err Ok(ExecResult::Ok(Vec::new())) } }
{ let guest = Guest::new(cfg); let port = free_ipv4_port() .unwrap_or_else(|| exits!(exitcode::TEMPFAIL, "No Free port for executor driver")); let host_ip = cfg .executor .host_ip .as_ref() .map(String::from) .unwrap_or_else(|| String::from(guest::LINUX_QEMU_HOST_IP_ADDR)); Self { guest, port, exec_handle: None, conn: None, concurrency: cfg.executor.concurrency, memleak_check: cfg.executor.memleak_check, executor_bin_path: cfg.executor.path.clone(), target_path: PathBuf::from(&cfg.fots_bin), host_ip, } }
identifier_body
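Each record in this dump pairs a source file name with the code before a masked span, the code after it, the masked span itself, and a label such as `identifier_body`, `conditional_block`, `random_line_split`, or `identifier_name`. As a rough sketch of how such a record could be flattened into a single fill-in-the-middle training string — the field names, sentinel tokens, and toy record below are assumptions for illustration, not part of this dataset:

```python
# Illustrative sketch only: sentinel tokens and field names are assumed.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<|fim_prefix|>", "<|fim_suffix|>", "<|fim_middle|>"

def to_fim_example(record: dict) -> str:
    """Serialize a {prefix, suffix, middle} record in prefix-suffix-middle order."""
    return (
        FIM_PREFIX + record["prefix"]
        + FIM_SUFFIX + record["suffix"]
        + FIM_MIDDLE + record["middle"]
    )

record = {
    "file_name": "exec.rs",
    "fim_type": "identifier_body",
    "prefix": "pub fn new(cfg: &Config) -> Self ",
    "middle": "{ /* constructor body goes here */ }",
    "suffix": "\n    pub async fn start(&mut self) { /* ... */ }",
}
print(to_fim_example(record))
```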
exec.rs
use crate::guest; use crate::guest::{Crash, Guest}; use crate::utils::cli::{App, Arg, OptVal}; use crate::utils::free_ipv4_port; use crate::Config; use core::c::to_prog; use core::prog::Prog; use core::target::Target; use executor::transfer::{async_recv_result, async_send}; use executor::{ExecResult, Reason}; use std::env::temp_dir; use std::path::PathBuf; use std::process::exit; use tokio::fs::write; use tokio::io::AsyncReadExt; use tokio::net::{TcpListener, TcpStream}; use tokio::process::Child; use tokio::sync::oneshot; use tokio::time::{delay_for, timeout, Duration}; // config for executor #[derive(Debug, Clone, Deserialize)] pub struct ExecutorConf { pub path: PathBuf, pub host_ip: Option<String>, pub concurrency: bool, pub memleak_check: bool, pub script_mode: bool, } impl ExecutorConf { pub fn check(&self) { if !self.path.is_file() { eprintln!( "Config Error: executor executable file {} is invalid", self.path.display() ); exit(exitcode::CONFIG) } if let Some(ip) = &self.host_ip { use std::net::ToSocketAddrs; let addr = format!("{}:8080", ip); if let Err(e) = addr.to_socket_addrs() { eprintln!( "Config Error: invalid host ip `{}`: {}", self.host_ip.as_ref().unwrap(), e ); exit(exitcode::CONFIG) } } } } pub struct Executor { inner: ExecutorImpl, } enum ExecutorImpl { Linux(LinuxExecutor), Scripy(ScriptExecutor), } impl Executor { pub fn new(cfg: &Config) -> Self { let inner = if cfg.executor.script_mode { ExecutorImpl::Scripy(ScriptExecutor::new(cfg)) } else { ExecutorImpl::Linux(LinuxExecutor::new(cfg)) }; Self { inner } } pub async fn start(&mut self) { match self.inner { ExecutorImpl::Linux(ref mut e) => e.start().await, ExecutorImpl::Scripy(ref mut e) => e.start().await, } } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { match self.inner { ExecutorImpl::Linux(ref mut e) => e.exec(p).await, ExecutorImpl::Scripy(ref mut e) => e.exec(p, t).await, } } } struct ScriptExecutor { path_on_host: PathBuf, guest: Guest, } impl ScriptExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); Self { path_on_host: cfg.executor.path.clone(), guest, } } pub async fn start(&mut self) { self.guest.boot().await; } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { let p_text = to_prog(p, t); let tmp = temp_dir().join("HEALER_test_case_v1-1-1.c"); if let Err(e) = write(&tmp, &p_text).await { eprintln!( "Failed to write test case to tmp dir \"{}\": {}", tmp.display(), e ); exit(1); } let guest_case_file = self.guest.copy(&tmp).await; let mut executor = App::new(self.path_on_host.to_str().unwrap()); executor.arg(Arg::new_flag(guest_case_file.to_str().unwrap())); let mut exec_handle = self.guest.run_cmd(&executor).await; match timeout(Duration::new(15, 0), &mut exec_handle).await { Err(_) => Ok(ExecResult::Failed(Reason("Time out".to_string()))), Ok(_) => { let mut stdout = exec_handle.stdout.take().unwrap(); let mut output = String::new(); stdout.read_to_string(&mut output).await.unwrap(); self.parse_exec_result(output).await } } } pub async fn parse_exec_result(&mut self, out: String) -> Result<ExecResult, Option<Crash>> { let mut result_line = String::new(); for l in out.lines() { if l.contains("HEALER_EXEC_RESULT") { result_line = l.to_string(); } } if !result_line.is_empty() { let out = out.replace(&result_line, ""); if result_line.contains("success") { return Ok(ExecResult::Ok(Default::default())); } else if result_line.contains("failed") { return Ok(ExecResult::Failed(Reason(out))); } else if 
result_line.contains("crashed") { return Err(Some(Crash { inner: out })); } } if !self.guest.is_alive().await { Err(Some(Crash { inner: out })) } else { Ok(ExecResult::Ok(Default::default())) } } } struct LinuxExecutor { guest: Guest, port: u16, exec_handle: Option<Child>, conn: Option<TcpStream>, concurrency: bool, memleak_check: bool, executor_bin_path: PathBuf, target_path: PathBuf, host_ip: String, } impl LinuxExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); let port = free_ipv4_port() .unwrap_or_else(|| exits!(exitcode::TEMPFAIL, "No Free port for executor driver")); let host_ip = cfg .executor .host_ip .as_ref() .map(String::from) .unwrap_or_else(|| String::from(guest::LINUX_QEMU_HOST_IP_ADDR)); Self { guest, port, exec_handle: None, conn: None, concurrency: cfg.executor.concurrency, memleak_check: cfg.executor.memleak_check, executor_bin_path: cfg.executor.path.clone(), target_path: PathBuf::from(&cfg.fots_bin), host_ip, } } pub async fn start(&mut self) { // handle should be set to kill on drop self.exec_handle = None; self.guest.boot().await; self.start_executer().await } pub async fn start_executer(&mut self) { use tokio::io::ErrorKind::*; self.exec_handle = None; let target = self.guest.copy(&self.target_path).await; let (tx, rx) = oneshot::channel(); let mut retry = 0; let mut listener; loop { let host_addr = format!("{}:{}", self.host_ip, self.port); listener = match TcpListener::bind(&host_addr).await { Ok(l) => l, Err(e) => { if e.kind() == AddrInUse && retry != 5 { self.port = free_ipv4_port().unwrap(); retry += 1; continue; } else { eprintln!("Fail to listen on {}: {}", host_addr, e); exit(1); } } }; break; } let host_addr = listener.local_addr().unwrap(); tokio::spawn(async move { match listener.accept().await { Ok((conn, _addr)) => { tx.send(conn).unwrap(); } Err(e) => { eprintln!("Executor driver: fail to get client: {}", e); exit(exitcode::OSERR); } } }); let mut executor = App::new(self.executor_bin_path.to_str().unwrap()); executor .arg(Arg::new_opt("-t", OptVal::normal(target.to_str().unwrap()))) .arg(Arg::new_opt( "-a", OptVal::normal(&format!( "{}:{}", guest::LINUX_QEMU_USER_NET_HOST_IP_ADDR, self.port )), )); if self.memleak_check { executor.arg(Arg::new_flag("-m")); } if self.concurrency { executor.arg(Arg::new_flag("-c")); } self.exec_handle = Some(self.guest.run_cmd(&executor).await); self.conn = match timeout(Duration::new(32, 0), rx).await { Err(_) =>
Ok(conn) => Some(conn.unwrap()), }; } pub async fn exec(&mut self, p: &Prog) -> Result<ExecResult, Option<Crash>> { // send must be success assert!(self.conn.is_some()); if let Err(e) = timeout( Duration::new(15, 0), async_send(p, self.conn.as_mut().unwrap()), ) .await { info!("Prog send blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } // async_send(p, self.conn.as_mut().unwrap()).await.unwrap(); let ret = { match timeout( Duration::new(15, 0), async_recv_result(self.conn.as_mut().unwrap()), ) .await { Err(e) => { info!("Prog recv blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } Ok(ret) => ret, } }; match ret { Ok(result) => { self.guest.clear().await; if let ExecResult::Failed(ref reason) = result { let rea = reason.to_string(); if rea.contains("CRASH-MEMLEAK") { return Err(Some(Crash { inner: rea })); } } return Ok(result); } Err(_) => { let mut crashed: bool; let mut retry: u8 = 0; loop { crashed = !self.guest.is_alive().await; if crashed || retry == 10 { break; } else { retry += 1; delay_for(Duration::from_millis(500)).await; } } if crashed { return Err(self.guest.try_collect_crash().await); } else { let mut handle = self.exec_handle.take().unwrap(); let mut stdout = handle.stdout.take().unwrap(); let mut stderr = handle.stderr.take().unwrap(); handle.await.unwrap_or_else(|e| { exits!(exitcode::OSERR, "Fail to wait executor handle:{}", e) }); let mut err = Vec::new(); stderr.read_to_end(&mut err).await.unwrap(); let mut out = Vec::new(); stdout.read_to_end(&mut out).await.unwrap(); warn!( "Executor: Connection lost. STDOUT:{}. STDERR: {}", String::from_utf8(out).unwrap(), String::from_utf8(err).unwrap() ); self.start_executer().await; } } } // Caused by internal err Ok(ExecResult::Ok(Vec::new())) } }
{ self.exec_handle = None; eprintln!("Time out: wait executor connection {}", host_addr); exit(1) }
conditional_block
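The span masked in the record above is the give-up branch of the executor handshake: the driver binds a listener, launches the executor inside the guest, and waits up to 32 seconds for it to dial back, exiting if nothing arrives. A minimal Python sketch of that wait-with-deadline step (function and variable names are illustrative, not taken from this codebase):

```python
import socket

def wait_for_executor(listener: socket.socket, timeout_s: float = 32.0) -> socket.socket:
    """Wait for the in-guest executor to connect back, giving up after a deadline."""
    listener.settimeout(timeout_s)
    try:
        conn, _addr = listener.accept()  # executor dialed back in
        return conn
    except socket.timeout:
        raise TimeoutError("executor did not connect back before the deadline")
```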
exec.rs
use crate::guest; use crate::guest::{Crash, Guest}; use crate::utils::cli::{App, Arg, OptVal}; use crate::utils::free_ipv4_port; use crate::Config; use core::c::to_prog; use core::prog::Prog; use core::target::Target; use executor::transfer::{async_recv_result, async_send}; use executor::{ExecResult, Reason}; use std::env::temp_dir; use std::path::PathBuf; use std::process::exit; use tokio::fs::write; use tokio::io::AsyncReadExt; use tokio::net::{TcpListener, TcpStream}; use tokio::process::Child; use tokio::sync::oneshot; use tokio::time::{delay_for, timeout, Duration}; // config for executor #[derive(Debug, Clone, Deserialize)] pub struct ExecutorConf { pub path: PathBuf, pub host_ip: Option<String>, pub concurrency: bool, pub memleak_check: bool, pub script_mode: bool, } impl ExecutorConf { pub fn check(&self) { if !self.path.is_file() { eprintln!( "Config Error: executor executable file {} is invalid", self.path.display() ); exit(exitcode::CONFIG) } if let Some(ip) = &self.host_ip { use std::net::ToSocketAddrs; let addr = format!("{}:8080", ip); if let Err(e) = addr.to_socket_addrs() { eprintln!( "Config Error: invalid host ip `{}`: {}", self.host_ip.as_ref().unwrap(), e ); exit(exitcode::CONFIG) } } } } pub struct Executor { inner: ExecutorImpl, } enum ExecutorImpl { Linux(LinuxExecutor), Scripy(ScriptExecutor), } impl Executor { pub fn new(cfg: &Config) -> Self { let inner = if cfg.executor.script_mode { ExecutorImpl::Scripy(ScriptExecutor::new(cfg)) } else { ExecutorImpl::Linux(LinuxExecutor::new(cfg)) }; Self { inner } } pub async fn start(&mut self) { match self.inner { ExecutorImpl::Linux(ref mut e) => e.start().await, ExecutorImpl::Scripy(ref mut e) => e.start().await, } } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { match self.inner { ExecutorImpl::Linux(ref mut e) => e.exec(p).await, ExecutorImpl::Scripy(ref mut e) => e.exec(p, t).await, } } } struct ScriptExecutor { path_on_host: PathBuf, guest: Guest, } impl ScriptExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); Self { path_on_host: cfg.executor.path.clone(), guest, } } pub async fn start(&mut self) { self.guest.boot().await; } pub async fn exec(&mut self, p: &Prog, t: &Target) -> Result<ExecResult, Option<Crash>> { let p_text = to_prog(p, t); let tmp = temp_dir().join("HEALER_test_case_v1-1-1.c"); if let Err(e) = write(&tmp, &p_text).await { eprintln!( "Failed to write test case to tmp dir \"{}\": {}", tmp.display(), e ); exit(1); } let guest_case_file = self.guest.copy(&tmp).await; let mut executor = App::new(self.path_on_host.to_str().unwrap()); executor.arg(Arg::new_flag(guest_case_file.to_str().unwrap())); let mut exec_handle = self.guest.run_cmd(&executor).await; match timeout(Duration::new(15, 0), &mut exec_handle).await { Err(_) => Ok(ExecResult::Failed(Reason("Time out".to_string()))), Ok(_) => { let mut stdout = exec_handle.stdout.take().unwrap(); let mut output = String::new(); stdout.read_to_string(&mut output).await.unwrap(); self.parse_exec_result(output).await } } } pub async fn parse_exec_result(&mut self, out: String) -> Result<ExecResult, Option<Crash>> { let mut result_line = String::new(); for l in out.lines() { if l.contains("HEALER_EXEC_RESULT") { result_line = l.to_string(); } } if !result_line.is_empty() { let out = out.replace(&result_line, ""); if result_line.contains("success") { return Ok(ExecResult::Ok(Default::default())); } else if result_line.contains("failed") { return Ok(ExecResult::Failed(Reason(out))); } else if 
result_line.contains("crashed") { return Err(Some(Crash { inner: out })); } } if !self.guest.is_alive().await { Err(Some(Crash { inner: out })) } else { Ok(ExecResult::Ok(Default::default())) } } } struct LinuxExecutor { guest: Guest, port: u16, exec_handle: Option<Child>, conn: Option<TcpStream>, concurrency: bool, memleak_check: bool, executor_bin_path: PathBuf, target_path: PathBuf, host_ip: String, } impl LinuxExecutor { pub fn new(cfg: &Config) -> Self { let guest = Guest::new(cfg); let port = free_ipv4_port() .unwrap_or_else(|| exits!(exitcode::TEMPFAIL, "No Free port for executor driver")); let host_ip = cfg .executor .host_ip .as_ref() .map(String::from) .unwrap_or_else(|| String::from(guest::LINUX_QEMU_HOST_IP_ADDR)); Self { guest, port, exec_handle: None, conn: None, concurrency: cfg.executor.concurrency, memleak_check: cfg.executor.memleak_check, executor_bin_path: cfg.executor.path.clone(), target_path: PathBuf::from(&cfg.fots_bin), host_ip, } } pub async fn start(&mut self) { // handle should be set to kill on drop self.exec_handle = None; self.guest.boot().await; self.start_executer().await } pub async fn start_executer(&mut self) { use tokio::io::ErrorKind::*; self.exec_handle = None; let target = self.guest.copy(&self.target_path).await; let (tx, rx) = oneshot::channel(); let mut retry = 0; let mut listener; loop {
listener = match TcpListener::bind(&host_addr).await { Ok(l) => l, Err(e) => { if e.kind() == AddrInUse && retry != 5 { self.port = free_ipv4_port().unwrap(); retry += 1; continue; } else { eprintln!("Fail to listen on {}: {}", host_addr, e); exit(1); } } }; break; } let host_addr = listener.local_addr().unwrap(); tokio::spawn(async move { match listener.accept().await { Ok((conn, _addr)) => { tx.send(conn).unwrap(); } Err(e) => { eprintln!("Executor driver: fail to get client: {}", e); exit(exitcode::OSERR); } } }); let mut executor = App::new(self.executor_bin_path.to_str().unwrap()); executor .arg(Arg::new_opt("-t", OptVal::normal(target.to_str().unwrap()))) .arg(Arg::new_opt( "-a", OptVal::normal(&format!( "{}:{}", guest::LINUX_QEMU_USER_NET_HOST_IP_ADDR, self.port )), )); if self.memleak_check { executor.arg(Arg::new_flag("-m")); } if self.concurrency { executor.arg(Arg::new_flag("-c")); } self.exec_handle = Some(self.guest.run_cmd(&executor).await); self.conn = match timeout(Duration::new(32, 0), rx).await { Err(_) => { self.exec_handle = None; eprintln!("Time out: wait executor connection {}", host_addr); exit(1) } Ok(conn) => Some(conn.unwrap()), }; } pub async fn exec(&mut self, p: &Prog) -> Result<ExecResult, Option<Crash>> { // send must be success assert!(self.conn.is_some()); if let Err(e) = timeout( Duration::new(15, 0), async_send(p, self.conn.as_mut().unwrap()), ) .await { info!("Prog send blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } // async_send(p, self.conn.as_mut().unwrap()).await.unwrap(); let ret = { match timeout( Duration::new(15, 0), async_recv_result(self.conn.as_mut().unwrap()), ) .await { Err(e) => { info!("Prog recv blocked: {}, restarting...", e); self.start().await; return Ok(ExecResult::Failed(Reason("Prog send blocked".into()))); } Ok(ret) => ret, } }; match ret { Ok(result) => { self.guest.clear().await; if let ExecResult::Failed(ref reason) = result { let rea = reason.to_string(); if rea.contains("CRASH-MEMLEAK") { return Err(Some(Crash { inner: rea })); } } return Ok(result); } Err(_) => { let mut crashed: bool; let mut retry: u8 = 0; loop { crashed = !self.guest.is_alive().await; if crashed || retry == 10 { break; } else { retry += 1; delay_for(Duration::from_millis(500)).await; } } if crashed { return Err(self.guest.try_collect_crash().await); } else { let mut handle = self.exec_handle.take().unwrap(); let mut stdout = handle.stdout.take().unwrap(); let mut stderr = handle.stderr.take().unwrap(); handle.await.unwrap_or_else(|e| { exits!(exitcode::OSERR, "Fail to wait executor handle:{}", e) }); let mut err = Vec::new(); stderr.read_to_end(&mut err).await.unwrap(); let mut out = Vec::new(); stdout.read_to_end(&mut out).await.unwrap(); warn!( "Executor: Connection lost. STDOUT:{}. STDERR: {}", String::from_utf8(out).unwrap(), String::from_utf8(err).unwrap() ); self.start_executer().await; } } } // Caused by internal err Ok(ExecResult::Ok(Vec::new())) } }
let host_addr = format!("{}:{}", self.host_ip, self.port);
random_line_split
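The surrounding code in this record also shows the port-selection loop: bind to `host_ip:port`, and on an address-in-use error pick a fresh free port and retry, giving up after five attempts. A small Python sketch of the same retry policy; binding to port 0 stands in for the Rust `free_ipv4_port` helper, whose implementation is not shown here:

```python
import errno
import socket

def bind_with_retry(host: str, port: int, retries: int = 5) -> socket.socket:
    """Bind a TCP listener, retrying with an OS-assigned port on EADDRINUSE."""
    for attempt in range(retries + 1):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind((host, port))
            s.listen(1)
            return s
        except OSError as e:
            s.close()
            if e.errno == errno.EADDRINUSE and attempt < retries:
                port = 0  # ask the OS for any free port and try again
                continue
            raise
```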
PoissonDiscSampleGenerator.py
"""Poisson Disc Sampling Generator This script uses a generator to create random points with a minimum distance of r from all other points within a defined space. This is often called Poisson Disc Sampling or blue noise. The script used is Bridson's Algorithm to generate the points. """ import numpy as np from Utilities import metric class PoissonDiscSampleGenerator(object): """Poisson Disc Sample Generator A generator used to create a poisson disc sample with given user parameters. """ def __init__(self, radius=5, k=30, extent=[100, 100], seed=None): """ PoissonDiscSampleGenerator Constructor Initializes the Poisson Disc Sample Generator. Arguments: radius (float): Radial distance for each random sample. (Default: 1) metric (function): Function used to calculate distance. (Default: distance.euclidean) k (int): Number of points used to generate a sample. extent (list<int>): The maximum length of each dimension. seed (int): The seed used when generating the random sample. (Optional) (Default: None) """ # -------------------------------- # User Parameters # -------------------------------- # Defines self._radius self.radius = radius # Defines self._k self.k = k # Defines both self._extent and self._dim self.extent = np.array(extent) # Defines self._seed self.seed = seed self._metric = metric.euclidean self._changes = self._create_neighbor_distances() # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def __call__(self): """Calls the generate function. Returns: (np.ndarray) """ return self.generate() def generate(self): """Generates the samples with the given user parameters. Returns: (np.ndarray) Returns an array of samples of shape (n_samples, dim) where dim is the length of the extent given. """ # -------------------------------- # Initializing Variables # -------------------------------- # Set NumPy Random Seed np.random.seed(self._seed) # Create Active List active = [] # Clear previously generated examples if len(self._samples) > 0: self._clear_previous_samples() # -------------------------------- # Begin Generating Samples # -------------------------------- # Create the first sample self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)), high=self._extent, size=(self._dim,))) active.append(self._samples[0]) self._grid[self._get_grid_coord(self._samples[0])] = 0 while active: # Choose Random Active Sample idx = np.random.choice(len(active)) # Make new point & confirm it is valid new_point = self._make_point(active[idx]) if new_point is None: active.pop(idx) else: # Add sample to listings and store in grid for neighboring locations. self._samples.append(new_point) active.append(new_point) self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1 # Return samples as numpy array self._samples = np.array(self._samples) return self._samples @property def radius(self): """The minimum distance between any two points. Returns: (float) """ return self._radius @property def extent(self): """The measurements of the plane where the sample will be produced (length, width, height, depth, etc.) 
Returns: (np.ndarray) """ return np.array(self._extent) @property def k(self): """The number of attempts each active point to make a new point. Returns: (int) """ return self._k @property def metric(self): """The distance function used to measure the distance between two points. Returns: (function) """ return self._metric @property def seed(self): """The random seed used for generating samples. Returns: (int) Returns None if no seed is set, otherwise a user-defined int. """ return self._seed @property def dimension_size(self): """The length of the extent and all of the generated sample point. Returns: (int) """ return self._dim @property def samples(self): """The samples generated by the generator. May not match specs if parameters were re-adjusted after use. Returns: (np.ndarray) """ if len(self._samples) == 0: return np.empty((0, 2)) return self._samples @radius.setter def radius(self, radius): """Setter for radius Args: radius (float): Minimum distance between two points. """ if radius <= 0: raise ValueError("Radius must be a number that is greater than 0.") self._radius = radius @extent.setter def extent(self, extent): """Setter for extent Args: extent (list<float>): The dimension lengths. """ if len(extent) < 2: raise ValueError("Extent must have a length of at least 2.") extent = np.array(extent, dtype=float) if np.any(extent <= 0): raise ValueError("All extents must be greater than 0.") self._extent = extent self._dim = len(extent) @k.setter def k(self, k): """Setter for k Args: k (int): Number of attempts for each active point to generate a new point. """ if k <= 0: raise ValueError("K must be greater than 0.") if int(k) != k: raise ValueError("K must be an integer.") self._k = k @seed.setter def seed(self, seed): if seed is not None: if type(seed) is not int:
if seed < 0: raise ValueError("Seed must be non-negative.") self._seed = seed def _clear_previous_samples(self): """Clears grid and samples for generating new samples. """ del self._grid del self._samples # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def _get_grid_coord(self, point): """Returns the grid coordinate of the point. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: The grid coordinate that the point is separated in. """ return tuple([int(point[i] / self._cell_length) for i in range(self._dim)]) def _make_point(self, active_point): """ Attempts to make a random point in proximity of active_point. Attempts to make a random point around the active_point k times. If the new point is too close to another point, it will discard and try. If it fails k times, the function returns None. Args: active_point (np.ndarray): An array of size (number_of_dimensions,). Returns: (np.ndarray). Returns an array of size (number_of_dimensions,) if succeeds. Otherwise, returns None. """ # -------------------------------- # Create Random Parameters # -------------------------------- for _ in range(self._k): # Defines radial distance from active_point. rho = np.random.uniform(self._radius, 2 * self._radius) # Defines angle from active_point. Requires multiple angles for higher dimensional planes. theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)] # -------------------------------- # Create New Point # -------------------------------- # Create a 2D point using first theta angle. new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])] # Generate more components of the coordinate for higher dimensional planes. new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])]) new_point = np.array(new_point) # Confirm point is valid if self._valid_point(new_point): return new_point return None def _valid_point(self, point): """Confirms that a point is valid. If a point is too close to another point or is outside of bounds, it will fail. Otherwise, it will succeed. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: (bool) If succeeds, returns True. Otherwise, returns False. """ # -------------------------------- # Check Bounds # -------------------------------- # Get grid point and confirm it is within range coord = self._get_grid_coord(point) if np.logical_or(np.any(point < 0), np.any(point >= self._extent)): return False # -------------------------------- # Check Distance of Neighbors # -------------------------------- for idx in self._get_neighbors(coord): # No points in grid cell if self._grid[idx] == -1: continue # Obtains point in grid cell and confirms its distance is less than the radius. near_point = self._samples[self._grid[idx]] if metric.euclidean(near_point, point) < self._radius: return False return True def _get_neighbors(self, coord): """Obtains neighboring cells that are surrounding a given cell coordinate. Args: coord (tuple): A coordinate whose length is equal to the number of dimensions. Returns: (list<tuple>). 
A list of neighboring cells. """ neighbors = [] for change in self._changes: neighbor_coord = np.array(coord) + change if np.logical_or(np.any(neighbor_coord < 0), np.any(neighbor_coord >= self._grid_shape)): continue neighbors.append(tuple(neighbor_coord)) return neighbors def _create_neighbor_distances(self): """Creates distance vectors for calculating all neighbors from a given point. A neighbor can be only so far away from the original point and is dependent on the number of dimensions. We calculate every possible coordinate on the grid that the neighbor can be part of relative to a point and store that information for later use. Returns: (np.ndarray<int>) The array is of shape (number_of_vectors, number_of_dimensions). """ # -------------------------------- # Create Directions from Point # -------------------------------- diff = [[0 for _ in range(self._dim)]] curr = diff[0][:] for i in range(self._dim): # Each diff is a unit vector, only having one value at +1 or -1 and all others at 0. curr[i] = 1 diff.append(curr[:]) curr[i] = -1 diff.append(curr[:]) curr[i] = 0 # Remove initial blank unit vector with all values at 0. diff.pop(0) del curr # -------------------------------- # Breadth First Search # -------------------------------- distances = [] queue = [[0 for _ in range(self._dim)]] while queue: # Get latest distance curr = queue.pop() # The distance from any possible point should be less than or equal to the number of dimensions. # This can be shown using basic calculations. if self._metric(np.array(curr), np.zeros(shape=(len(curr),))) >= 2 * np.sqrt(self._dim) or \ np.any(np.abs(np.array(curr)) > self._extent / 2) or curr in distances: continue # Calculate all distances from child and add to queue queue.extend([list(np.array(curr) + np.array(diff[i])) for i in range(len(diff))]) # Add current distance to distances distances.append(curr) # Return all possible neighbor distances return np.array(distances, dtype=int)
raise ValueError("Seed must be integer.")
conditional_block
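For orientation, a minimal usage sketch of the generator defined in this record: construct it with the documented parameters, generate the samples, and confirm that no two points are closer than the requested radius. The import path is an assumption; adjust it to wherever the class lives in this project.

```python
import numpy as np
# Assumed import path for illustration.
from PoissonDiscSampleGenerator import PoissonDiscSampleGenerator

gen = PoissonDiscSampleGenerator(radius=5, k=30, extent=[100, 100], seed=42)
pts = gen.generate()                      # shape: (n_samples, 2)

# Sanity check: pairwise distances never fall below the requested radius.
d = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
np.fill_diagonal(d, np.inf)
print(pts.shape, float(d.min()) >= gen.radius)
```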
PoissonDiscSampleGenerator.py
"""Poisson Disc Sampling Generator This script uses a generator to create random points with a minimum distance of r from all other points within a defined space. This is often called Poisson Disc Sampling or blue noise. The script used is Bridson's Algorithm to generate the points. """ import numpy as np from Utilities import metric class PoissonDiscSampleGenerator(object): """Poisson Disc Sample Generator A generator used to create a poisson disc sample with given user parameters. """ def __init__(self, radius=5, k=30, extent=[100, 100], seed=None): """ PoissonDiscSampleGenerator Constructor Initializes the Poisson Disc Sample Generator. Arguments: radius (float): Radial distance for each random sample. (Default: 1) metric (function): Function used to calculate distance. (Default: distance.euclidean) k (int): Number of points used to generate a sample. extent (list<int>): The maximum length of each dimension. seed (int): The seed used when generating the random sample. (Optional) (Default: None) """ # -------------------------------- # User Parameters # -------------------------------- # Defines self._radius self.radius = radius # Defines self._k self.k = k # Defines both self._extent and self._dim self.extent = np.array(extent) # Defines self._seed self.seed = seed self._metric = metric.euclidean self._changes = self._create_neighbor_distances() # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def __call__(self): """Calls the generate function. Returns: (np.ndarray) """ return self.generate() def generate(self): """Generates the samples with the given user parameters. Returns: (np.ndarray) Returns an array of samples of shape (n_samples, dim) where dim is the length of the extent given. """ # -------------------------------- # Initializing Variables # -------------------------------- # Set NumPy Random Seed np.random.seed(self._seed) # Create Active List active = [] # Clear previously generated examples if len(self._samples) > 0: self._clear_previous_samples() # -------------------------------- # Begin Generating Samples # -------------------------------- # Create the first sample self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)), high=self._extent, size=(self._dim,))) active.append(self._samples[0]) self._grid[self._get_grid_coord(self._samples[0])] = 0 while active: # Choose Random Active Sample idx = np.random.choice(len(active)) # Make new point & confirm it is valid new_point = self._make_point(active[idx]) if new_point is None: active.pop(idx) else: # Add sample to listings and store in grid for neighboring locations. self._samples.append(new_point) active.append(new_point) self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1 # Return samples as numpy array self._samples = np.array(self._samples) return self._samples @property def radius(self): """The minimum distance between any two points. Returns: (float) """ return self._radius @property def extent(self): """The measurements of the plane where the sample will be produced (length, width, height, depth, etc.) Returns: (np.ndarray) """ return np.array(self._extent) @property def k(self):
@property def metric(self): """The distance function used to measure the distance between two points. Returns: (function) """ return self._metric @property def seed(self): """The random seed used for generating samples. Returns: (int) Returns None if no seed is set, otherwise a user-defined int. """ return self._seed @property def dimension_size(self): """The length of the extent and all of the generated sample point. Returns: (int) """ return self._dim @property def samples(self): """The samples generated by the generator. May not match specs if parameters were re-adjusted after use. Returns: (np.ndarray) """ if len(self._samples) == 0: return np.empty((0, 2)) return self._samples @radius.setter def radius(self, radius): """Setter for radius Args: radius (float): Minimum distance between two points. """ if radius <= 0: raise ValueError("Radius must be a number that is greater than 0.") self._radius = radius @extent.setter def extent(self, extent): """Setter for extent Args: extent (list<float>): The dimension lengths. """ if len(extent) < 2: raise ValueError("Extent must have a length of at least 2.") extent = np.array(extent, dtype=float) if np.any(extent <= 0): raise ValueError("All extents must be greater than 0.") self._extent = extent self._dim = len(extent) @k.setter def k(self, k): """Setter for k Args: k (int): Number of attempts for each active point to generate a new point. """ if k <= 0: raise ValueError("K must be greater than 0.") if int(k) != k: raise ValueError("K must be an integer.") self._k = k @seed.setter def seed(self, seed): if seed is not None: if type(seed) is not int: raise ValueError("Seed must be integer.") if seed < 0: raise ValueError("Seed must be non-negative.") self._seed = seed def _clear_previous_samples(self): """Clears grid and samples for generating new samples. """ del self._grid del self._samples # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def _get_grid_coord(self, point): """Returns the grid coordinate of the point. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: The grid coordinate that the point is separated in. """ return tuple([int(point[i] / self._cell_length) for i in range(self._dim)]) def _make_point(self, active_point): """ Attempts to make a random point in proximity of active_point. Attempts to make a random point around the active_point k times. If the new point is too close to another point, it will discard and try. If it fails k times, the function returns None. Args: active_point (np.ndarray): An array of size (number_of_dimensions,). Returns: (np.ndarray). Returns an array of size (number_of_dimensions,) if succeeds. Otherwise, returns None. """ # -------------------------------- # Create Random Parameters # -------------------------------- for _ in range(self._k): # Defines radial distance from active_point. rho = np.random.uniform(self._radius, 2 * self._radius) # Defines angle from active_point. Requires multiple angles for higher dimensional planes. 
theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)] # -------------------------------- # Create New Point # -------------------------------- # Create a 2D point using first theta angle. new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])] # Generate more components of the coordinate for higher dimensional planes. new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])]) new_point = np.array(new_point) # Confirm point is valid if self._valid_point(new_point): return new_point return None def _valid_point(self, point): """Confirms that a point is valid. If a point is too close to another point or is outside of bounds, it will fail. Otherwise, it will succeed. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: (bool) If succeeds, returns True. Otherwise, returns False. """ # -------------------------------- # Check Bounds # -------------------------------- # Get grid point and confirm it is within range coord = self._get_grid_coord(point) if np.logical_or(np.any(point < 0), np.any(point >= self._extent)): return False # -------------------------------- # Check Distance of Neighbors # -------------------------------- for idx in self._get_neighbors(coord): # No points in grid cell if self._grid[idx] == -1: continue # Obtains point in grid cell and confirms its distance is less than the radius. near_point = self._samples[self._grid[idx]] if metric.euclidean(near_point, point) < self._radius: return False return True def _get_neighbors(self, coord): """Obtains neighboring cells that are surrounding a given cell coordinate. Args: coord (tuple): A coordinate whose length is equal to the number of dimensions. Returns: (list<tuple>). A list of neighboring cells. """ neighbors = [] for change in self._changes: neighbor_coord = np.array(coord) + change if np.logical_or(np.any(neighbor_coord < 0), np.any(neighbor_coord >= self._grid_shape)): continue neighbors.append(tuple(neighbor_coord)) return neighbors def _create_neighbor_distances(self): """Creates distance vectors for calculating all neighbors from a given point. A neighbor can be only so far away from the original point and is dependent on the number of dimensions. We calculate every possible coordinate on the grid that the neighbor can be part of relative to a point and store that information for later use. Returns: (np.ndarray<int>) The array is of shape (number_of_vectors, number_of_dimensions). """ # -------------------------------- # Create Directions from Point # -------------------------------- diff = [[0 for _ in range(self._dim)]] curr = diff[0][:] for i in range(self._dim): # Each diff is a unit vector, only having one value at +1 or -1 and all others at 0. curr[i] = 1 diff.append(curr[:]) curr[i] = -1 diff.append(curr[:]) curr[i] = 0 # Remove initial blank unit vector with all values at 0. diff.pop(0) del curr # -------------------------------- # Breadth First Search # -------------------------------- distances = [] queue = [[0 for _ in range(self._dim)]] while queue: # Get latest distance curr = queue.pop() # The distance from any possible point should be less than or equal to the number of dimensions. # This can be shown using basic calculations. 
if self._metric(np.array(curr), np.zeros(shape=(len(curr),))) >= 2 * np.sqrt(self._dim) or \ np.any(np.abs(np.array(curr)) > self._extent / 2) or curr in distances: continue # Calculate all distances from child and add to queue queue.extend([list(np.array(curr) + np.array(diff[i])) for i in range(len(diff))]) # Add current distance to distances distances.append(curr) # Return all possible neighbor distances return np.array(distances, dtype=int)
"""The number of attempts each active point to make a new point. Returns: (int) """ return self._k
identifier_body
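A short note on the grid construction used throughout these records: the background grid uses a cell side of radius / sqrt(dim), so the cell diagonal is exactly the radius. Any two points inside one cell are therefore within the exclusion radius of each other, which is why a single sample index per grid cell (`self._grid` filled with -1) is sufficient. A tiny check of that relationship, with assumed example values:

```python
import numpy as np

r, dim = 5.0, 3
cell = r / np.sqrt(dim)
rng = np.random.default_rng(1)
# Two points drawn inside the same cell are at most the cell diagonal apart,
# and that diagonal is cell * sqrt(dim) == r, so one index per cell is enough.
a, b = rng.uniform(0, cell, size=(2, dim))
print(np.linalg.norm(a - b) <= r, np.isclose(cell * np.sqrt(dim), r))
```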
PoissonDiscSampleGenerator.py
"""Poisson Disc Sampling Generator This script uses a generator to create random points with a minimum distance of r from all other points within a defined space. This is often called Poisson Disc Sampling or blue noise. The script used is Bridson's Algorithm to generate the points. """ import numpy as np from Utilities import metric class PoissonDiscSampleGenerator(object): """Poisson Disc Sample Generator A generator used to create a poisson disc sample with given user parameters. """ def __init__(self, radius=5, k=30, extent=[100, 100], seed=None): """ PoissonDiscSampleGenerator Constructor Initializes the Poisson Disc Sample Generator. Arguments: radius (float): Radial distance for each random sample. (Default: 1) metric (function): Function used to calculate distance. (Default: distance.euclidean) k (int): Number of points used to generate a sample. extent (list<int>): The maximum length of each dimension. seed (int): The seed used when generating the random sample. (Optional) (Default: None) """ # -------------------------------- # User Parameters # -------------------------------- # Defines self._radius self.radius = radius # Defines self._k self.k = k # Defines both self._extent and self._dim self.extent = np.array(extent) # Defines self._seed self.seed = seed self._metric = metric.euclidean self._changes = self._create_neighbor_distances() # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def __call__(self): """Calls the generate function. Returns: (np.ndarray) """ return self.generate() def generate(self): """Generates the samples with the given user parameters. Returns: (np.ndarray) Returns an array of samples of shape (n_samples, dim) where dim is the length of the extent given. """ # -------------------------------- # Initializing Variables # -------------------------------- # Set NumPy Random Seed np.random.seed(self._seed) # Create Active List active = [] # Clear previously generated examples if len(self._samples) > 0: self._clear_previous_samples() # -------------------------------- # Begin Generating Samples # -------------------------------- # Create the first sample self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)), high=self._extent, size=(self._dim,))) active.append(self._samples[0]) self._grid[self._get_grid_coord(self._samples[0])] = 0 while active: # Choose Random Active Sample idx = np.random.choice(len(active)) # Make new point & confirm it is valid new_point = self._make_point(active[idx]) if new_point is None: active.pop(idx) else: # Add sample to listings and store in grid for neighboring locations. self._samples.append(new_point) active.append(new_point) self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1 # Return samples as numpy array self._samples = np.array(self._samples) return self._samples @property def radius(self): """The minimum distance between any two points. Returns: (float) """ return self._radius @property def extent(self): """The measurements of the plane where the sample will be produced (length, width, height, depth, etc.) 
Returns: (np.ndarray) """ return np.array(self._extent) @property def k(self): """The number of attempts each active point to make a new point. Returns: (int) """ return self._k @property def metric(self): """The distance function used to measure the distance between two points. Returns: (function) """ return self._metric @property def seed(self): """The random seed used for generating samples. Returns: (int) Returns None if no seed is set, otherwise a user-defined int. """ return self._seed @property def dimension_size(self): """The length of the extent and all of the generated sample point. Returns: (int) """ return self._dim @property def samples(self): """The samples generated by the generator. May not match specs if parameters were re-adjusted after use. Returns: (np.ndarray) """ if len(self._samples) == 0: return np.empty((0, 2)) return self._samples @radius.setter def radius(self, radius): """Setter for radius Args: radius (float): Minimum distance between two points. """ if radius <= 0: raise ValueError("Radius must be a number that is greater than 0.") self._radius = radius @extent.setter def extent(self, extent): """Setter for extent Args: extent (list<float>): The dimension lengths. """ if len(extent) < 2: raise ValueError("Extent must have a length of at least 2.") extent = np.array(extent, dtype=float) if np.any(extent <= 0): raise ValueError("All extents must be greater than 0.") self._extent = extent self._dim = len(extent) @k.setter def k(self, k): """Setter for k Args: k (int): Number of attempts for each active point to generate a new point. """ if k <= 0: raise ValueError("K must be greater than 0.") if int(k) != k: raise ValueError("K must be an integer.") self._k = k @seed.setter def seed(self, seed): if seed is not None: if type(seed) is not int: raise ValueError("Seed must be integer.") if seed < 0: raise ValueError("Seed must be non-negative.") self._seed = seed def _clear_previous_samples(self): """Clears grid and samples for generating new samples. """ del self._grid del self._samples # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def _get_grid_coord(self, point): """Returns the grid coordinate of the point. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: The grid coordinate that the point is separated in. """ return tuple([int(point[i] / self._cell_length) for i in range(self._dim)]) def _make_point(self, active_point): """ Attempts to make a random point in proximity of active_point. Attempts to make a random point around the active_point k times. If the new point is too close to another point, it will discard and try. If it fails k times, the function returns None. Args: active_point (np.ndarray): An array of size (number_of_dimensions,). Returns: (np.ndarray). Returns an array of size (number_of_dimensions,) if succeeds. Otherwise, returns None. """ # -------------------------------- # Create Random Parameters # -------------------------------- for _ in range(self._k): # Defines radial distance from active_point. rho = np.random.uniform(self._radius, 2 * self._radius) # Defines angle from active_point. 
Requires multiple angles for higher dimensional planes. theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)] # -------------------------------- # Create New Point # -------------------------------- # Create a 2D point using first theta angle. new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])] # Generate more components of the coordinate for higher dimensional planes. new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])]) new_point = np.array(new_point) # Confirm point is valid if self._valid_point(new_point): return new_point return None def _valid_point(self, point): """Confirms that a point is valid. If a point is too close to another point or is outside of bounds, it will fail. Otherwise, it will succeed. Args:
(bool) If succeeds, returns True. Otherwise, returns False. """ # -------------------------------- # Check Bounds # -------------------------------- # Get grid point and confirm it is within range coord = self._get_grid_coord(point) if np.logical_or(np.any(point < 0), np.any(point >= self._extent)): return False # -------------------------------- # Check Distance of Neighbors # -------------------------------- for idx in self._get_neighbors(coord): # No points in grid cell if self._grid[idx] == -1: continue # Obtains point in grid cell and confirms its distance is less than the radius. near_point = self._samples[self._grid[idx]] if metric.euclidean(near_point, point) < self._radius: return False return True def _get_neighbors(self, coord): """Obtains neighboring cells that are surrounding a given cell coordinate. Args: coord (tuple): A coordinate whose length is equal to the number of dimensions. Returns: (list<tuple>). A list of neighboring cells. """ neighbors = [] for change in self._changes: neighbor_coord = np.array(coord) + change if np.logical_or(np.any(neighbor_coord < 0), np.any(neighbor_coord >= self._grid_shape)): continue neighbors.append(tuple(neighbor_coord)) return neighbors def _create_neighbor_distances(self): """Creates distance vectors for calculating all neighbors from a given point. A neighbor can be only so far away from the original point and is dependent on the number of dimensions. We calculate every possible coordinate on the grid that the neighbor can be part of relative to a point and store that information for later use. Returns: (np.ndarray<int>) The array is of shape (number_of_vectors, number_of_dimensions). """ # -------------------------------- # Create Directions from Point # -------------------------------- diff = [[0 for _ in range(self._dim)]] curr = diff[0][:] for i in range(self._dim): # Each diff is a unit vector, only having one value at +1 or -1 and all others at 0. curr[i] = 1 diff.append(curr[:]) curr[i] = -1 diff.append(curr[:]) curr[i] = 0 # Remove initial blank unit vector with all values at 0. diff.pop(0) del curr # -------------------------------- # Breadth First Search # -------------------------------- distances = [] queue = [[0 for _ in range(self._dim)]] while queue: # Get latest distance curr = queue.pop() # The distance from any possible point should be less than or equal to the number of dimensions. # This can be shown using basic calculations. if self._metric(np.array(curr), np.zeros(shape=(len(curr),))) >= 2 * np.sqrt(self._dim) or \ np.any(np.abs(np.array(curr)) > self._extent / 2) or curr in distances: continue # Calculate all distances from child and add to queue queue.extend([list(np.array(curr) + np.array(diff[i])) for i in range(len(diff))]) # Add current distance to distances distances.append(curr) # Return all possible neighbor distances return np.array(distances, dtype=int)
point (np.ndarray): An array of size (number_of_dimensions,). Returns:
random_line_split
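The candidate-generation step that appears in this record draws each new point from an annulus around an active sample: a distance rho uniform in [radius, 2*radius) and a uniform angle. A standalone 2-D restatement of that geometry (not the class's own method, which also handles higher dimensions):

```python
import numpy as np

def candidate_around(p: np.ndarray, radius: float, rng: np.random.Generator) -> np.ndarray:
    """One 2-D Bridson candidate: uniform angle, distance in [radius, 2*radius)."""
    rho = rng.uniform(radius, 2 * radius)
    theta = rng.uniform(0.0, 2 * np.pi)
    return p + rho * np.array([np.cos(theta), np.sin(theta)])

rng = np.random.default_rng(0)
print(candidate_around(np.array([50.0, 50.0]), 5.0, rng))
```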
PoissonDiscSampleGenerator.py
"""Poisson Disc Sampling Generator This script uses a generator to create random points with a minimum distance of r from all other points within a defined space. This is often called Poisson Disc Sampling or blue noise. The script used is Bridson's Algorithm to generate the points. """ import numpy as np from Utilities import metric class PoissonDiscSampleGenerator(object): """Poisson Disc Sample Generator A generator used to create a poisson disc sample with given user parameters. """ def __init__(self, radius=5, k=30, extent=[100, 100], seed=None): """ PoissonDiscSampleGenerator Constructor Initializes the Poisson Disc Sample Generator. Arguments: radius (float): Radial distance for each random sample. (Default: 1) metric (function): Function used to calculate distance. (Default: distance.euclidean) k (int): Number of points used to generate a sample. extent (list<int>): The maximum length of each dimension. seed (int): The seed used when generating the random sample. (Optional) (Default: None) """ # -------------------------------- # User Parameters # -------------------------------- # Defines self._radius self.radius = radius # Defines self._k self.k = k # Defines both self._extent and self._dim self.extent = np.array(extent) # Defines self._seed self.seed = seed self._metric = metric.euclidean self._changes = self._create_neighbor_distances() # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def __call__(self): """Calls the generate function. Returns: (np.ndarray) """ return self.generate() def generate(self): """Generates the samples with the given user parameters. Returns: (np.ndarray) Returns an array of samples of shape (n_samples, dim) where dim is the length of the extent given. """ # -------------------------------- # Initializing Variables # -------------------------------- # Set NumPy Random Seed np.random.seed(self._seed) # Create Active List active = [] # Clear previously generated examples if len(self._samples) > 0: self._clear_previous_samples() # -------------------------------- # Begin Generating Samples # -------------------------------- # Create the first sample self._samples.append(np.random.uniform(low=np.zeros(shape=(self._dim,)), high=self._extent, size=(self._dim,))) active.append(self._samples[0]) self._grid[self._get_grid_coord(self._samples[0])] = 0 while active: # Choose Random Active Sample idx = np.random.choice(len(active)) # Make new point & confirm it is valid new_point = self._make_point(active[idx]) if new_point is None: active.pop(idx) else: # Add sample to listings and store in grid for neighboring locations. self._samples.append(new_point) active.append(new_point) self._grid[self._get_grid_coord(new_point)] = len(self._samples) - 1 # Return samples as numpy array self._samples = np.array(self._samples) return self._samples @property def radius(self): """The minimum distance between any two points. Returns: (float) """ return self._radius @property def extent(self): """The measurements of the plane where the sample will be produced (length, width, height, depth, etc.) 
Returns: (np.ndarray) """ return np.array(self._extent) @property def k(self): """The number of attempts each active point to make a new point. Returns: (int) """ return self._k @property def metric(self): """The distance function used to measure the distance between two points. Returns: (function) """ return self._metric @property def seed(self): """The random seed used for generating samples. Returns: (int) Returns None if no seed is set, otherwise a user-defined int. """ return self._seed @property def dimension_size(self): """The length of the extent and all of the generated sample point. Returns: (int) """ return self._dim @property def samples(self): """The samples generated by the generator. May not match specs if parameters were re-adjusted after use. Returns: (np.ndarray) """ if len(self._samples) == 0: return np.empty((0, 2)) return self._samples @radius.setter def radius(self, radius): """Setter for radius Args: radius (float): Minimum distance between two points. """ if radius <= 0: raise ValueError("Radius must be a number that is greater than 0.") self._radius = radius @extent.setter def extent(self, extent): """Setter for extent Args: extent (list<float>): The dimension lengths. """ if len(extent) < 2: raise ValueError("Extent must have a length of at least 2.") extent = np.array(extent, dtype=float) if np.any(extent <= 0): raise ValueError("All extents must be greater than 0.") self._extent = extent self._dim = len(extent) @k.setter def k(self, k): """Setter for k Args: k (int): Number of attempts for each active point to generate a new point. """ if k <= 0: raise ValueError("K must be greater than 0.") if int(k) != k: raise ValueError("K must be an integer.") self._k = k @seed.setter def seed(self, seed): if seed is not None: if type(seed) is not int: raise ValueError("Seed must be integer.") if seed < 0: raise ValueError("Seed must be non-negative.") self._seed = seed def _clear_previous_samples(self): """Clears grid and samples for generating new samples. """ del self._grid del self._samples # -------------------------------- # Grid Parameters # -------------------------------- self._cell_length = self._radius / np.sqrt(self._dim) self._grid_shape = np.array([int(np.ceil( self._extent[i] / self._cell_length)) for i in range(self._dim)], dtype=int) # Define Grid self._grid = np.empty(shape=self._grid_shape, dtype=int) self._grid.fill(-1) # -------------------------------- # Sample List # -------------------------------- self._samples = [] def _get_grid_coord(self, point): """Returns the grid coordinate of the point. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: The grid coordinate that the point is separated in. """ return tuple([int(point[i] / self._cell_length) for i in range(self._dim)]) def
(self, active_point): """ Attempts to make a random point in proximity of active_point. Attempts to make a random point around the active_point k times. If the new point is too close to another point, it will discard and try. If it fails k times, the function returns None. Args: active_point (np.ndarray): An array of size (number_of_dimensions,). Returns: (np.ndarray). Returns an array of size (number_of_dimensions,) if succeeds. Otherwise, returns None. """ # -------------------------------- # Create Random Parameters # -------------------------------- for _ in range(self._k): # Defines radial distance from active_point. rho = np.random.uniform(self._radius, 2 * self._radius) # Defines angle from active_point. Requires multiple angles for higher dimensional planes. theta = [np.random.uniform(0, 2 * np.pi) for _ in range(self._dim - 1)] # -------------------------------- # Create New Point # -------------------------------- # Create a 2D point using first theta angle. new_point = [active_point[0] + rho * np.cos(theta[0]), active_point[1] + rho * np.sin(theta[0])] # Generate more components of the coordinate for higher dimensional planes. new_point.extend([active_point[i] + rho * np.sin(theta[i-1]) for i in range(2, active_point.shape[0])]) new_point = np.array(new_point) # Confirm point is valid if self._valid_point(new_point): return new_point return None def _valid_point(self, point): """Confirms that a point is valid. If a point is too close to another point or is outside of bounds, it will fail. Otherwise, it will succeed. Args: point (np.ndarray): An array of size (number_of_dimensions,). Returns: (bool) If succeeds, returns True. Otherwise, returns False. """ # -------------------------------- # Check Bounds # -------------------------------- # Get grid point and confirm it is within range coord = self._get_grid_coord(point) if np.logical_or(np.any(point < 0), np.any(point >= self._extent)): return False # -------------------------------- # Check Distance of Neighbors # -------------------------------- for idx in self._get_neighbors(coord): # No points in grid cell if self._grid[idx] == -1: continue # Obtains point in grid cell and confirms its distance is less than the radius. near_point = self._samples[self._grid[idx]] if metric.euclidean(near_point, point) < self._radius: return False return True def _get_neighbors(self, coord): """Obtains neighboring cells that are surrounding a given cell coordinate. Args: coord (tuple): A coordinate whose length is equal to the number of dimensions. Returns: (list<tuple>). A list of neighboring cells. """ neighbors = [] for change in self._changes: neighbor_coord = np.array(coord) + change if np.logical_or(np.any(neighbor_coord < 0), np.any(neighbor_coord >= self._grid_shape)): continue neighbors.append(tuple(neighbor_coord)) return neighbors def _create_neighbor_distances(self): """Creates distance vectors for calculating all neighbors from a given point. A neighbor can be only so far away from the original point and is dependent on the number of dimensions. We calculate every possible coordinate on the grid that the neighbor can be part of relative to a point and store that information for later use. Returns: (np.ndarray<int>) The array is of shape (number_of_vectors, number_of_dimensions). 
""" # -------------------------------- # Create Directions from Point # -------------------------------- diff = [[0 for _ in range(self._dim)]] curr = diff[0][:] for i in range(self._dim): # Each diff is a unit vector, only having one value at +1 or -1 and all others at 0. curr[i] = 1 diff.append(curr[:]) curr[i] = -1 diff.append(curr[:]) curr[i] = 0 # Remove initial blank unit vector with all values at 0. diff.pop(0) del curr # -------------------------------- # Breadth First Search # -------------------------------- distances = [] queue = [[0 for _ in range(self._dim)]] while queue: # Get latest distance curr = queue.pop() # The distance from any possible point should be less than or equal to the number of dimensions. # This can be shown using basic calculations. if self._metric(np.array(curr), np.zeros(shape=(len(curr),))) >= 2 * np.sqrt(self._dim) or \ np.any(np.abs(np.array(curr)) > self._extent / 2) or curr in distances: continue # Calculate all distances from child and add to queue queue.extend([list(np.array(curr) + np.array(diff[i])) for i in range(len(diff))]) # Add current distance to distances distances.append(curr) # Return all possible neighbor distances return np.array(distances, dtype=int)
_make_point
identifier_name
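The sampler row above centres on Bridson-style Poisson-disc sampling: candidates are drawn from an annulus between r and 2r around an active point, and a background grid with cell side r/sqrt(dim) keeps the minimum-distance check cheap. Below is a minimal, self-contained 2-D sketch of that candidate step, not the dataset's own class; the helper names (`propose_candidate`, `grid_coord`) and the concrete radius/extent values are illustrative only.

```python
import numpy as np

def grid_coord(point, cell):
    """Map a 2-D point to its background-grid cell (illustrative helper)."""
    return int(point[0] // cell), int(point[1] // cell)

def propose_candidate(active, radius, extent, rng):
    """Draw one candidate from the annulus [radius, 2*radius) around `active`,
    using the same rho/theta scheme as the sampler above (not area-uniform)."""
    rho = rng.uniform(radius, 2 * radius)
    theta = rng.uniform(0, 2 * np.pi)
    cand = active + rho * np.array([np.cos(theta), np.sin(theta)])
    # Reject candidates that fall outside the sampling extent.
    if np.any(cand < 0) or np.any(cand >= extent):
        return None
    return cand

# Tiny usage example: one candidate around a seed point in a 10x10 extent.
rng = np.random.default_rng(0)
extent = np.array([10.0, 10.0])
seed = extent / 2
cell = 1.0 / np.sqrt(2)          # cell side r / sqrt(dim) with r = 1, dim = 2
cand = propose_candidate(seed, 1.0, extent, rng)
if cand is not None:
    print(grid_coord(cand, cell))
```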
Labupdown.py
import os from PIL import Image import torchvision.transforms as tvt import torch from sklearn import manifold import torchvision.utils as tvu import torch.nn.functional as F import cv2 import numpy as np from sklearn.cluster import SpectralClustering from sklearn.cluster import DBSCAN from sklearn.cluster import KMeans from reid import models from torch import nn from reid.utils.serialization import load_checkpoint, save_checkpoint from reid.utils.data import transforms as T from torch.autograd import Variable from reid.utils import to_torch import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" image_trans = tvt.Compose([ tvt.Resize((224, 224)), tvt.ToTensor(), tvt.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def getVec(allfeats=None): # features: NCWH allfeaturesChange = allfeats.permute(1, 0, 2, 3) allreshaped_features = allfeaturesChange.reshape(allfeaturesChange.size(0), allfeaturesChange.size(1) * allfeaturesChange.size( 2) * allfeaturesChange.size(3)) cov = torch.from_numpy(np.cov(allreshaped_features.cpu().detach())) cov = cov.type_as(allreshaped_features).cuda() eigval, eigvec = torch.eig(cov, eigenvectors=True) first_compo = eigvec[:, 0] return first_compo def getPmap(features, first_compo): featuresChange=features.permute(1,0,2,3) reshaped_features=featuresChange.reshape(featuresChange.size(0),featuresChange.size(1)*featuresChange.size(2)*featuresChange.size(3)) projected_map = torch.matmul(first_compo.cuda().unsqueeze(0), reshaped_features.cuda()).view(1, features.size(0), -1) \ .view(features.size(0), features.size(2), features.size(3)) maxv = projected_map.max() minv = projected_map.min() projected_map *= (maxv + minv) / torch.abs(maxv + minv) return projected_map def getcood(features,BIGfeat,u,d,ur,ul,dr,dm,dl,theh): s1=0 s2=14 s3=28 s4=8 s5=20 if u == 0: project_mapup = torch.zeros(features[:, :, s1:s2, :].size(0), features[:, :, s1:s2, :].size(2), features[:, :, s1:s2, :].size(3)).cuda() elif u == 1: # project_mapup = torch.clamp(pca(features[:, :, s1:s2, :], allfeats=BIGfeat[:, :, s1:s2, :], updown='up'), min=0) VecUp=getVec(allfeats=BIGfeat[:, :, s1:s2, :]) torch.save(VecUp,'./VecUp.pkl') VecUp=torch.load('./VecUp.pkl') project_mapup=torch.clamp(getPmap(features[:, :, s1:s2, :],VecUp), min=0) maxv = project_mapup.view(project_mapup.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup /= maxv if d == 0: project_mapdown = torch.zeros(features[:, :, s2:s3, :].size(0), features[:, :, s2:s3, :].size(2), features[:, :, s2:s3, :].size(3)).cuda() elif d == 1: # project_mapdown = torch.clamp(pca(features[:, :, s2:s3, :], allfeats=BIGfeat[:, :, s2:s3, :], updown='down'), # min=0) VecDown=getVec(allfeats=BIGfeat[:, :, s2:s3, :]) torch.save(VecDown, './VecDown.pkl') VecDown = torch.load('./VecDown.pkl') project_mapdown=torch.clamp(getPmap(features[:, :, s2:s3, :],VecDown), min=0) maxv = project_mapdown.view(project_mapdown.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown /= maxv if ur == 0: project_mapup1 = torch.zeros(features[:, :, s1:s2, s1:s2].size(0), features[:, :, s1:s2, s1:s2].size(2), features[:, :, s1:s2, s1:s2].size(3)).cuda() # clamp 作用 小于0的,置为0 elif ur == 1: project_mapup1 = torch.clamp( pca(features[:, :, s1:s2, s1:s2], allfeats=BIGfeat[:, :, s1:s2, s1:s2], updown='up'), min=0) maxv = project_mapup1.view(project_mapup1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup1 /= maxv if ul == 0: project_mapup2 = torch.zeros(features[:, :, s1:s2, s2:s3].size(0), features[:, :, s1:s2, s2:s3].size(2), 
features[:, :, s1:s2, s2:s3].size(3)).cuda() elif ul == 1: project_mapup2 = torch.clamp( pca(features[:, :, s1:s2, s2:s3], allfeats=BIGfeat[:, :, s1:s2, s2:s3], updown='up'), min=0) maxv = project_mapup2.view(project_mapup2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup2 /= maxv
project_mapdown1 = torch.zeros(features[:, :, s2:s3, s1:s4].size(0), features[:, :, s2:s3, s1:s4].size(2), features[:, :, s2:s3, s1:s4].size(3)).cuda() elif dr == 1: project_mapdown1 = torch.clamp( pca(features[:, :, s2:s3, s1:s4], allfeats=BIGfeat[:, :, s2:s3, s1:s4], updown='down'), min=0) maxv = project_mapdown1.view(project_mapdown1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown1 /= maxv if dm == 0: project_mapdown2 = torch.zeros(features[:, :, s2:s3, s4:s5].size(0), features[:, :, s2:s3, s4:s5].size(2), features[:, :, s2:s3, s4:s5].size(3)).cuda() elif dm == 1: project_mapdown2 = torch.clamp( pca(features[:, :, s2:s3, s4:s5], allfeats=BIGfeat[:, :, s2:s3, s4:s5], updown='down'), min=0) maxv = project_mapdown2.view(project_mapdown2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown2 /= maxv if dl == 0: project_mapdown3 = torch.zeros(features[:, :, s2:s3, s5:s3].size(0), features[:, :, s2:s3, s5:s3].size(2), features[:, :, s2:s3, s5:s3].size(3)).cuda() elif dl == 1: project_mapdown3 = torch.clamp( pca(features[:, :, s2:s3, s5:s3], allfeats=BIGfeat[:, :, s2:s3, s5:s3], updown='down'), min=0) maxv = project_mapdown3.view(project_mapdown3.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown3 /= maxv if u == 0 and d == 0: project_mapup = torch.cat((project_mapup1, project_mapup2), 2) project_mapdown = torch.cat((project_mapdown1, project_mapdown2, project_mapdown3), 2) project_map = torch.cat((project_mapup, project_mapdown), 1) # project_map=F.adaptive_avg_pool2d(project_map,(7,7)) coordinate = [] feat = [] for i in range(project_map.size(0)): Sproject_map = project_map[i] Singlefeature = features[i, :, :, :] meanmaskvalue = Sproject_map.sum() / (Sproject_map.size(0) * Sproject_map.size(1)) Sproject_map[Sproject_map < theh] = 0 allcood = torch.nonzero(Sproject_map).cpu().numpy() Sproject_map = Sproject_map.unsqueeze(0) # Smask = Sproject_map.repeat(Singlefeature.size(0), 1, 1) SOproject_map = Sproject_map.squeeze() SOproject_map = SOproject_map.reshape(SOproject_map.size(0) * SOproject_map.size(1)) Singlefeature = Singlefeature.squeeze() Singlefeature = Singlefeature.permute(1, 2, 0) Singlefeature = Singlefeature.reshape(Singlefeature.size(0) * Singlefeature.size(1), Singlefeature.size(2)) cnt = 0 ClusterFeat = torch.zeros(Singlefeature.size(1)).unsqueeze(0) for j in range(SOproject_map.size(0)): if SOproject_map[j] > theh and cnt == 0: ClusterFeat = F.normalize(Singlefeature[j].unsqueeze(0)) cnt = cnt + 1 elif SOproject_map[j] >theh and cnt != 0: SF = F.normalize(Singlefeature[j].unsqueeze(0)) ClusterFeat = torch.cat((ClusterFeat, SF), 0) cnt = cnt + 1 ClusterFeatmean = torch.mean(ClusterFeat, dim=0) ClusterFeatmean = ClusterFeatmean.unsqueeze(0) ClusterFeatmax = torch.max(ClusterFeat, dim=0) ClusterFeatmax = ClusterFeatmax[0].unsqueeze(0) ClusterFeatmax = F.normalize(ClusterFeatmax) ClusterFeatmean = F.normalize(ClusterFeatmean) SingleClusterFeat = torch.cat((ClusterFeatmax, ClusterFeatmean), 1) SingleClusterFeat = SingleClusterFeat.squeeze() # Singlefeature = Singlefeature.mul(Smask) # Singlefeature = Singlefeature.reshape(Singlefeature.size(0), Singlefeature.size(1) * Singlefeature.size(2)) Singlefeature = SingleClusterFeat.cpu().detach().numpy() # SPFeat = np.sum(Singlefeature, axis=1, keepdims=False) / (len(allcood)) # ccc=Singlefeature.sum()/(len(aa)) cood = np.mean(allcood, axis=0, keepdims=True) feat.append(Singlefeature) coordinate.append(cood) # Sproject_map[Sproject_map > meanmaskvalue * (0.5)] = 1 project_map[i] = 
Sproject_map.reshape(project_map.size(1), project_map.size(2)) project_map = F.interpolate(project_map.unsqueeze(1), size=(imgs.size(2), imgs.size(3)), mode='bilinear', align_corners=False) * 255. coordinate = np.array(coordinate) coordinate = np.squeeze(coordinate) feat = np.array(feat) # tsne2 = manifold.TSNE(n_components=3, init='pca', random_state=501) # X_tsne2 = tsne2.fit_transform(feat) # x_min, x_max = X_tsne2.min(0), X_tsne2.max(0) # feat = (X_tsne2 - x_min) / (x_max - x_min) return coordinate,feat,project_map # datamanager = torchreid.data.ImageDataManager( # root='reid-data', # sources='VehicleID', # height=224, # width=224, # batch_size=32, # ) datadir='E:\\work\\DLMB-PB\\DLMB-PB\\data' model = models.create('resnet50', num_features=1024, dropout=0.5, num_classes=13164) model = model.cuda() checkpoint = load_checkpoint('E:\\work\\DLMB-PB\\DLMB-PB\\checkpointres50.pth.tar') model.load_state_dict(checkpoint['state_dict']) data_list = os.listdir(datadir) imgs = [] for name in data_list: img = image_trans(Image.open(os.path.join(datadir+'\\'+name)).convert('RGB')) imgs.append(img.unsqueeze(0)) imgs = torch.cat(imgs) featurelist,_ = model(imgs.cuda()) #第15层的尺寸是56*56 第16层的尺寸是28*28 23层开始后是14*14 features=featurelist[6] pca = PCAProjectNet() BIGfeat=torch.load('./renet50vidlayer6.pkl') coordinate,feat1,_=getcood(BIGfeat[0:2000, :, :, :].cuda(),BIGfeat[0:2000, :, :, :],1,0,0,0,0,0,0,theh=0.4) Scoordinate,Sfeat1,project_map=getcood(features.cuda(),BIGfeat[0:2000, :, :, :],1,0,0,0,0,0,0,theh=0.4) torch.save(feat1,'./feat1.pkl') torch.save(Sfeat1,'./Sfeat1.pkl') feat1=torch.load('./feat1.pkl') Sfeat1=torch.load('./Sfeat1.pkl') coordinate,feat2,_=getcood(BIGfeat[0:2000, :, :, :].cuda(),BIGfeat[0:2000, :, :, :],0,1,0,0,0,0,0,theh=0.4) Scoordinate,Sfeat2,project_map=getcood(features.cuda(),BIGfeat[0:2000, :, :, :],0,1,0,0,0,0,0,theh=0.4) torch.save(feat2,'./feat2.pkl') torch.save(Sfeat2,'./Sfeat2.pkl') feat2=torch.load('./feat2.pkl') Sfeat2=torch.load('./Sfeat2.pkl') feat=np.concatenate((feat1,feat2),axis=1) Sfeat=np.concatenate((Sfeat1,Sfeat2),axis=1) # coordinate,feat,_=getcood(features.cuda(),features.cuda(),0,1,0,0,0,0,0,theh=0.2) # Scoordinate,Sfeat,project_map=getcood(features.cuda(),features.cuda(),0,1,0,0,0,0,0,theh=0.2) # # feat=np.concatenate((coordinate,feat),axis=1) # Sfeat=np.concatenate((Scoordinate,Sfeat),axis=1) # label = SpectralClustering(n_clusters=2).fit_predict(Sfeat) km_cluster = KMeans(n_clusters=2, max_iter=3000, n_init=2, \ init='k-means++', n_jobs=-1) km_cluster.fit(Sfeat) # km_cluster=torch.load('./kmclustersub.pkl') label = km_cluster.predict(feat) torch.save(km_cluster,'./kmcluster.pkl') save_imgs = [] for i, name in enumerate(data_list): # if i>0: # break original_image = cv2.resize(cv2.imread(os.path.join(datadir, name)), (224, 224)) mask = project_map[i].repeat(3, 1, 1).permute(1, 2, 0).detach().cpu().numpy() #mask=project_map[i].permute(1, 2, 0).detach().cpu().numpy() #img = cv2.cvtColor(np.asarray(mask), cv2.COLOR_RGB2BGR) orgimg = cv2.cvtColor(np.asarray(original_image), cv2.COLOR_RGB2BGR) # imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # ret, thresh = cv2.threshold(imgray, 125, 255, 0) # thresh = np.clip(thresh, 0, 255) # 归一化也行 # thresh = np.array(thresh, np.uint8) # contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # cnt=0 # for c in contours: # M = cv2.moments(c) # x, y, w, h = cv2.boundingRect(c) # #if w*h<100 or w>100 or h>100 or w*h>4000: # # continue # #scores = 1 # cnt=cnt+1 # cv2.rectangle(original_image,(x,y), 
(x+w,y+h), (153,153,0), 5) #region = original_image.crop((x, y, x + w, y + h)) mask = cv2.applyColorMap(mask.astype(np.uint8), cv2.COLORMAP_JET) save_img = cv2.addWeighted(orgimg, 0.5, mask, 0.5, 0.0) if label[i]==0: save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (0, 255, 0), 3) # elif label[i]==1: # save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (0, 0, 255), 3) # elif label[i]==2: # save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (255, 0, 0), 3) save_imgs.append(save_img) save_imgs = np.concatenate(save_imgs, 1) cv2.imwrite('./testddtvec.jpg', save_imgs)
if dr == 0:
random_line_split
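The Labupdown.py rows all revolve around a DDT-style trick: stack the convolutional features of many images, take the covariance across channels, and project every spatial position onto the leading eigenvector to obtain a saliency map (`getVec` / `getPmap` above). The sketch below shows that idea in isolation; it is not the script's exact code. It uses `torch.linalg.eigh`, since `torch.eig`, which the script calls, has been deprecated and removed in recent PyTorch releases, and the tensor names are illustrative.

```python
import torch

def leading_component_map(feats):
    """feats: (N, C, H, W) conv features -> (N, H, W) projection onto the first principal direction."""
    n, c, h, w = feats.shape
    x = feats.permute(1, 0, 2, 3).reshape(c, -1)      # (C, N*H*W): one column per spatial position
    x = x - x.mean(dim=1, keepdim=True)               # centre, as np.cov does internally
    cov = x @ x.t() / (x.shape[1] - 1)                # (C, C) channel covariance
    eigvals, eigvecs = torch.linalg.eigh(cov)         # ascending eigenvalues of a symmetric matrix
    first = eigvecs[:, -1]                            # eigenvector of the largest eigenvalue
    proj = (first @ x).reshape(n, h, w)               # project every position onto it
    proj = proj.clamp(min=0)                          # keep positive responses, as the script does
    maxv = proj.flatten(1).max(dim=1).values.view(-1, 1, 1)
    return proj / maxv.clamp(min=1e-8)                # per-image max-normalisation

# Usage on random features just to show the shapes involved.
maps = leading_component_map(torch.randn(4, 64, 14, 14))
print(maps.shape)  # torch.Size([4, 14, 14])
```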
Labupdown.py
import os from PIL import Image import torchvision.transforms as tvt import torch from sklearn import manifold import torchvision.utils as tvu import torch.nn.functional as F import cv2 import numpy as np from sklearn.cluster import SpectralClustering from sklearn.cluster import DBSCAN from sklearn.cluster import KMeans from reid import models from torch import nn from reid.utils.serialization import load_checkpoint, save_checkpoint from reid.utils.data import transforms as T from torch.autograd import Variable from reid.utils import to_torch import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" image_trans = tvt.Compose([ tvt.Resize((224, 224)), tvt.ToTensor(), tvt.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def getVec(allfeats=None): # features: NCWH
def getPmap(features, first_compo): featuresChange=features.permute(1,0,2,3) reshaped_features=featuresChange.reshape(featuresChange.size(0),featuresChange.size(1)*featuresChange.size(2)*featuresChange.size(3)) projected_map = torch.matmul(first_compo.cuda().unsqueeze(0), reshaped_features.cuda()).view(1, features.size(0), -1) \ .view(features.size(0), features.size(2), features.size(3)) maxv = projected_map.max() minv = projected_map.min() projected_map *= (maxv + minv) / torch.abs(maxv + minv) return projected_map def getcood(features,BIGfeat,u,d,ur,ul,dr,dm,dl,theh): s1=0 s2=14 s3=28 s4=8 s5=20 if u == 0: project_mapup = torch.zeros(features[:, :, s1:s2, :].size(0), features[:, :, s1:s2, :].size(2), features[:, :, s1:s2, :].size(3)).cuda() elif u == 1: # project_mapup = torch.clamp(pca(features[:, :, s1:s2, :], allfeats=BIGfeat[:, :, s1:s2, :], updown='up'), min=0) VecUp=getVec(allfeats=BIGfeat[:, :, s1:s2, :]) torch.save(VecUp,'./VecUp.pkl') VecUp=torch.load('./VecUp.pkl') project_mapup=torch.clamp(getPmap(features[:, :, s1:s2, :],VecUp), min=0) maxv = project_mapup.view(project_mapup.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup /= maxv if d == 0: project_mapdown = torch.zeros(features[:, :, s2:s3, :].size(0), features[:, :, s2:s3, :].size(2), features[:, :, s2:s3, :].size(3)).cuda() elif d == 1: # project_mapdown = torch.clamp(pca(features[:, :, s2:s3, :], allfeats=BIGfeat[:, :, s2:s3, :], updown='down'), # min=0) VecDown=getVec(allfeats=BIGfeat[:, :, s2:s3, :]) torch.save(VecDown, './VecDown.pkl') VecDown = torch.load('./VecDown.pkl') project_mapdown=torch.clamp(getPmap(features[:, :, s2:s3, :],VecDown), min=0) maxv = project_mapdown.view(project_mapdown.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown /= maxv if ur == 0: project_mapup1 = torch.zeros(features[:, :, s1:s2, s1:s2].size(0), features[:, :, s1:s2, s1:s2].size(2), features[:, :, s1:s2, s1:s2].size(3)).cuda() # clamp 作用 小于0的,置为0 elif ur == 1: project_mapup1 = torch.clamp( pca(features[:, :, s1:s2, s1:s2], allfeats=BIGfeat[:, :, s1:s2, s1:s2], updown='up'), min=0) maxv = project_mapup1.view(project_mapup1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup1 /= maxv if ul == 0: project_mapup2 = torch.zeros(features[:, :, s1:s2, s2:s3].size(0), features[:, :, s1:s2, s2:s3].size(2), features[:, :, s1:s2, s2:s3].size(3)).cuda() elif ul == 1: project_mapup2 = torch.clamp( pca(features[:, :, s1:s2, s2:s3], allfeats=BIGfeat[:, :, s1:s2, s2:s3], updown='up'), min=0) maxv = project_mapup2.view(project_mapup2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup2 /= maxv if dr == 0: project_mapdown1 = torch.zeros(features[:, :, s2:s3, s1:s4].size(0), features[:, :, s2:s3, s1:s4].size(2), features[:, :, s2:s3, s1:s4].size(3)).cuda() elif dr == 1: project_mapdown1 = torch.clamp( pca(features[:, :, s2:s3, s1:s4], allfeats=BIGfeat[:, :, s2:s3, s1:s4], updown='down'), min=0) maxv = project_mapdown1.view(project_mapdown1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown1 /= maxv if dm == 0: project_mapdown2 = torch.zeros(features[:, :, s2:s3, s4:s5].size(0), features[:, :, s2:s3, s4:s5].size(2), features[:, :, s2:s3, s4:s5].size(3)).cuda() elif dm == 1: project_mapdown2 = torch.clamp( pca(features[:, :, s2:s3, s4:s5], allfeats=BIGfeat[:, :, s2:s3, s4:s5], updown='down'), min=0) maxv = project_mapdown2.view(project_mapdown2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown2 /= maxv if dl == 0: project_mapdown3 = torch.zeros(features[:, 
:, s2:s3, s5:s3].size(0), features[:, :, s2:s3, s5:s3].size(2), features[:, :, s2:s3, s5:s3].size(3)).cuda() elif dl == 1: project_mapdown3 = torch.clamp( pca(features[:, :, s2:s3, s5:s3], allfeats=BIGfeat[:, :, s2:s3, s5:s3], updown='down'), min=0) maxv = project_mapdown3.view(project_mapdown3.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown3 /= maxv if u == 0 and d == 0: project_mapup = torch.cat((project_mapup1, project_mapup2), 2) project_mapdown = torch.cat((project_mapdown1, project_mapdown2, project_mapdown3), 2) project_map = torch.cat((project_mapup, project_mapdown), 1) # project_map=F.adaptive_avg_pool2d(project_map,(7,7)) coordinate = [] feat = [] for i in range(project_map.size(0)): Sproject_map = project_map[i] Singlefeature = features[i, :, :, :] meanmaskvalue = Sproject_map.sum() / (Sproject_map.size(0) * Sproject_map.size(1)) Sproject_map[Sproject_map < theh] = 0 allcood = torch.nonzero(Sproject_map).cpu().numpy() Sproject_map = Sproject_map.unsqueeze(0) # Smask = Sproject_map.repeat(Singlefeature.size(0), 1, 1) SOproject_map = Sproject_map.squeeze() SOproject_map = SOproject_map.reshape(SOproject_map.size(0) * SOproject_map.size(1)) Singlefeature = Singlefeature.squeeze() Singlefeature = Singlefeature.permute(1, 2, 0) Singlefeature = Singlefeature.reshape(Singlefeature.size(0) * Singlefeature.size(1), Singlefeature.size(2)) cnt = 0 ClusterFeat = torch.zeros(Singlefeature.size(1)).unsqueeze(0) for j in range(SOproject_map.size(0)): if SOproject_map[j] > theh and cnt == 0: ClusterFeat = F.normalize(Singlefeature[j].unsqueeze(0)) cnt = cnt + 1 elif SOproject_map[j] >theh and cnt != 0: SF = F.normalize(Singlefeature[j].unsqueeze(0)) ClusterFeat = torch.cat((ClusterFeat, SF), 0) cnt = cnt + 1 ClusterFeatmean = torch.mean(ClusterFeat, dim=0) ClusterFeatmean = ClusterFeatmean.unsqueeze(0) ClusterFeatmax = torch.max(ClusterFeat, dim=0) ClusterFeatmax = ClusterFeatmax[0].unsqueeze(0) ClusterFeatmax = F.normalize(ClusterFeatmax) ClusterFeatmean = F.normalize(ClusterFeatmean) SingleClusterFeat = torch.cat((ClusterFeatmax, ClusterFeatmean), 1) SingleClusterFeat = SingleClusterFeat.squeeze() # Singlefeature = Singlefeature.mul(Smask) # Singlefeature = Singlefeature.reshape(Singlefeature.size(0), Singlefeature.size(1) * Singlefeature.size(2)) Singlefeature = SingleClusterFeat.cpu().detach().numpy() # SPFeat = np.sum(Singlefeature, axis=1, keepdims=False) / (len(allcood)) # ccc=Singlefeature.sum()/(len(aa)) cood = np.mean(allcood, axis=0, keepdims=True) feat.append(Singlefeature) coordinate.append(cood) # Sproject_map[Sproject_map > meanmaskvalue * (0.5)] = 1 project_map[i] = Sproject_map.reshape(project_map.size(1), project_map.size(2)) project_map = F.interpolate(project_map.unsqueeze(1), size=(imgs.size(2), imgs.size(3)), mode='bilinear', align_corners=False) * 255. 
coordinate = np.array(coordinate) coordinate = np.squeeze(coordinate) feat = np.array(feat) # tsne2 = manifold.TSNE(n_components=3, init='pca', random_state=501) # X_tsne2 = tsne2.fit_transform(feat) # x_min, x_max = X_tsne2.min(0), X_tsne2.max(0) # feat = (X_tsne2 - x_min) / (x_max - x_min) return coordinate,feat,project_map # datamanager = torchreid.data.ImageDataManager( # root='reid-data', # sources='VehicleID', # height=224, # width=224, # batch_size=32, # ) datadir='E:\\work\\DLMB-PB\\DLMB-PB\\data' model = models.create('resnet50', num_features=1024, dropout=0.5, num_classes=13164) model = model.cuda() checkpoint = load_checkpoint('E:\\work\\DLMB-PB\\DLMB-PB\\checkpointres50.pth.tar') model.load_state_dict(checkpoint['state_dict']) data_list = os.listdir(datadir) imgs = [] for name in data_list: img = image_trans(Image.open(os.path.join(datadir+'\\'+name)).convert('RGB')) imgs.append(img.unsqueeze(0)) imgs = torch.cat(imgs) featurelist,_ = model(imgs.cuda()) #第15层的尺寸是56*56 第16层的尺寸是28*28 23层开始后是14*14 features=featurelist[6] pca = PCAProjectNet() BIGfeat=torch.load('./renet50vidlayer6.pkl') coordinate,feat1,_=getcood(BIGfeat[0:2000, :, :, :].cuda(),BIGfeat[0:2000, :, :, :],1,0,0,0,0,0,0,theh=0.4) Scoordinate,Sfeat1,project_map=getcood(features.cuda(),BIGfeat[0:2000, :, :, :],1,0,0,0,0,0,0,theh=0.4) torch.save(feat1,'./feat1.pkl') torch.save(Sfeat1,'./Sfeat1.pkl') feat1=torch.load('./feat1.pkl') Sfeat1=torch.load('./Sfeat1.pkl') coordinate,feat2,_=getcood(BIGfeat[0:2000, :, :, :].cuda(),BIGfeat[0:2000, :, :, :],0,1,0,0,0,0,0,theh=0.4) Scoordinate,Sfeat2,project_map=getcood(features.cuda(),BIGfeat[0:2000, :, :, :],0,1,0,0,0,0,0,theh=0.4) torch.save(feat2,'./feat2.pkl') torch.save(Sfeat2,'./Sfeat2.pkl') feat2=torch.load('./feat2.pkl') Sfeat2=torch.load('./Sfeat2.pkl') feat=np.concatenate((feat1,feat2),axis=1) Sfeat=np.concatenate((Sfeat1,Sfeat2),axis=1) # coordinate,feat,_=getcood(features.cuda(),features.cuda(),0,1,0,0,0,0,0,theh=0.2) # Scoordinate,Sfeat,project_map=getcood(features.cuda(),features.cuda(),0,1,0,0,0,0,0,theh=0.2) # # feat=np.concatenate((coordinate,feat),axis=1) # Sfeat=np.concatenate((Scoordinate,Sfeat),axis=1) # label = SpectralClustering(n_clusters=2).fit_predict(Sfeat) km_cluster = KMeans(n_clusters=2, max_iter=3000, n_init=2, \ init='k-means++', n_jobs=-1) km_cluster.fit(Sfeat) # km_cluster=torch.load('./kmclustersub.pkl') label = km_cluster.predict(feat) torch.save(km_cluster,'./kmcluster.pkl') save_imgs = [] for i, name in enumerate(data_list): # if i>0: # break original_image = cv2.resize(cv2.imread(os.path.join(datadir, name)), (224, 224)) mask = project_map[i].repeat(3, 1, 1).permute(1, 2, 0).detach().cpu().numpy() #mask=project_map[i].permute(1, 2, 0).detach().cpu().numpy() #img = cv2.cvtColor(np.asarray(mask), cv2.COLOR_RGB2BGR) orgimg = cv2.cvtColor(np.asarray(original_image), cv2.COLOR_RGB2BGR) # imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # ret, thresh = cv2.threshold(imgray, 125, 255, 0) # thresh = np.clip(thresh, 0, 255) # 归一化也行 # thresh = np.array(thresh, np.uint8) # contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # cnt=0 # for c in contours: # M = cv2.moments(c) # x, y, w, h = cv2.boundingRect(c) # #if w*h<100 or w>100 or h>100 or w*h>4000: # # continue # #scores = 1 # cnt=cnt+1 # cv2.rectangle(original_image,(x,y), (x+w,y+h), (153,153,0), 5) #region = original_image.crop((x, y, x + w, y + h)) mask = cv2.applyColorMap(mask.astype(np.uint8), cv2.COLORMAP_JET) save_img = cv2.addWeighted(orgimg, 0.5, mask, 0.5, 
0.0) if label[i]==0: save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (0, 255, 0), 3) # elif label[i]==1: # save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (0, 0, 255), 3) # elif label[i]==2: # save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (255, 0, 0), 3) save_imgs.append(save_img) save_imgs = np.concatenate(save_imgs, 1) cv2.imwrite('./testddtvec.jpg', save_imgs)
allfeaturesChange = allfeats.permute(1, 0, 2, 3) allreshaped_features = allfeaturesChange.reshape(allfeaturesChange.size(0), allfeaturesChange.size(1) * allfeaturesChange.size( 2) * allfeaturesChange.size(3)) cov = torch.from_numpy(np.cov(allreshaped_features.cpu().detach())) cov = cov.type_as(allreshaped_features).cuda() eigval, eigvec = torch.eig(cov, eigenvectors=True) first_compo = eigvec[:, 0] return first_compo
identifier_body
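Downstream, each copy of this script fits a two-way KMeans on descriptors from one feature bank and predicts labels for another (`km_cluster.fit(Sfeat)` / `km_cluster.predict(feat)` above). A minimal sketch of that step is below; the `n_jobs` argument used in the script has been removed from recent scikit-learn releases, so it is omitted here, and the descriptor matrices are random stand-ins rather than outputs of `getcood()`.

```python
import numpy as np
from sklearn.cluster import KMeans

# Random stand-ins for the two descriptor matrices built in the script.
rng = np.random.default_rng(0)
reference_feats = rng.normal(size=(200, 128))   # descriptors used to fit the clusters
query_feats = rng.normal(size=(50, 128))        # descriptors to be labelled

km = KMeans(n_clusters=2, n_init=10, max_iter=3000, init='k-means++', random_state=0)
km.fit(reference_feats)
labels = km.predict(query_feats)                # one 0/1 label per query descriptor
print(labels[:10])
```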
Labupdown.py
import os from PIL import Image import torchvision.transforms as tvt import torch from sklearn import manifold import torchvision.utils as tvu import torch.nn.functional as F import cv2 import numpy as np from sklearn.cluster import SpectralClustering from sklearn.cluster import DBSCAN from sklearn.cluster import KMeans from reid import models from torch import nn from reid.utils.serialization import load_checkpoint, save_checkpoint from reid.utils.data import transforms as T from torch.autograd import Variable from reid.utils import to_torch import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" image_trans = tvt.Compose([ tvt.Resize((224, 224)), tvt.ToTensor(), tvt.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def getVec(allfeats=None): # features: NCWH allfeaturesChange = allfeats.permute(1, 0, 2, 3) allreshaped_features = allfeaturesChange.reshape(allfeaturesChange.size(0), allfeaturesChange.size(1) * allfeaturesChange.size( 2) * allfeaturesChange.size(3)) cov = torch.from_numpy(np.cov(allreshaped_features.cpu().detach())) cov = cov.type_as(allreshaped_features).cuda() eigval, eigvec = torch.eig(cov, eigenvectors=True) first_compo = eigvec[:, 0] return first_compo def getPmap(features, first_compo): featuresChange=features.permute(1,0,2,3) reshaped_features=featuresChange.reshape(featuresChange.size(0),featuresChange.size(1)*featuresChange.size(2)*featuresChange.size(3)) projected_map = torch.matmul(first_compo.cuda().unsqueeze(0), reshaped_features.cuda()).view(1, features.size(0), -1) \ .view(features.size(0), features.size(2), features.size(3)) maxv = projected_map.max() minv = projected_map.min() projected_map *= (maxv + minv) / torch.abs(maxv + minv) return projected_map def getcood(features,BIGfeat,u,d,ur,ul,dr,dm,dl,theh): s1=0 s2=14 s3=28 s4=8 s5=20 if u == 0: project_mapup = torch.zeros(features[:, :, s1:s2, :].size(0), features[:, :, s1:s2, :].size(2), features[:, :, s1:s2, :].size(3)).cuda() elif u == 1: # project_mapup = torch.clamp(pca(features[:, :, s1:s2, :], allfeats=BIGfeat[:, :, s1:s2, :], updown='up'), min=0) VecUp=getVec(allfeats=BIGfeat[:, :, s1:s2, :]) torch.save(VecUp,'./VecUp.pkl') VecUp=torch.load('./VecUp.pkl') project_mapup=torch.clamp(getPmap(features[:, :, s1:s2, :],VecUp), min=0) maxv = project_mapup.view(project_mapup.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup /= maxv if d == 0: project_mapdown = torch.zeros(features[:, :, s2:s3, :].size(0), features[:, :, s2:s3, :].size(2), features[:, :, s2:s3, :].size(3)).cuda() elif d == 1: # project_mapdown = torch.clamp(pca(features[:, :, s2:s3, :], allfeats=BIGfeat[:, :, s2:s3, :], updown='down'), # min=0) VecDown=getVec(allfeats=BIGfeat[:, :, s2:s3, :]) torch.save(VecDown, './VecDown.pkl') VecDown = torch.load('./VecDown.pkl') project_mapdown=torch.clamp(getPmap(features[:, :, s2:s3, :],VecDown), min=0) maxv = project_mapdown.view(project_mapdown.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown /= maxv if ur == 0: project_mapup1 = torch.zeros(features[:, :, s1:s2, s1:s2].size(0), features[:, :, s1:s2, s1:s2].size(2), features[:, :, s1:s2, s1:s2].size(3)).cuda() # clamp 作用 小于0的,置为0 elif ur == 1: project_mapup1 = torch.clamp( pca(features[:, :, s1:s2, s1:s2], allfeats=BIGfeat[:, :, s1:s2, s1:s2], updown='up'), min=0) maxv = project_mapup1.view(project_mapup1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup1 /= maxv if ul == 0: project_mapup2 = torch.zeros(features[:, :, s1:s2, s2:s3].size(0), features[:, :, s1:s2, s2:s3].size(2), 
features[:, :, s1:s2, s2:s3].size(3)).cuda() elif ul == 1: project_mapup2 = torch.clamp( pca(features[:, :, s1:s2, s2:s3], allfeats=BIGfeat[:, :, s1:s2, s2:s3], updown='up'), min=0) maxv = project_mapup2.view(project_mapup2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup2 /= maxv if dr == 0: project_mapdown1 = torch.zeros(features[:, :, s2:s3, s1:s4].size(0), features[:, :, s2:s3, s1:s4].size(2), features[:, :, s2:s3, s1:s4].size(3)).cuda() elif dr == 1: project_mapdown1 = torch.clamp( pca(features[:, :, s2:s3, s1:s4], allfeats=BIGfeat[:, :, s2:s3, s1:s4], updown='down'), min=0) maxv = project_mapdown1.view(project_mapdown1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown1 /= maxv if dm == 0: project_mapdown2 = torch.zeros(features[:, :, s2:s3, s4:s5].size(0), features[:, :, s2:s3, s4:s5].size(2), features[:, :, s2:s3, s4:s5].size(3)).cuda() elif dm == 1: project_mapdown2 = torch.clamp( pca(features[:, :, s2:s3, s4:s5], allfeats=BIGfeat[:, :, s2:s3, s4:s5], updown='down'), min=0) maxv = project_mapdown2.view(project_mapdown2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown2 /= maxv if dl == 0: project_mapdown3 = torch.zeros(features[:, :, s2:s3, s5:s3].size(0), features[:, :, s2:s3, s5:s3].size(2), features[:, :, s2:s3, s5:s3].size(3)).cuda() elif dl == 1: project_mapdown3 = torch.clamp( pca(features[:, :, s2:s3, s5:s3], allfeats=BIGfeat[:, :, s2:s3, s5:s3], updown='down'), min=0) maxv = project_mapdown3.view(project_mapdown3.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown3 /= maxv if u == 0 and d == 0: project_mapup = torch.cat((project_mapup1, project_mapup2), 2) project_mapdown = torch.cat((project_mapdown1, project_mapdown2, project_mapdown3), 2) project_map = torch.cat((project_mapup, project_mapdown), 1) # project_map=F.adaptive_avg_pool2d(project_map,(7,7)) coordinate = [] feat = [] for i in range(project_map.size(0)): Sproject_map = p
p = F.interpolate(project_map.unsqueeze(1), size=(imgs.size(2), imgs.size(3)), mode='bilinear', align_corners=False) * 255. coordinate = np.array(coordinate) coordinate = np.squeeze(coordinate) feat = np.array(feat) # tsne2 = manifold.TSNE(n_components=3, init='pca', random_state=501) # X_tsne2 = tsne2.fit_transform(feat) # x_min, x_max = X_tsne2.min(0), X_tsne2.max(0) # feat = (X_tsne2 - x_min) / (x_max - x_min) return coordinate,feat,project_map # datamanager = torchreid.data.ImageDataManager( # root='reid-data', # sources='VehicleID', # height=224, # width=224, # batch_size=32, # ) datadir='E:\\work\\DLMB-PB\\DLMB-PB\\data' model = models.create('resnet50', num_features=1024, dropout=0.5, num_classes=13164) model = model.cuda() checkpoint = load_checkpoint('E:\\work\\DLMB-PB\\DLMB-PB\\checkpointres50.pth.tar') model.load_state_dict(checkpoint['state_dict']) data_list = os.listdir(datadir) imgs = [] for name in data_list: img = image_trans(Image.open(os.path.join(datadir+'\\'+name)).convert('RGB')) imgs.append(img.unsqueeze(0)) imgs = torch.cat(imgs) featurelist,_ = model(imgs.cuda()) #第15层的尺寸是56*56 第16层的尺寸是28*28 23层开始后是14*14 features=featurelist[6] pca = PCAProjectNet() BIGfeat=torch.load('./renet50vidlayer6.pkl') coordinate,feat1,_=getcood(BIGfeat[0:2000, :, :, :].cuda(),BIGfeat[0:2000, :, :, :],1,0,0,0,0,0,0,theh=0.4) Scoordinate,Sfeat1,project_map=getcood(features.cuda(),BIGfeat[0:2000, :, :, :],1,0,0,0,0,0,0,theh=0.4) torch.save(feat1,'./feat1.pkl') torch.save(Sfeat1,'./Sfeat1.pkl') feat1=torch.load('./feat1.pkl') Sfeat1=torch.load('./Sfeat1.pkl') coordinate,feat2,_=getcood(BIGfeat[0:2000, :, :, :].cuda(),BIGfeat[0:2000, :, :, :],0,1,0,0,0,0,0,theh=0.4) Scoordinate,Sfeat2,project_map=getcood(features.cuda(),BIGfeat[0:2000, :, :, :],0,1,0,0,0,0,0,theh=0.4) torch.save(feat2,'./feat2.pkl') torch.save(Sfeat2,'./Sfeat2.pkl') feat2=torch.load('./feat2.pkl') Sfeat2=torch.load('./Sfeat2.pkl') feat=np.concatenate((feat1,feat2),axis=1) Sfeat=np.concatenate((Sfeat1,Sfeat2),axis=1) # coordinate,feat,_=getcood(features.cuda(),features.cuda(),0,1,0,0,0,0,0,theh=0.2) # Scoordinate,Sfeat,project_map=getcood(features.cuda(),features.cuda(),0,1,0,0,0,0,0,theh=0.2) # # feat=np.concatenate((coordinate,feat),axis=1) # Sfeat=np.concatenate((Scoordinate,Sfeat),axis=1) # label = SpectralClustering(n_clusters=2).fit_predict(Sfeat) km_cluster = KMeans(n_clusters=2, max_iter=3000, n_init=2, \ init='k-means++', n_jobs=-1) km_cluster.fit(Sfeat) # km_cluster=torch.load('./kmclustersub.pkl') label = km_cluster.predict(feat) torch.save(km_cluster,'./kmcluster.pkl') save_imgs = [] for i, name in enumerate(data_list): # if i>0: # break original_image = cv2.resize(cv2.imread(os.path.join(datadir, name)), (224, 224)) mask = project_map[i].repeat(3, 1, 1).permute(1, 2, 0).detach().cpu().numpy() #mask=project_map[i].permute(1, 2, 0).detach().cpu().numpy() #img = cv2.cvtColor(np.asarray(mask), cv2.COLOR_RGB2BGR) orgimg = cv2.cvtColor(np.asarray(original_image), cv2.COLOR_RGB2BGR) # imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # ret, thresh = cv2.threshold(imgray, 125, 255, 0) # thresh = np.clip(thresh, 0, 255) # 归一化也行 # thresh = np.array(thresh, np.uint8) # contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # cnt=0 # for c in contours: # M = cv2.moments(c) # x, y, w, h = cv2.boundingRect(c) # #if w*h<100 or w>100 or h>100 or w*h>4000: # # continue # #scores = 1 # cnt=cnt+1 # cv2.rectangle(original_image,(x,y), (x+w,y+h), (153,153,0), 5) #region = original_image.crop((x, y, x + w, y + 
h)) mask = cv2.applyColorMap(mask.astype(np.uint8), cv2.COLORMAP_JET) save_img = cv2.addWeighted(orgimg, 0.5, mask, 0.5, 0.0) if label[i]==0: save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (0, 255, 0), 3) # elif label[i]==1: # save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (0, 0, 255), 3) # elif label[i]==2: # save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (255, 0, 0), 3) save_imgs.append(save_img) save_imgs = np.concatenate(save_imgs, 1) cv2.imwrite('./testddtvec.jpg', save_imgs)
roject_map[i] Singlefeature = features[i, :, :, :] meanmaskvalue = Sproject_map.sum() / (Sproject_map.size(0) * Sproject_map.size(1)) Sproject_map[Sproject_map < theh] = 0 allcood = torch.nonzero(Sproject_map).cpu().numpy() Sproject_map = Sproject_map.unsqueeze(0) # Smask = Sproject_map.repeat(Singlefeature.size(0), 1, 1) SOproject_map = Sproject_map.squeeze() SOproject_map = SOproject_map.reshape(SOproject_map.size(0) * SOproject_map.size(1)) Singlefeature = Singlefeature.squeeze() Singlefeature = Singlefeature.permute(1, 2, 0) Singlefeature = Singlefeature.reshape(Singlefeature.size(0) * Singlefeature.size(1), Singlefeature.size(2)) cnt = 0 ClusterFeat = torch.zeros(Singlefeature.size(1)).unsqueeze(0) for j in range(SOproject_map.size(0)): if SOproject_map[j] > theh and cnt == 0: ClusterFeat = F.normalize(Singlefeature[j].unsqueeze(0)) cnt = cnt + 1 elif SOproject_map[j] >theh and cnt != 0: SF = F.normalize(Singlefeature[j].unsqueeze(0)) ClusterFeat = torch.cat((ClusterFeat, SF), 0) cnt = cnt + 1 ClusterFeatmean = torch.mean(ClusterFeat, dim=0) ClusterFeatmean = ClusterFeatmean.unsqueeze(0) ClusterFeatmax = torch.max(ClusterFeat, dim=0) ClusterFeatmax = ClusterFeatmax[0].unsqueeze(0) ClusterFeatmax = F.normalize(ClusterFeatmax) ClusterFeatmean = F.normalize(ClusterFeatmean) SingleClusterFeat = torch.cat((ClusterFeatmax, ClusterFeatmean), 1) SingleClusterFeat = SingleClusterFeat.squeeze() # Singlefeature = Singlefeature.mul(Smask) # Singlefeature = Singlefeature.reshape(Singlefeature.size(0), Singlefeature.size(1) * Singlefeature.size(2)) Singlefeature = SingleClusterFeat.cpu().detach().numpy() # SPFeat = np.sum(Singlefeature, axis=1, keepdims=False) / (len(allcood)) # ccc=Singlefeature.sum()/(len(aa)) cood = np.mean(allcood, axis=0, keepdims=True) feat.append(Singlefeature) coordinate.append(cood) # Sproject_map[Sproject_map > meanmaskvalue * (0.5)] = 1 project_map[i] = Sproject_map.reshape(project_map.size(1), project_map.size(2)) project_ma
conditional_block
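Another recurring step in these rows turns a thresholded projection map into a fixed-length part descriptor: gather the feature vectors at activated positions, L2-normalise them, then concatenate max- and mean-pooled summaries (the `ClusterFeat` loop above does this one position at a time). Below is a vectorised sketch of the same aggregation under those assumptions; the function name, fallback behaviour, and threshold value are illustrative.

```python
import torch
import torch.nn.functional as F

def part_descriptor(feat_map, proj_map, thresh=0.4):
    """feat_map: (C, H, W), proj_map: (H, W) in [0, 1] -> (2*C,) descriptor of the activated region."""
    c = feat_map.shape[0]
    vecs = feat_map.reshape(c, -1).t()            # (H*W, C): one feature vector per spatial position
    keep = proj_map.reshape(-1) > thresh          # boolean mask of activated positions
    if not keep.any():                            # fall back to all positions if nothing passes the threshold
        keep = torch.ones_like(keep)
    vecs = F.normalize(vecs[keep], dim=1)         # L2-normalise each selected vector
    pooled_max = F.normalize(vecs.max(dim=0).values.unsqueeze(0))
    pooled_mean = F.normalize(vecs.mean(dim=0, keepdim=True))
    return torch.cat((pooled_max, pooled_mean), dim=1).squeeze(0)   # (2*C,)

# Usage with random tensors just to show the shapes.
d = part_descriptor(torch.randn(64, 14, 14), torch.rand(14, 14))
print(d.shape)  # torch.Size([128])
```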
Labupdown.py
import os from PIL import Image import torchvision.transforms as tvt import torch from sklearn import manifold import torchvision.utils as tvu import torch.nn.functional as F import cv2 import numpy as np from sklearn.cluster import SpectralClustering from sklearn.cluster import DBSCAN from sklearn.cluster import KMeans from reid import models from torch import nn from reid.utils.serialization import load_checkpoint, save_checkpoint from reid.utils.data import transforms as T from torch.autograd import Variable from reid.utils import to_torch import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" image_trans = tvt.Compose([ tvt.Resize((224, 224)), tvt.ToTensor(), tvt.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def
(allfeats=None): # features: NCWH allfeaturesChange = allfeats.permute(1, 0, 2, 3) allreshaped_features = allfeaturesChange.reshape(allfeaturesChange.size(0), allfeaturesChange.size(1) * allfeaturesChange.size( 2) * allfeaturesChange.size(3)) cov = torch.from_numpy(np.cov(allreshaped_features.cpu().detach())) cov = cov.type_as(allreshaped_features).cuda() eigval, eigvec = torch.eig(cov, eigenvectors=True) first_compo = eigvec[:, 0] return first_compo def getPmap(features, first_compo): featuresChange=features.permute(1,0,2,3) reshaped_features=featuresChange.reshape(featuresChange.size(0),featuresChange.size(1)*featuresChange.size(2)*featuresChange.size(3)) projected_map = torch.matmul(first_compo.cuda().unsqueeze(0), reshaped_features.cuda()).view(1, features.size(0), -1) \ .view(features.size(0), features.size(2), features.size(3)) maxv = projected_map.max() minv = projected_map.min() projected_map *= (maxv + minv) / torch.abs(maxv + minv) return projected_map def getcood(features,BIGfeat,u,d,ur,ul,dr,dm,dl,theh): s1=0 s2=14 s3=28 s4=8 s5=20 if u == 0: project_mapup = torch.zeros(features[:, :, s1:s2, :].size(0), features[:, :, s1:s2, :].size(2), features[:, :, s1:s2, :].size(3)).cuda() elif u == 1: # project_mapup = torch.clamp(pca(features[:, :, s1:s2, :], allfeats=BIGfeat[:, :, s1:s2, :], updown='up'), min=0) VecUp=getVec(allfeats=BIGfeat[:, :, s1:s2, :]) torch.save(VecUp,'./VecUp.pkl') VecUp=torch.load('./VecUp.pkl') project_mapup=torch.clamp(getPmap(features[:, :, s1:s2, :],VecUp), min=0) maxv = project_mapup.view(project_mapup.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup /= maxv if d == 0: project_mapdown = torch.zeros(features[:, :, s2:s3, :].size(0), features[:, :, s2:s3, :].size(2), features[:, :, s2:s3, :].size(3)).cuda() elif d == 1: # project_mapdown = torch.clamp(pca(features[:, :, s2:s3, :], allfeats=BIGfeat[:, :, s2:s3, :], updown='down'), # min=0) VecDown=getVec(allfeats=BIGfeat[:, :, s2:s3, :]) torch.save(VecDown, './VecDown.pkl') VecDown = torch.load('./VecDown.pkl') project_mapdown=torch.clamp(getPmap(features[:, :, s2:s3, :],VecDown), min=0) maxv = project_mapdown.view(project_mapdown.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown /= maxv if ur == 0: project_mapup1 = torch.zeros(features[:, :, s1:s2, s1:s2].size(0), features[:, :, s1:s2, s1:s2].size(2), features[:, :, s1:s2, s1:s2].size(3)).cuda() # clamp 作用 小于0的,置为0 elif ur == 1: project_mapup1 = torch.clamp( pca(features[:, :, s1:s2, s1:s2], allfeats=BIGfeat[:, :, s1:s2, s1:s2], updown='up'), min=0) maxv = project_mapup1.view(project_mapup1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup1 /= maxv if ul == 0: project_mapup2 = torch.zeros(features[:, :, s1:s2, s2:s3].size(0), features[:, :, s1:s2, s2:s3].size(2), features[:, :, s1:s2, s2:s3].size(3)).cuda() elif ul == 1: project_mapup2 = torch.clamp( pca(features[:, :, s1:s2, s2:s3], allfeats=BIGfeat[:, :, s1:s2, s2:s3], updown='up'), min=0) maxv = project_mapup2.view(project_mapup2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapup2 /= maxv if dr == 0: project_mapdown1 = torch.zeros(features[:, :, s2:s3, s1:s4].size(0), features[:, :, s2:s3, s1:s4].size(2), features[:, :, s2:s3, s1:s4].size(3)).cuda() elif dr == 1: project_mapdown1 = torch.clamp( pca(features[:, :, s2:s3, s1:s4], allfeats=BIGfeat[:, :, s2:s3, s1:s4], updown='down'), min=0) maxv = project_mapdown1.view(project_mapdown1.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown1 /= maxv if dm == 0: project_mapdown2 
= torch.zeros(features[:, :, s2:s3, s4:s5].size(0), features[:, :, s2:s3, s4:s5].size(2), features[:, :, s2:s3, s4:s5].size(3)).cuda() elif dm == 1: project_mapdown2 = torch.clamp( pca(features[:, :, s2:s3, s4:s5], allfeats=BIGfeat[:, :, s2:s3, s4:s5], updown='down'), min=0) maxv = project_mapdown2.view(project_mapdown2.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown2 /= maxv if dl == 0: project_mapdown3 = torch.zeros(features[:, :, s2:s3, s5:s3].size(0), features[:, :, s2:s3, s5:s3].size(2), features[:, :, s2:s3, s5:s3].size(3)).cuda() elif dl == 1: project_mapdown3 = torch.clamp( pca(features[:, :, s2:s3, s5:s3], allfeats=BIGfeat[:, :, s2:s3, s5:s3], updown='down'), min=0) maxv = project_mapdown3.view(project_mapdown3.size(0), -1).max(dim=1)[0].unsqueeze(1).unsqueeze(1) project_mapdown3 /= maxv if u == 0 and d == 0: project_mapup = torch.cat((project_mapup1, project_mapup2), 2) project_mapdown = torch.cat((project_mapdown1, project_mapdown2, project_mapdown3), 2) project_map = torch.cat((project_mapup, project_mapdown), 1) # project_map=F.adaptive_avg_pool2d(project_map,(7,7)) coordinate = [] feat = [] for i in range(project_map.size(0)): Sproject_map = project_map[i] Singlefeature = features[i, :, :, :] meanmaskvalue = Sproject_map.sum() / (Sproject_map.size(0) * Sproject_map.size(1)) Sproject_map[Sproject_map < theh] = 0 allcood = torch.nonzero(Sproject_map).cpu().numpy() Sproject_map = Sproject_map.unsqueeze(0) # Smask = Sproject_map.repeat(Singlefeature.size(0), 1, 1) SOproject_map = Sproject_map.squeeze() SOproject_map = SOproject_map.reshape(SOproject_map.size(0) * SOproject_map.size(1)) Singlefeature = Singlefeature.squeeze() Singlefeature = Singlefeature.permute(1, 2, 0) Singlefeature = Singlefeature.reshape(Singlefeature.size(0) * Singlefeature.size(1), Singlefeature.size(2)) cnt = 0 ClusterFeat = torch.zeros(Singlefeature.size(1)).unsqueeze(0) for j in range(SOproject_map.size(0)): if SOproject_map[j] > theh and cnt == 0: ClusterFeat = F.normalize(Singlefeature[j].unsqueeze(0)) cnt = cnt + 1 elif SOproject_map[j] >theh and cnt != 0: SF = F.normalize(Singlefeature[j].unsqueeze(0)) ClusterFeat = torch.cat((ClusterFeat, SF), 0) cnt = cnt + 1 ClusterFeatmean = torch.mean(ClusterFeat, dim=0) ClusterFeatmean = ClusterFeatmean.unsqueeze(0) ClusterFeatmax = torch.max(ClusterFeat, dim=0) ClusterFeatmax = ClusterFeatmax[0].unsqueeze(0) ClusterFeatmax = F.normalize(ClusterFeatmax) ClusterFeatmean = F.normalize(ClusterFeatmean) SingleClusterFeat = torch.cat((ClusterFeatmax, ClusterFeatmean), 1) SingleClusterFeat = SingleClusterFeat.squeeze() # Singlefeature = Singlefeature.mul(Smask) # Singlefeature = Singlefeature.reshape(Singlefeature.size(0), Singlefeature.size(1) * Singlefeature.size(2)) Singlefeature = SingleClusterFeat.cpu().detach().numpy() # SPFeat = np.sum(Singlefeature, axis=1, keepdims=False) / (len(allcood)) # ccc=Singlefeature.sum()/(len(aa)) cood = np.mean(allcood, axis=0, keepdims=True) feat.append(Singlefeature) coordinate.append(cood) # Sproject_map[Sproject_map > meanmaskvalue * (0.5)] = 1 project_map[i] = Sproject_map.reshape(project_map.size(1), project_map.size(2)) project_map = F.interpolate(project_map.unsqueeze(1), size=(imgs.size(2), imgs.size(3)), mode='bilinear', align_corners=False) * 255. 
coordinate = np.array(coordinate) coordinate = np.squeeze(coordinate) feat = np.array(feat) # tsne2 = manifold.TSNE(n_components=3, init='pca', random_state=501) # X_tsne2 = tsne2.fit_transform(feat) # x_min, x_max = X_tsne2.min(0), X_tsne2.max(0) # feat = (X_tsne2 - x_min) / (x_max - x_min) return coordinate,feat,project_map # datamanager = torchreid.data.ImageDataManager( # root='reid-data', # sources='VehicleID', # height=224, # width=224, # batch_size=32, # ) datadir='E:\\work\\DLMB-PB\\DLMB-PB\\data' model = models.create('resnet50', num_features=1024, dropout=0.5, num_classes=13164) model = model.cuda() checkpoint = load_checkpoint('E:\\work\\DLMB-PB\\DLMB-PB\\checkpointres50.pth.tar') model.load_state_dict(checkpoint['state_dict']) data_list = os.listdir(datadir) imgs = [] for name in data_list: img = image_trans(Image.open(os.path.join(datadir+'\\'+name)).convert('RGB')) imgs.append(img.unsqueeze(0)) imgs = torch.cat(imgs) featurelist,_ = model(imgs.cuda()) #第15层的尺寸是56*56 第16层的尺寸是28*28 23层开始后是14*14 features=featurelist[6] pca = PCAProjectNet() BIGfeat=torch.load('./renet50vidlayer6.pkl') coordinate,feat1,_=getcood(BIGfeat[0:2000, :, :, :].cuda(),BIGfeat[0:2000, :, :, :],1,0,0,0,0,0,0,theh=0.4) Scoordinate,Sfeat1,project_map=getcood(features.cuda(),BIGfeat[0:2000, :, :, :],1,0,0,0,0,0,0,theh=0.4) torch.save(feat1,'./feat1.pkl') torch.save(Sfeat1,'./Sfeat1.pkl') feat1=torch.load('./feat1.pkl') Sfeat1=torch.load('./Sfeat1.pkl') coordinate,feat2,_=getcood(BIGfeat[0:2000, :, :, :].cuda(),BIGfeat[0:2000, :, :, :],0,1,0,0,0,0,0,theh=0.4) Scoordinate,Sfeat2,project_map=getcood(features.cuda(),BIGfeat[0:2000, :, :, :],0,1,0,0,0,0,0,theh=0.4) torch.save(feat2,'./feat2.pkl') torch.save(Sfeat2,'./Sfeat2.pkl') feat2=torch.load('./feat2.pkl') Sfeat2=torch.load('./Sfeat2.pkl') feat=np.concatenate((feat1,feat2),axis=1) Sfeat=np.concatenate((Sfeat1,Sfeat2),axis=1) # coordinate,feat,_=getcood(features.cuda(),features.cuda(),0,1,0,0,0,0,0,theh=0.2) # Scoordinate,Sfeat,project_map=getcood(features.cuda(),features.cuda(),0,1,0,0,0,0,0,theh=0.2) # # feat=np.concatenate((coordinate,feat),axis=1) # Sfeat=np.concatenate((Scoordinate,Sfeat),axis=1) # label = SpectralClustering(n_clusters=2).fit_predict(Sfeat) km_cluster = KMeans(n_clusters=2, max_iter=3000, n_init=2, \ init='k-means++', n_jobs=-1) km_cluster.fit(Sfeat) # km_cluster=torch.load('./kmclustersub.pkl') label = km_cluster.predict(feat) torch.save(km_cluster,'./kmcluster.pkl') save_imgs = [] for i, name in enumerate(data_list): # if i>0: # break original_image = cv2.resize(cv2.imread(os.path.join(datadir, name)), (224, 224)) mask = project_map[i].repeat(3, 1, 1).permute(1, 2, 0).detach().cpu().numpy() #mask=project_map[i].permute(1, 2, 0).detach().cpu().numpy() #img = cv2.cvtColor(np.asarray(mask), cv2.COLOR_RGB2BGR) orgimg = cv2.cvtColor(np.asarray(original_image), cv2.COLOR_RGB2BGR) # imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # ret, thresh = cv2.threshold(imgray, 125, 255, 0) # thresh = np.clip(thresh, 0, 255) # 归一化也行 # thresh = np.array(thresh, np.uint8) # contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # cnt=0 # for c in contours: # M = cv2.moments(c) # x, y, w, h = cv2.boundingRect(c) # #if w*h<100 or w>100 or h>100 or w*h>4000: # # continue # #scores = 1 # cnt=cnt+1 # cv2.rectangle(original_image,(x,y), (x+w,y+h), (153,153,0), 5) #region = original_image.crop((x, y, x + w, y + h)) mask = cv2.applyColorMap(mask.astype(np.uint8), cv2.COLORMAP_JET) save_img = cv2.addWeighted(orgimg, 0.5, mask, 0.5, 
0.0) if label[i]==0: save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (0, 255, 0), 3) # elif label[i]==1: # save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (0, 0, 255), 3) # elif label[i]==2: # save_img=cv2.rectangle(save_img, (int(0), int(0)), (int(224), int(224)), (255, 0, 0), 3) save_imgs.append(save_img) save_imgs = np.concatenate(save_imgs, 1) cv2.imwrite('./testddtvec.jpg', save_imgs)
getVec
identifier_name
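The tail of each Labupdown.py row overlays the upsampled projection map on the input image with OpenCV and frames each image according to its cluster label. A stripped-down sketch of that overlay is below; the image path is a placeholder and the heat map is random, so this only illustrates the colormap-and-blend calls rather than reproducing the script's output.

```python
import cv2
import numpy as np

def overlay_heatmap(image_bgr, heat, alpha=0.5):
    """Blend a heat map in [0, 1] over a BGR image using the JET colormap, as the script does."""
    heat = cv2.resize(heat, (image_bgr.shape[1], image_bgr.shape[0]))
    heat_u8 = np.clip(heat * 255.0, 0, 255).astype(np.uint8)
    colored = cv2.applyColorMap(heat_u8, cv2.COLORMAP_JET)
    return cv2.addWeighted(image_bgr, alpha, colored, 1.0 - alpha, 0.0)

# Hypothetical usage: 'car.jpg' is a placeholder path, the map here is random.
img = cv2.imread('car.jpg')
if img is not None:
    out = overlay_heatmap(img, np.random.rand(14, 14).astype(np.float32))
    cv2.rectangle(out, (0, 0), (out.shape[1] - 1, out.shape[0] - 1), (0, 255, 0), 3)  # frame for cluster 0
    cv2.imwrite('overlay.jpg', out)
```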
main.rs
/* Copyright 2021 Integritee AG and Supercomputing Systems AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![cfg_attr(test, feature(assert_matches))] #[cfg(feature = "teeracle")] use crate::teeracle::start_interval_market_update; use crate::{ account_funding::{setup_account_funding, EnclaveAccountInfoProvider}, error::Error, globals::tokio_handle::{GetTokioHandle, GlobalTokioHandle}, initialized_service::{ start_is_initialized_server, InitializationHandler, IsInitialized, TrackInitialization, }, ocall_bridge::{ bridge_api::Bridge as OCallBridge, component_factory::OCallBridgeComponentFactory, }, parentchain_handler::{HandleParentchain, ParentchainHandler}, prometheus_metrics::{start_metrics_server, EnclaveMetricsReceiver, MetricsHandler}, sidechain_setup::{sidechain_init_block_production, sidechain_start_untrusted_rpc_server}, sync_block_broadcaster::SyncBlockBroadcaster, utils::{check_files, extract_shard}, worker::Worker, worker_peers_updater::WorkerPeersUpdater, }; use base58::ToBase58; use clap::{load_yaml, App}; use codec::{Decode, Encode}; use config::Config; use enclave::{ api::enclave_init, tls_ra::{enclave_request_state_provisioning, enclave_run_state_provisioning_server}, }; use itp_enclave_api::{ direct_request::DirectRequest, enclave_base::EnclaveBase, remote_attestation::{RemoteAttestation, TlsRemoteAttestation}, sidechain::Sidechain, teeracle_api::TeeracleApi, Enclave, }; use itp_node_api::{ api_client::{AccountApi, PalletTeerexApi, ParentchainApi}, metadata::NodeMetadata, node_api_factory::{CreateNodeApi, NodeApiFactory}, }; use itp_settings::{ files::SIDECHAIN_STORAGE_PATH, worker_mode::{ProvideWorkerMode, WorkerMode, WorkerModeProvider}, }; use its_peer_fetch::{ block_fetch_client::BlockFetcher, untrusted_peer_fetch::UntrustedPeerFetcher, }; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_storage::{interface::FetchBlocks, BlockPruner, SidechainStorageLock}; use log::*; use my_node_runtime::{Event, Hash, Header}; use sgx_types::*; use sp_core::crypto::{AccountId32, Ss58Codec}; use sp_keyring::AccountKeyring; use std::{ path::PathBuf, str, sync::{ mpsc::{channel, Sender}, Arc, }, thread, time::Duration, }; use substrate_api_client::{utils::FromHexString, Header as HeaderTrait, XtStatus}; use teerex_primitives::ShardIdentifier; mod account_funding; mod config; mod enclave; mod error; mod globals; mod initialized_service; mod ocall_bridge; mod parentchain_handler; mod prometheus_metrics; mod setup; mod sidechain_setup; mod sync_block_broadcaster; mod sync_state; #[cfg(feature = "teeracle")] mod teeracle; mod tests; mod utils; mod worker; mod worker_peers_updater; const VERSION: &str = env!("CARGO_PKG_VERSION"); pub type EnclaveWorker = Worker<Config, NodeApiFactory, Enclave, InitializationHandler<WorkerModeProvider>>; fn main() { // Setup logging env_logger::init(); let yml = load_yaml!("cli.yml"); let matches = App::from_yaml(yml).get_matches(); let config = Config::from(&matches); GlobalTokioHandle::initialize(); // log this information, don't println because some python 
scripts for GA rely on the // stdout from the service #[cfg(feature = "production")] info!("*** Starting service in SGX production mode"); #[cfg(not(feature = "production"))] info!("*** Starting service in SGX debug mode"); info!("*** Running worker in mode: {:?} \n", WorkerModeProvider::worker_mode()); let clean_reset = matches.is_present("clean-reset"); if clean_reset { setup::purge_files_from_cwd().unwrap(); } // build the entire dependency tree let tokio_handle = Arc::new(GlobalTokioHandle {}); let sidechain_blockstorage = Arc::new( SidechainStorageLock::<SignedSidechainBlock>::new(PathBuf::from(&SIDECHAIN_STORAGE_PATH)) .unwrap(), ); let node_api_factory = Arc::new(NodeApiFactory::new(config.node_url(), AccountKeyring::Alice.pair())); let enclave = Arc::new(enclave_init(&config).unwrap()); let initialization_handler = Arc::new(InitializationHandler::default()); let worker = Arc::new(EnclaveWorker::new( config.clone(), enclave.clone(), node_api_factory.clone(), initialization_handler.clone(), Vec::new(), )); let sync_block_broadcaster = Arc::new(SyncBlockBroadcaster::new(tokio_handle.clone(), worker.clone())); let peer_updater = Arc::new(WorkerPeersUpdater::new(worker)); let untrusted_peer_fetcher = UntrustedPeerFetcher::new(node_api_factory.clone()); let peer_sidechain_block_fetcher = Arc::new(BlockFetcher::<SignedSidechainBlock, _>::new(untrusted_peer_fetcher)); let enclave_metrics_receiver = Arc::new(EnclaveMetricsReceiver {}); // initialize o-call bridge with a concrete factory implementation OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( node_api_factory.clone(), sync_block_broadcaster, enclave.clone(), sidechain_blockstorage.clone(), peer_updater, peer_sidechain_block_fetcher, tokio_handle.clone(), enclave_metrics_receiver, ))); if let Some(run_config) = &config.run_config { let shard = extract_shard(&run_config.shard, enclave.as_ref()); println!("Worker Config: {:?}", config); if clean_reset { setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); } let node_api = node_api_factory.create_api().expect("Failed to create parentchain node API"); if run_config.request_state { sync_state::sync_state::<_, _, WorkerModeProvider>( &node_api, &shard, enclave.as_ref(), run_config.skip_ra, ); } start_worker::<_, _, _, _, WorkerModeProvider>( config, &shard, enclave, sidechain_blockstorage, node_api, tokio_handle, initialization_handler, ); } else if let Some(smatches) = matches.subcommand_matches("request-state") { println!("*** Requesting state from a registered worker \n"); let node_api = node_api_factory.create_api().expect("Failed to create parentchain node API"); sync_state::sync_state::<_, _, WorkerModeProvider>( &node_api, &extract_shard(&smatches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), enclave.as_ref(), smatches.is_present("skip-ra"), ); } else if matches.is_present("shielding-key") { setup::generate_shielding_key_file(enclave.as_ref()); } else if matches.is_present("signing-key") { setup::generate_signing_key_file(enclave.as_ref()); } else if matches.is_present("dump-ra") { info!("*** Perform RA and dump cert to disk"); enclave.dump_ra_to_disk().unwrap(); } else if matches.is_present("mrenclave") { println!("{}", enclave.get_mrenclave().unwrap().encode().to_base58()); } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { setup::init_shard( enclave.as_ref(), &extract_shard(&sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), ); } else if let Some(sub_matches) = 
matches.subcommand_matches("test") { if sub_matches.is_present("provisioning-server") { println!("*** Running Enclave MU-RA TLS server\n"); enclave_run_state_provisioning_server( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url(), sub_matches.is_present("skip-ra"), ); println!("[+] Done!"); } else if sub_matches.is_present("provisioning-client") { println!("*** Running Enclave MU-RA TLS client\n"); let shard = extract_shard( &sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref(), ); enclave_request_state_provisioning( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url_external(), &shard, sub_matches.is_present("skip-ra"), ) .unwrap(); println!("[+] Done!"); } else { tests::run_enclave_tests(sub_matches); } } else { println!("For options: use --help"); } } /// FIXME: needs some discussion (restructuring?) #[allow(clippy::too_many_arguments)] fn start_worker<E, T, D, InitializationHandler, WorkerModeProvider>( config: Config, shard: &ShardIdentifier, enclave: Arc<E>, sidechain_storage: Arc<D>, node_api: ParentchainApi, tokio_handle_getter: Arc<T>, initialization_handler: Arc<InitializationHandler>, ) where T: GetTokioHandle, E: EnclaveBase + DirectRequest + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, D: BlockPruner + FetchBlocks<SignedSidechainBlock> + Sync + Send + 'static, InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, WorkerModeProvider: ProvideWorkerMode, { println!("Integritee Worker v{}", VERSION); info!("starting worker on shard {}", shard.encode().to_base58()); // ------------------------------------------------------------------------ // check for required files check_files(); // ------------------------------------------------------------------------ // initialize the enclave let mrenclave = enclave.get_mrenclave().unwrap(); println!("MRENCLAVE={}", mrenclave.to_base58()); println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); // ------------------------------------------------------------------------ // let new workers call us for key provisioning println!("MU-RA server listening on {}", config.mu_ra_url()); let run_config = config.run_config.clone().expect("Run config missing"); let skip_ra = run_config.skip_ra; let is_development_mode = run_config.dev; let ra_url = config.mu_ra_url(); let enclave_api_key_prov = enclave.clone(); thread::spawn(move || { enclave_run_state_provisioning_server( enclave_api_key_prov.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &ra_url, skip_ra, ); info!("State provisioning server stopped."); }); let tokio_handle = tokio_handle_getter.get_handle(); #[cfg(feature = "teeracle")] let teeracle_tokio_handle = tokio_handle.clone(); // ------------------------------------------------------------------------ // Get the public key of our TEE. let tee_accountid = enclave_account(enclave.as_ref()); println!("Enclave account {:} ", &tee_accountid.to_ss58check()); // ------------------------------------------------------------------------ // Start `is_initialized` server. 
let untrusted_http_server_port = config .try_parse_untrusted_http_server_port() .expect("untrusted http server port to be a valid port number"); let initialization_handler_clone = initialization_handler.clone(); tokio_handle.spawn(async move { if let Err(e) = start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) .await { error!("Unexpected error in `is_initialized` server: {:?}", e); } }); // ------------------------------------------------------------------------ // Start prometheus metrics server. if config.enable_metrics_server { let enclave_wallet = Arc::new(EnclaveAccountInfoProvider::new(node_api.clone(), tee_accountid.clone())); let metrics_handler = Arc::new(MetricsHandler::new(enclave_wallet)); let metrics_server_port = config .try_parse_metrics_server_port() .expect("metrics server port to be a valid port number"); tokio_handle.spawn(async move { if let Err(e) = start_metrics_server(metrics_handler, metrics_server_port).await { error!("Unexpected error in Prometheus metrics server: {:?}", e); } }); } // ------------------------------------------------------------------------ // Start trusted worker rpc server if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker { let direct_invocation_server_addr = config.trusted_worker_url_internal(); let enclave_for_direct_invocation = enclave.clone(); thread::spawn(move || { println!( "[+] Trusted RPC direct invocation server listening on {}", direct_invocation_server_addr ); enclave_for_direct_invocation .init_direct_invocation_server(direct_invocation_server_addr) .unwrap(); println!("[+] RPC direct invocation server shut down"); }); } // ------------------------------------------------------------------------ // Start untrusted worker rpc server. // i.e move sidechain block importing to trusted worker. if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { sidechain_start_untrusted_rpc_server( &config, enclave.clone(), sidechain_storage.clone(), tokio_handle, ); } // ------------------------------------------------------------------------ // Init parentchain specific stuff. Needed for parentchain communication. let parentchain_handler = Arc::new(ParentchainHandler::new(node_api.clone(), enclave.clone())); let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); let nonce = node_api.get_nonce_of(&tee_accountid).unwrap(); info!("Enclave nonce = {:?}", nonce); enclave .set_nonce(nonce) .expect("Could not set nonce of enclave. Returning here..."); let metadata = node_api.metadata.clone(); let runtime_spec_version = node_api.runtime_version.spec_version; let runtime_transaction_version = node_api.runtime_version.transaction_version; enclave .set_node_metadata( NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), ) .expect("Could not set the node metadata in the enclave"); // ------------------------------------------------------------------------ // Perform a remote attestation and get an unchecked extrinsic back. let trusted_url = config.trusted_worker_url_external(); if skip_ra { println!( "[!] skipping remote attestation. Registering enclave without attestation report." ); } else { println!("[!] 
creating remote attestation report and create enclave register extrinsic."); }; let uxt = enclave.perform_ra(&trusted_url, skip_ra).unwrap(); let mut xthex = hex::encode(uxt); xthex.insert_str(0, "0x"); // Account funds if let Err(x) = setup_account_funding(&node_api, &tee_accountid, xthex.clone(), is_development_mode) { error!("Starting worker failed: {:?}", x); // Return without registering the enclave. This will fail and the transaction will be banned for 30min. return } println!("[>] Register the enclave (send the extrinsic)"); let register_enclave_xt_hash = node_api.send_extrinsic(xthex, XtStatus::Finalized).unwrap(); println!("[<] Extrinsic got finalized. Hash: {:?}\n", register_enclave_xt_hash); let register_enclave_xt_header = node_api.get_header(register_enclave_xt_hash).unwrap().unwrap(); let we_are_primary_validateer = we_are_primary_validateer(&node_api, &register_enclave_xt_header).unwrap(); if we_are_primary_validateer { println!("[+] We are the primary validateer"); } else { println!("[+] We are NOT the primary validateer"); } initialization_handler.registered_on_parentchain(); // ------------------------------------------------------------------------ // initialize teeracle interval #[cfg(feature = "teeracle")] if WorkerModeProvider::worker_mode() == WorkerMode::Teeracle { start_interval_market_update( &node_api, run_config.teeracle_update_interval, enclave.as_ref(), &teeracle_tokio_handle, ); } if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { println!("*** [+] Finished syncing light client, syncing parentchain..."); // Syncing all parentchain blocks, this might take a while.. let mut last_synced_header = parentchain_handler.sync_parentchain(last_synced_header).unwrap(); // ------------------------------------------------------------------------ // Initialize the sidechain if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { last_synced_header = sidechain_init_block_production( enclave, &register_enclave_xt_header, we_are_primary_validateer, parentchain_handler.clone(), sidechain_storage, &last_synced_header, ) .unwrap(); } // ------------------------------------------------------------------------ // start parentchain syncing loop (subscribe to header updates) thread::Builder::new() .name("parentchain_sync_loop".to_owned()) .spawn(move || { if let Err(e) = subscribe_to_parentchain_new_headers(parentchain_handler, last_synced_header) { error!("Parentchain block syncing terminated with a failure: {:?}", e); } println!("[!] Parentchain block syncing has terminated"); }) .unwrap(); } // ------------------------------------------------------------------------ if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { spawn_worker_for_shard_polling(shard, node_api.clone(), initialization_handler); } // ------------------------------------------------------------------------ // subscribe to events and react on firing println!("*** Subscribing to events"); let (sender, receiver) = channel(); let sender2 = sender.clone(); let _eventsubscriber = thread::Builder::new() .name("eventsubscriber".to_owned()) .spawn(move || { node_api.subscribe_events(sender2).unwrap(); }) .unwrap(); println!("[+] Subscribed to events. waiting..."); let timeout = Duration::from_millis(10); loop { if let Ok(msg) = receiver.recv_timeout(timeout) { if let Ok(events) = parse_events(msg.clone()) { print_events(events, sender.clone()) } } } } /// Start polling loop to wait until we have a worker for a shard registered on /// the parentchain (TEEREX WorkerForShard). 
This is the pre-requisite to be /// considered initialized and ready for the next worker to start (in sidechain mode only). fn spawn_worker_for_shard_polling<InitializationHandler>( shard: &ShardIdentifier, node_api: ParentchainApi, initialization_handler: Arc<InitializationHandler>, ) where InitializationHandler: TrackInitialization + Sync + Send + 'static, { let shard_for_initialized = *shard; thread::spawn(move || { const POLL_INTERVAL_SECS: u64 = 2; loop { info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); if let Ok(Some(_)) = node_api.worker_for_shard(&shard_for_initialized, None) { // Set that the service is initialized. initialization_handler.worker_for_shard_registered(); println!("[+] Found `WorkerForShard` on parentchain state"); break } thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); } }); } type Events = Vec<frame_system::EventRecord<Event, Hash>>; fn parse_events(event: String) -> Result<Events, String> { let _unhex = Vec::from_hex(event).map_err(|_| "Decoding Events Failed".to_string())?; let mut _er_enc = _unhex.as_slice(); Events::decode(&mut _er_enc).map_err(|_| "Decoding Events Failed".to_string()) } fn print_events(events: Events, _sender: Sender<String>)
/// Subscribe to the node API finalized heads stream and trigger a parent chain sync /// upon receiving a new header. fn subscribe_to_parentchain_new_headers<E: EnclaveBase + Sidechain>( parentchain_handler: Arc<ParentchainHandler<ParentchainApi, E>>, mut last_synced_header: Header, ) -> Result<(), Error> { let (sender, receiver) = channel(); //TODO: this should be implemented by parentchain_handler directly, and not via // exposed parentchain_api. Blocked by https://github.com/scs/substrate-api-client/issues/267. parentchain_handler .parentchain_api() .subscribe_finalized_heads(sender) .map_err(Error::ApiClient)?; loop { let new_header: Header = match receiver.recv() { Ok(header_str) => serde_json::from_str(&header_str).map_err(Error::Serialization), Err(e) => Err(Error::ApiSubscriptionDisconnected(e)), }?; println!( "[+] Received finalized header update ({}), syncing parent chain...", new_header.number ); last_synced_header = parentchain_handler.sync_parentchain(last_synced_header)?; } } /// Get the public signing key of the TEE. fn enclave_account<E: EnclaveBase>(enclave_api: &E) -> AccountId32 { let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); AccountId32::from(*tee_public.as_array_ref()) } /// Checks if we are the first validateer to register on the parentchain. fn we_are_primary_validateer( node_api: &ParentchainApi, register_enclave_xt_header: &Header, ) -> Result<bool, Error> { let enclave_count_of_previous_block = node_api.enclave_count(Some(*register_enclave_xt_header.parent_hash()))?; Ok(enclave_count_of_previous_block == 0) }
{ for evr in &events { debug!("Decoded: phase = {:?}, event = {:?}", evr.phase, evr.event); match &evr.event { Event::Balances(be) => { info!("[+] Received balances event"); debug!("{:?}", be); match &be { pallet_balances::Event::Transfer { from: transactor, to: dest, amount: value, } => { debug!(" Transactor: {:?}", transactor.to_ss58check()); debug!(" Destination: {:?}", dest.to_ss58check()); debug!(" Value: {:?}", value); }, _ => { trace!("Ignoring unsupported balances event"); }, } }, Event::Teerex(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teerex::Event::AddedEnclave(sender, worker_url) => { println!("[+] Received AddedEnclave event"); println!(" Sender (Worker): {:?}", sender); println!(" Registered URL: {:?}", str::from_utf8(worker_url).unwrap()); }, my_node_runtime::pallet_teerex::Event::Forwarded(shard) => { println!( "[+] Received trusted call for shard {}", shard.encode().to_base58() ); }, my_node_runtime::pallet_teerex::Event::ProcessedParentchainBlock( sender, block_hash, merkle_root, block_number, ) => { info!("[+] Received ProcessedParentchainBlock event"); debug!(" From: {:?}", sender); debug!(" Block Hash: {:?}", hex::encode(block_hash)); debug!(" Merkle Root: {:?}", hex::encode(merkle_root)); debug!(" Block Number: {:?}", block_number); }, my_node_runtime::pallet_teerex::Event::ShieldFunds(incognito_account) => { info!("[+] Received ShieldFunds event"); debug!(" For: {:?}", incognito_account); }, my_node_runtime::pallet_teerex::Event::UnshieldedFunds(incognito_account) => { info!("[+] Received UnshieldedFunds event"); debug!(" For: {:?}", incognito_account); }, _ => { trace!("Ignoring unsupported pallet_teerex event"); }, } }, #[cfg(feature = "teeracle")] Event::Teeracle(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teeracle::Event::ExchangeRateUpdated( source, currency, new_value, ) => { println!("[+] Received ExchangeRateUpdated event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); println!(" Exchange rate: {:?}", new_value); }, my_node_runtime::pallet_teeracle::Event::ExchangeRateDeleted( source, currency, ) => { println!("[+] Received ExchangeRateDeleted event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); }, my_node_runtime::pallet_teeracle::Event::AddedToWhitelist( source, mrenclave, ) => { println!("[+] Received AddedToWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, my_node_runtime::pallet_teeracle::Event::RemovedFromWhitelist( source, mrenclave, ) => { println!("[+] Received RemovedFromWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, _ => { trace!("Ignoring unsupported pallet_teeracle event"); }, } }, #[cfg(feature = "sidechain")] Event::Sidechain(re) => match &re { my_node_runtime::pallet_sidechain::Event::ProposedSidechainBlock( sender, payload, ) => { info!("[+] Received ProposedSidechainBlock event"); debug!(" From: {:?}", sender); debug!(" Payload: {:?}", hex::encode(payload)); }, _ => { trace!("Ignoring unsupported pallet_sidechain event"); }, }, _ => { trace!("Ignoring event {:?}", evr); }, } } }
identifier_body
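As a standalone illustration of the poll-until-registered pattern that `spawn_worker_for_shard_polling` implements in the example above, a minimal self-contained sketch could look like the following; the names `poll_until_found`, `lookup`, and `on_found` are hypothetical and are not part of the Integritee worker API.

use std::{thread, time::Duration};

// Minimal sketch of the poll-until-registered pattern. `lookup` stands in
// for the `worker_for_shard` node query and `on_found` for the
// initialization handler callback; both names are hypothetical.
fn poll_until_found<F, G>(lookup: F, on_found: G) -> thread::JoinHandle<()>
where
    F: Fn() -> Option<u32> + Send + 'static,
    G: Fn() + Send + 'static,
{
    thread::spawn(move || {
        const POLL_INTERVAL_SECS: u64 = 2;
        loop {
            if lookup().is_some() {
                on_found();
                break;
            }
            thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS));
        }
    })
}

fn main() {
    let handle = poll_until_found(|| Some(1), || println!("worker registered"));
    handle.join().unwrap();
}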
main.rs
/* Copyright 2021 Integritee AG and Supercomputing Systems AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![cfg_attr(test, feature(assert_matches))] #[cfg(feature = "teeracle")] use crate::teeracle::start_interval_market_update; use crate::{ account_funding::{setup_account_funding, EnclaveAccountInfoProvider}, error::Error, globals::tokio_handle::{GetTokioHandle, GlobalTokioHandle}, initialized_service::{ start_is_initialized_server, InitializationHandler, IsInitialized, TrackInitialization, }, ocall_bridge::{ bridge_api::Bridge as OCallBridge, component_factory::OCallBridgeComponentFactory, }, parentchain_handler::{HandleParentchain, ParentchainHandler}, prometheus_metrics::{start_metrics_server, EnclaveMetricsReceiver, MetricsHandler}, sidechain_setup::{sidechain_init_block_production, sidechain_start_untrusted_rpc_server}, sync_block_broadcaster::SyncBlockBroadcaster, utils::{check_files, extract_shard}, worker::Worker, worker_peers_updater::WorkerPeersUpdater, }; use base58::ToBase58; use clap::{load_yaml, App}; use codec::{Decode, Encode}; use config::Config; use enclave::{ api::enclave_init, tls_ra::{enclave_request_state_provisioning, enclave_run_state_provisioning_server}, }; use itp_enclave_api::{ direct_request::DirectRequest, enclave_base::EnclaveBase, remote_attestation::{RemoteAttestation, TlsRemoteAttestation}, sidechain::Sidechain, teeracle_api::TeeracleApi, Enclave, }; use itp_node_api::{ api_client::{AccountApi, PalletTeerexApi, ParentchainApi}, metadata::NodeMetadata, node_api_factory::{CreateNodeApi, NodeApiFactory}, }; use itp_settings::{ files::SIDECHAIN_STORAGE_PATH, worker_mode::{ProvideWorkerMode, WorkerMode, WorkerModeProvider}, }; use its_peer_fetch::{ block_fetch_client::BlockFetcher, untrusted_peer_fetch::UntrustedPeerFetcher, }; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_storage::{interface::FetchBlocks, BlockPruner, SidechainStorageLock}; use log::*; use my_node_runtime::{Event, Hash, Header}; use sgx_types::*; use sp_core::crypto::{AccountId32, Ss58Codec}; use sp_keyring::AccountKeyring; use std::{ path::PathBuf, str, sync::{ mpsc::{channel, Sender}, Arc, }, thread, time::Duration, }; use substrate_api_client::{utils::FromHexString, Header as HeaderTrait, XtStatus}; use teerex_primitives::ShardIdentifier; mod account_funding; mod config; mod enclave; mod error; mod globals; mod initialized_service; mod ocall_bridge; mod parentchain_handler; mod prometheus_metrics; mod setup; mod sidechain_setup; mod sync_block_broadcaster; mod sync_state; #[cfg(feature = "teeracle")] mod teeracle; mod tests; mod utils; mod worker; mod worker_peers_updater; const VERSION: &str = env!("CARGO_PKG_VERSION"); pub type EnclaveWorker = Worker<Config, NodeApiFactory, Enclave, InitializationHandler<WorkerModeProvider>>; fn main() { // Setup logging env_logger::init(); let yml = load_yaml!("cli.yml"); let matches = App::from_yaml(yml).get_matches(); let config = Config::from(&matches); GlobalTokioHandle::initialize(); // log this information, don't println because some python 
scripts for GA rely on the // stdout from the service #[cfg(feature = "production")] info!("*** Starting service in SGX production mode"); #[cfg(not(feature = "production"))] info!("*** Starting service in SGX debug mode"); info!("*** Running worker in mode: {:?} \n", WorkerModeProvider::worker_mode()); let clean_reset = matches.is_present("clean-reset"); if clean_reset { setup::purge_files_from_cwd().unwrap(); } // build the entire dependency tree let tokio_handle = Arc::new(GlobalTokioHandle {}); let sidechain_blockstorage = Arc::new( SidechainStorageLock::<SignedSidechainBlock>::new(PathBuf::from(&SIDECHAIN_STORAGE_PATH)) .unwrap(), ); let node_api_factory = Arc::new(NodeApiFactory::new(config.node_url(), AccountKeyring::Alice.pair())); let enclave = Arc::new(enclave_init(&config).unwrap()); let initialization_handler = Arc::new(InitializationHandler::default()); let worker = Arc::new(EnclaveWorker::new( config.clone(), enclave.clone(), node_api_factory.clone(), initialization_handler.clone(), Vec::new(), )); let sync_block_broadcaster = Arc::new(SyncBlockBroadcaster::new(tokio_handle.clone(), worker.clone())); let peer_updater = Arc::new(WorkerPeersUpdater::new(worker)); let untrusted_peer_fetcher = UntrustedPeerFetcher::new(node_api_factory.clone()); let peer_sidechain_block_fetcher = Arc::new(BlockFetcher::<SignedSidechainBlock, _>::new(untrusted_peer_fetcher)); let enclave_metrics_receiver = Arc::new(EnclaveMetricsReceiver {}); // initialize o-call bridge with a concrete factory implementation OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( node_api_factory.clone(), sync_block_broadcaster, enclave.clone(), sidechain_blockstorage.clone(), peer_updater, peer_sidechain_block_fetcher, tokio_handle.clone(), enclave_metrics_receiver, ))); if let Some(run_config) = &config.run_config { let shard = extract_shard(&run_config.shard, enclave.as_ref()); println!("Worker Config: {:?}", config); if clean_reset { setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); } let node_api = node_api_factory.create_api().expect("Failed to create parentchain node API"); if run_config.request_state { sync_state::sync_state::<_, _, WorkerModeProvider>( &node_api, &shard, enclave.as_ref(), run_config.skip_ra, ); } start_worker::<_, _, _, _, WorkerModeProvider>( config, &shard, enclave, sidechain_blockstorage, node_api, tokio_handle, initialization_handler, ); } else if let Some(smatches) = matches.subcommand_matches("request-state") { println!("*** Requesting state from a registered worker \n"); let node_api = node_api_factory.create_api().expect("Failed to create parentchain node API"); sync_state::sync_state::<_, _, WorkerModeProvider>( &node_api, &extract_shard(&smatches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), enclave.as_ref(), smatches.is_present("skip-ra"), ); } else if matches.is_present("shielding-key") { setup::generate_shielding_key_file(enclave.as_ref()); } else if matches.is_present("signing-key") { setup::generate_signing_key_file(enclave.as_ref()); } else if matches.is_present("dump-ra") { info!("*** Perform RA and dump cert to disk"); enclave.dump_ra_to_disk().unwrap(); } else if matches.is_present("mrenclave") { println!("{}", enclave.get_mrenclave().unwrap().encode().to_base58()); } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { setup::init_shard( enclave.as_ref(), &extract_shard(&sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), ); } else if let Some(sub_matches) = 
matches.subcommand_matches("test") { if sub_matches.is_present("provisioning-server") { println!("*** Running Enclave MU-RA TLS server\n"); enclave_run_state_provisioning_server( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url(), sub_matches.is_present("skip-ra"), ); println!("[+] Done!"); } else if sub_matches.is_present("provisioning-client") { println!("*** Running Enclave MU-RA TLS client\n"); let shard = extract_shard( &sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref(), ); enclave_request_state_provisioning( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url_external(), &shard, sub_matches.is_present("skip-ra"), ) .unwrap(); println!("[+] Done!"); } else { tests::run_enclave_tests(sub_matches); } } else { println!("For options: use --help"); } } /// FIXME: needs some discussion (restructuring?) #[allow(clippy::too_many_arguments)] fn start_worker<E, T, D, InitializationHandler, WorkerModeProvider>( config: Config, shard: &ShardIdentifier, enclave: Arc<E>, sidechain_storage: Arc<D>, node_api: ParentchainApi, tokio_handle_getter: Arc<T>, initialization_handler: Arc<InitializationHandler>, ) where T: GetTokioHandle, E: EnclaveBase + DirectRequest + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, D: BlockPruner + FetchBlocks<SignedSidechainBlock> + Sync + Send + 'static, InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, WorkerModeProvider: ProvideWorkerMode, { println!("Integritee Worker v{}", VERSION); info!("starting worker on shard {}", shard.encode().to_base58()); // ------------------------------------------------------------------------ // check for required files check_files(); // ------------------------------------------------------------------------ // initialize the enclave let mrenclave = enclave.get_mrenclave().unwrap(); println!("MRENCLAVE={}", mrenclave.to_base58()); println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); // ------------------------------------------------------------------------ // let new workers call us for key provisioning println!("MU-RA server listening on {}", config.mu_ra_url()); let run_config = config.run_config.clone().expect("Run config missing"); let skip_ra = run_config.skip_ra; let is_development_mode = run_config.dev; let ra_url = config.mu_ra_url(); let enclave_api_key_prov = enclave.clone(); thread::spawn(move || { enclave_run_state_provisioning_server( enclave_api_key_prov.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &ra_url, skip_ra, ); info!("State provisioning server stopped."); }); let tokio_handle = tokio_handle_getter.get_handle(); #[cfg(feature = "teeracle")] let teeracle_tokio_handle = tokio_handle.clone(); // ------------------------------------------------------------------------ // Get the public key of our TEE. let tee_accountid = enclave_account(enclave.as_ref()); println!("Enclave account {:} ", &tee_accountid.to_ss58check()); // ------------------------------------------------------------------------ // Start `is_initialized` server. 
let untrusted_http_server_port = config .try_parse_untrusted_http_server_port() .expect("untrusted http server port to be a valid port number"); let initialization_handler_clone = initialization_handler.clone(); tokio_handle.spawn(async move { if let Err(e) = start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) .await { error!("Unexpected error in `is_initialized` server: {:?}", e); } }); // ------------------------------------------------------------------------ // Start prometheus metrics server. if config.enable_metrics_server { let enclave_wallet = Arc::new(EnclaveAccountInfoProvider::new(node_api.clone(), tee_accountid.clone())); let metrics_handler = Arc::new(MetricsHandler::new(enclave_wallet)); let metrics_server_port = config .try_parse_metrics_server_port() .expect("metrics server port to be a valid port number"); tokio_handle.spawn(async move { if let Err(e) = start_metrics_server(metrics_handler, metrics_server_port).await { error!("Unexpected error in Prometheus metrics server: {:?}", e); } }); } // ------------------------------------------------------------------------ // Start trusted worker rpc server if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker { let direct_invocation_server_addr = config.trusted_worker_url_internal(); let enclave_for_direct_invocation = enclave.clone(); thread::spawn(move || { println!( "[+] Trusted RPC direct invocation server listening on {}", direct_invocation_server_addr ); enclave_for_direct_invocation .init_direct_invocation_server(direct_invocation_server_addr) .unwrap(); println!("[+] RPC direct invocation server shut down"); }); } // ------------------------------------------------------------------------ // Start untrusted worker rpc server. // i.e move sidechain block importing to trusted worker. if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { sidechain_start_untrusted_rpc_server( &config, enclave.clone(), sidechain_storage.clone(), tokio_handle, ); } // ------------------------------------------------------------------------ // Init parentchain specific stuff. Needed for parentchain communication. let parentchain_handler = Arc::new(ParentchainHandler::new(node_api.clone(), enclave.clone())); let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); let nonce = node_api.get_nonce_of(&tee_accountid).unwrap(); info!("Enclave nonce = {:?}", nonce); enclave .set_nonce(nonce) .expect("Could not set nonce of enclave. Returning here..."); let metadata = node_api.metadata.clone(); let runtime_spec_version = node_api.runtime_version.spec_version; let runtime_transaction_version = node_api.runtime_version.transaction_version; enclave .set_node_metadata( NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), ) .expect("Could not set the node metadata in the enclave"); // ------------------------------------------------------------------------ // Perform a remote attestation and get an unchecked extrinsic back. let trusted_url = config.trusted_worker_url_external(); if skip_ra { println!( "[!] skipping remote attestation. Registering enclave without attestation report." ); } else { println!("[!] 
creating remote attestation report and create enclave register extrinsic."); }; let uxt = enclave.perform_ra(&trusted_url, skip_ra).unwrap(); let mut xthex = hex::encode(uxt); xthex.insert_str(0, "0x"); // Account funds if let Err(x) = setup_account_funding(&node_api, &tee_accountid, xthex.clone(), is_development_mode) { error!("Starting worker failed: {:?}", x); // Return without registering the enclave. This will fail and the transaction will be banned for 30min. return } println!("[>] Register the enclave (send the extrinsic)"); let register_enclave_xt_hash = node_api.send_extrinsic(xthex, XtStatus::Finalized).unwrap(); println!("[<] Extrinsic got finalized. Hash: {:?}\n", register_enclave_xt_hash); let register_enclave_xt_header = node_api.get_header(register_enclave_xt_hash).unwrap().unwrap(); let we_are_primary_validateer = we_are_primary_validateer(&node_api, &register_enclave_xt_header).unwrap(); if we_are_primary_validateer { println!("[+] We are the primary validateer"); } else { println!("[+] We are NOT the primary validateer"); } initialization_handler.registered_on_parentchain(); // ------------------------------------------------------------------------ // initialize teeracle interval #[cfg(feature = "teeracle")] if WorkerModeProvider::worker_mode() == WorkerMode::Teeracle { start_interval_market_update( &node_api, run_config.teeracle_update_interval, enclave.as_ref(), &teeracle_tokio_handle, ); } if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { println!("*** [+] Finished syncing light client, syncing parentchain..."); // Syncing all parentchain blocks, this might take a while.. let mut last_synced_header = parentchain_handler.sync_parentchain(last_synced_header).unwrap(); // ------------------------------------------------------------------------ // Initialize the sidechain if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { last_synced_header = sidechain_init_block_production( enclave, &register_enclave_xt_header, we_are_primary_validateer, parentchain_handler.clone(), sidechain_storage, &last_synced_header, ) .unwrap(); } // ------------------------------------------------------------------------ // start parentchain syncing loop (subscribe to header updates) thread::Builder::new() .name("parentchain_sync_loop".to_owned()) .spawn(move || { if let Err(e) = subscribe_to_parentchain_new_headers(parentchain_handler, last_synced_header) { error!("Parentchain block syncing terminated with a failure: {:?}", e); } println!("[!] Parentchain block syncing has terminated"); }) .unwrap(); } // ------------------------------------------------------------------------ if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { spawn_worker_for_shard_polling(shard, node_api.clone(), initialization_handler); } // ------------------------------------------------------------------------ // subscribe to events and react on firing println!("*** Subscribing to events"); let (sender, receiver) = channel(); let sender2 = sender.clone(); let _eventsubscriber = thread::Builder::new() .name("eventsubscriber".to_owned()) .spawn(move || { node_api.subscribe_events(sender2).unwrap(); }) .unwrap(); println!("[+] Subscribed to events. waiting..."); let timeout = Duration::from_millis(10); loop { if let Ok(msg) = receiver.recv_timeout(timeout) { if let Ok(events) = parse_events(msg.clone()) { print_events(events, sender.clone()) } } } } /// Start polling loop to wait until we have a worker for a shard registered on /// the parentchain (TEEREX WorkerForShard). 
This is the pre-requisite to be /// considered initialized and ready for the next worker to start (in sidechain mode only). /// considered initialized and ready for the next worker to start. fn spawn_worker_for_shard_polling<InitializationHandler>( shard: &ShardIdentifier, node_api: ParentchainApi, initialization_handler: Arc<InitializationHandler>, ) where InitializationHandler: TrackInitialization + Sync + Send + 'static, { let shard_for_initialized = *shard; thread::spawn(move || { const POLL_INTERVAL_SECS: u64 = 2; loop { info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); if let Ok(Some(_)) = node_api.worker_for_shard(&shard_for_initialized, None) { // Set that the service is initialized. initialization_handler.worker_for_shard_registered(); println!("[+] Found `WorkerForShard` on parentchain state"); break } thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); } }); } type Events = Vec<frame_system::EventRecord<Event, Hash>>; fn parse_events(event: String) -> Result<Events, String> { let _unhex = Vec::from_hex(event).map_err(|_| "Decoding Events Failed".to_string())?; let mut _er_enc = _unhex.as_slice(); Events::decode(&mut _er_enc).map_err(|_| "Decoding Events Failed".to_string()) } fn print_events(events: Events, _sender: Sender<String>) { for evr in &events { debug!("Decoded: phase = {:?}, event = {:?}", evr.phase, evr.event); match &evr.event { Event::Balances(be) => { info!("[+] Received balances event"); debug!("{:?}", be); match &be { pallet_balances::Event::Transfer { from: transactor, to: dest, amount: value, } => { debug!(" Transactor: {:?}", transactor.to_ss58check()); debug!(" Destination: {:?}", dest.to_ss58check()); debug!(" Value: {:?}", value); }, _ => { trace!("Ignoring unsupported balances event"); }, } }, Event::Teerex(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teerex::Event::AddedEnclave(sender, worker_url) => { println!("[+] Received AddedEnclave event"); println!(" Sender (Worker): {:?}", sender); println!(" Registered URL: {:?}", str::from_utf8(worker_url).unwrap()); }, my_node_runtime::pallet_teerex::Event::Forwarded(shard) => { println!( "[+] Received trusted call for shard {}", shard.encode().to_base58() ); }, my_node_runtime::pallet_teerex::Event::ProcessedParentchainBlock( sender, block_hash, merkle_root, block_number, ) => { info!("[+] Received ProcessedParentchainBlock event"); debug!(" From: {:?}", sender); debug!(" Block Hash: {:?}", hex::encode(block_hash)); debug!(" Merkle Root: {:?}", hex::encode(merkle_root)); debug!(" Block Number: {:?}", block_number); }, my_node_runtime::pallet_teerex::Event::ShieldFunds(incognito_account) => { info!("[+] Received ShieldFunds event"); debug!(" For: {:?}", incognito_account); }, my_node_runtime::pallet_teerex::Event::UnshieldedFunds(incognito_account) => { info!("[+] Received UnshieldedFunds event"); debug!(" For: {:?}", incognito_account); }, _ => { trace!("Ignoring unsupported pallet_teerex event"); }, } }, #[cfg(feature = "teeracle")] Event::Teeracle(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teeracle::Event::ExchangeRateUpdated( source, currency, new_value, ) => { println!("[+] Received ExchangeRateUpdated event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); println!(" Exchange rate: {:?}", new_value); }, my_node_runtime::pallet_teeracle::Event::ExchangeRateDeleted( source, currency, ) => { println!("[+] Received ExchangeRateDeleted event"); println!(" Data source: {}", source); println!(" Currency: {}", 
currency); }, my_node_runtime::pallet_teeracle::Event::AddedToWhitelist( source, mrenclave, ) => { println!("[+] Received AddedToWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, my_node_runtime::pallet_teeracle::Event::RemovedFromWhitelist( source, mrenclave, ) => { println!("[+] Received RemovedFromWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, _ => { trace!("Ignoring unsupported pallet_teeracle event"); }, } }, #[cfg(feature = "sidechain")] Event::Sidechain(re) => match &re { my_node_runtime::pallet_sidechain::Event::ProposedSidechainBlock( sender, payload, ) => { info!("[+] Received ProposedSidechainBlock event"); debug!(" From: {:?}", sender); debug!(" Payload: {:?}", hex::encode(payload)); }, _ => { trace!("Ignoring unsupported pallet_sidechain event"); }, }, _ => { trace!("Ignoring event {:?}", evr); }, } } } /// Subscribe to the node API finalized heads stream and trigger a parent chain sync /// upon receiving a new header. fn subscribe_to_parentchain_new_headers<E: EnclaveBase + Sidechain>( parentchain_handler: Arc<ParentchainHandler<ParentchainApi, E>>, mut last_synced_header: Header, ) -> Result<(), Error> { let (sender, receiver) = channel();
.subscribe_finalized_heads(sender) .map_err(Error::ApiClient)?; loop { let new_header: Header = match receiver.recv() { Ok(header_str) => serde_json::from_str(&header_str).map_err(Error::Serialization), Err(e) => Err(Error::ApiSubscriptionDisconnected(e)), }?; println!( "[+] Received finalized header update ({}), syncing parent chain...", new_header.number ); last_synced_header = parentchain_handler.sync_parentchain(last_synced_header)?; } } /// Get the public signing key of the TEE. fn enclave_account<E: EnclaveBase>(enclave_api: &E) -> AccountId32 { let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); AccountId32::from(*tee_public.as_array_ref()) } /// Checks if we are the first validateer to register on the parentchain. fn we_are_primary_validateer( node_api: &ParentchainApi, register_enclave_xt_header: &Header, ) -> Result<bool, Error> { let enclave_count_of_previous_block = node_api.enclave_count(Some(*register_enclave_xt_header.parent_hash()))?; Ok(enclave_count_of_previous_block == 0) }
//TODO: this should be implemented by parentchain_handler directly, and not via // exposed parentchain_api. Blocked by https://github.com/scs/substrate-api-client/issues/267. parentchain_handler .parentchain_api()
random_line_split
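The span masked in the row above sits inside `subscribe_to_parentchain_new_headers`, whose overall shape is a channel-driven receive-and-sync loop. The following is a self-contained sketch of that shape only; `header_sync_loop` and the `sync` closure are hypothetical stand-ins for the handler's `sync_parentchain` call and do not reproduce the project's types.

use std::sync::mpsc::{channel, Receiver};

// Sketch of the subscribe-and-sync loop: finalized headers arrive as
// strings on a channel and each one advances the last synced position.
fn header_sync_loop(
    receiver: Receiver<String>,
    mut last_synced: u64,
    mut sync: impl FnMut(u64) -> u64,
) -> u64 {
    // Runs until the sender side hangs up; the real loop only exits on error.
    while let Ok(header) = receiver.recv() {
        let number: u64 = header.trim().parse().unwrap_or(last_synced);
        println!("[+] Received finalized header update ({}), syncing...", number);
        last_synced = sync(number);
    }
    last_synced
}

fn main() {
    let (sender, receiver) = channel();
    for n in 1..=3u64 {
        sender.send(n.to_string()).unwrap();
    }
    drop(sender); // hang up so the sketch terminates
    let last = header_sync_loop(receiver, 0, |n| n);
    println!("last synced header: {}", last);
}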
main.rs
/* Copyright 2021 Integritee AG and Supercomputing Systems AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![cfg_attr(test, feature(assert_matches))] #[cfg(feature = "teeracle")] use crate::teeracle::start_interval_market_update; use crate::{ account_funding::{setup_account_funding, EnclaveAccountInfoProvider}, error::Error, globals::tokio_handle::{GetTokioHandle, GlobalTokioHandle}, initialized_service::{ start_is_initialized_server, InitializationHandler, IsInitialized, TrackInitialization, }, ocall_bridge::{ bridge_api::Bridge as OCallBridge, component_factory::OCallBridgeComponentFactory, }, parentchain_handler::{HandleParentchain, ParentchainHandler}, prometheus_metrics::{start_metrics_server, EnclaveMetricsReceiver, MetricsHandler}, sidechain_setup::{sidechain_init_block_production, sidechain_start_untrusted_rpc_server}, sync_block_broadcaster::SyncBlockBroadcaster, utils::{check_files, extract_shard}, worker::Worker, worker_peers_updater::WorkerPeersUpdater, }; use base58::ToBase58; use clap::{load_yaml, App}; use codec::{Decode, Encode}; use config::Config; use enclave::{ api::enclave_init, tls_ra::{enclave_request_state_provisioning, enclave_run_state_provisioning_server}, }; use itp_enclave_api::{ direct_request::DirectRequest, enclave_base::EnclaveBase, remote_attestation::{RemoteAttestation, TlsRemoteAttestation}, sidechain::Sidechain, teeracle_api::TeeracleApi, Enclave, }; use itp_node_api::{ api_client::{AccountApi, PalletTeerexApi, ParentchainApi}, metadata::NodeMetadata, node_api_factory::{CreateNodeApi, NodeApiFactory}, }; use itp_settings::{ files::SIDECHAIN_STORAGE_PATH, worker_mode::{ProvideWorkerMode, WorkerMode, WorkerModeProvider}, }; use its_peer_fetch::{ block_fetch_client::BlockFetcher, untrusted_peer_fetch::UntrustedPeerFetcher, }; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_storage::{interface::FetchBlocks, BlockPruner, SidechainStorageLock}; use log::*; use my_node_runtime::{Event, Hash, Header}; use sgx_types::*; use sp_core::crypto::{AccountId32, Ss58Codec}; use sp_keyring::AccountKeyring; use std::{ path::PathBuf, str, sync::{ mpsc::{channel, Sender}, Arc, }, thread, time::Duration, }; use substrate_api_client::{utils::FromHexString, Header as HeaderTrait, XtStatus}; use teerex_primitives::ShardIdentifier; mod account_funding; mod config; mod enclave; mod error; mod globals; mod initialized_service; mod ocall_bridge; mod parentchain_handler; mod prometheus_metrics; mod setup; mod sidechain_setup; mod sync_block_broadcaster; mod sync_state; #[cfg(feature = "teeracle")] mod teeracle; mod tests; mod utils; mod worker; mod worker_peers_updater; const VERSION: &str = env!("CARGO_PKG_VERSION"); pub type EnclaveWorker = Worker<Config, NodeApiFactory, Enclave, InitializationHandler<WorkerModeProvider>>; fn main() { // Setup logging env_logger::init(); let yml = load_yaml!("cli.yml"); let matches = App::from_yaml(yml).get_matches(); let config = Config::from(&matches); GlobalTokioHandle::initialize(); // log this information, don't println because some python 
scripts for GA rely on the // stdout from the service #[cfg(feature = "production")] info!("*** Starting service in SGX production mode"); #[cfg(not(feature = "production"))] info!("*** Starting service in SGX debug mode"); info!("*** Running worker in mode: {:?} \n", WorkerModeProvider::worker_mode()); let clean_reset = matches.is_present("clean-reset"); if clean_reset { setup::purge_files_from_cwd().unwrap(); } // build the entire dependency tree let tokio_handle = Arc::new(GlobalTokioHandle {}); let sidechain_blockstorage = Arc::new( SidechainStorageLock::<SignedSidechainBlock>::new(PathBuf::from(&SIDECHAIN_STORAGE_PATH)) .unwrap(), ); let node_api_factory = Arc::new(NodeApiFactory::new(config.node_url(), AccountKeyring::Alice.pair())); let enclave = Arc::new(enclave_init(&config).unwrap()); let initialization_handler = Arc::new(InitializationHandler::default()); let worker = Arc::new(EnclaveWorker::new( config.clone(), enclave.clone(), node_api_factory.clone(), initialization_handler.clone(), Vec::new(), )); let sync_block_broadcaster = Arc::new(SyncBlockBroadcaster::new(tokio_handle.clone(), worker.clone())); let peer_updater = Arc::new(WorkerPeersUpdater::new(worker)); let untrusted_peer_fetcher = UntrustedPeerFetcher::new(node_api_factory.clone()); let peer_sidechain_block_fetcher = Arc::new(BlockFetcher::<SignedSidechainBlock, _>::new(untrusted_peer_fetcher)); let enclave_metrics_receiver = Arc::new(EnclaveMetricsReceiver {}); // initialize o-call bridge with a concrete factory implementation OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( node_api_factory.clone(), sync_block_broadcaster, enclave.clone(), sidechain_blockstorage.clone(), peer_updater, peer_sidechain_block_fetcher, tokio_handle.clone(), enclave_metrics_receiver, ))); if let Some(run_config) = &config.run_config { let shard = extract_shard(&run_config.shard, enclave.as_ref()); println!("Worker Config: {:?}", config); if clean_reset { setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); } let node_api = node_api_factory.create_api().expect("Failed to create parentchain node API"); if run_config.request_state { sync_state::sync_state::<_, _, WorkerModeProvider>( &node_api, &shard, enclave.as_ref(), run_config.skip_ra, ); } start_worker::<_, _, _, _, WorkerModeProvider>( config, &shard, enclave, sidechain_blockstorage, node_api, tokio_handle, initialization_handler, ); } else if let Some(smatches) = matches.subcommand_matches("request-state") { println!("*** Requesting state from a registered worker \n"); let node_api = node_api_factory.create_api().expect("Failed to create parentchain node API"); sync_state::sync_state::<_, _, WorkerModeProvider>( &node_api, &extract_shard(&smatches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), enclave.as_ref(), smatches.is_present("skip-ra"), ); } else if matches.is_present("shielding-key") { setup::generate_shielding_key_file(enclave.as_ref()); } else if matches.is_present("signing-key") { setup::generate_signing_key_file(enclave.as_ref()); } else if matches.is_present("dump-ra") { info!("*** Perform RA and dump cert to disk"); enclave.dump_ra_to_disk().unwrap(); } else if matches.is_present("mrenclave") { println!("{}", enclave.get_mrenclave().unwrap().encode().to_base58()); } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { setup::init_shard( enclave.as_ref(), &extract_shard(&sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), ); } else if let Some(sub_matches) = 
matches.subcommand_matches("test") { if sub_matches.is_present("provisioning-server") { println!("*** Running Enclave MU-RA TLS server\n"); enclave_run_state_provisioning_server( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url(), sub_matches.is_present("skip-ra"), ); println!("[+] Done!"); } else if sub_matches.is_present("provisioning-client") { println!("*** Running Enclave MU-RA TLS client\n"); let shard = extract_shard( &sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref(), ); enclave_request_state_provisioning( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url_external(), &shard, sub_matches.is_present("skip-ra"), ) .unwrap(); println!("[+] Done!"); } else { tests::run_enclave_tests(sub_matches); } } else { println!("For options: use --help"); } } /// FIXME: needs some discussion (restructuring?) #[allow(clippy::too_many_arguments)] fn start_worker<E, T, D, InitializationHandler, WorkerModeProvider>( config: Config, shard: &ShardIdentifier, enclave: Arc<E>, sidechain_storage: Arc<D>, node_api: ParentchainApi, tokio_handle_getter: Arc<T>, initialization_handler: Arc<InitializationHandler>, ) where T: GetTokioHandle, E: EnclaveBase + DirectRequest + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, D: BlockPruner + FetchBlocks<SignedSidechainBlock> + Sync + Send + 'static, InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, WorkerModeProvider: ProvideWorkerMode, { println!("Integritee Worker v{}", VERSION); info!("starting worker on shard {}", shard.encode().to_base58()); // ------------------------------------------------------------------------ // check for required files check_files(); // ------------------------------------------------------------------------ // initialize the enclave let mrenclave = enclave.get_mrenclave().unwrap(); println!("MRENCLAVE={}", mrenclave.to_base58()); println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); // ------------------------------------------------------------------------ // let new workers call us for key provisioning println!("MU-RA server listening on {}", config.mu_ra_url()); let run_config = config.run_config.clone().expect("Run config missing"); let skip_ra = run_config.skip_ra; let is_development_mode = run_config.dev; let ra_url = config.mu_ra_url(); let enclave_api_key_prov = enclave.clone(); thread::spawn(move || { enclave_run_state_provisioning_server( enclave_api_key_prov.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &ra_url, skip_ra, ); info!("State provisioning server stopped."); }); let tokio_handle = tokio_handle_getter.get_handle(); #[cfg(feature = "teeracle")] let teeracle_tokio_handle = tokio_handle.clone(); // ------------------------------------------------------------------------ // Get the public key of our TEE. let tee_accountid = enclave_account(enclave.as_ref()); println!("Enclave account {:} ", &tee_accountid.to_ss58check()); // ------------------------------------------------------------------------ // Start `is_initialized` server. 
let untrusted_http_server_port = config .try_parse_untrusted_http_server_port() .expect("untrusted http server port to be a valid port number"); let initialization_handler_clone = initialization_handler.clone(); tokio_handle.spawn(async move { if let Err(e) = start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) .await { error!("Unexpected error in `is_initialized` server: {:?}", e); } }); // ------------------------------------------------------------------------ // Start prometheus metrics server. if config.enable_metrics_server { let enclave_wallet = Arc::new(EnclaveAccountInfoProvider::new(node_api.clone(), tee_accountid.clone())); let metrics_handler = Arc::new(MetricsHandler::new(enclave_wallet)); let metrics_server_port = config .try_parse_metrics_server_port() .expect("metrics server port to be a valid port number"); tokio_handle.spawn(async move { if let Err(e) = start_metrics_server(metrics_handler, metrics_server_port).await { error!("Unexpected error in Prometheus metrics server: {:?}", e); } }); } // ------------------------------------------------------------------------ // Start trusted worker rpc server if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker { let direct_invocation_server_addr = config.trusted_worker_url_internal(); let enclave_for_direct_invocation = enclave.clone(); thread::spawn(move || { println!( "[+] Trusted RPC direct invocation server listening on {}", direct_invocation_server_addr ); enclave_for_direct_invocation .init_direct_invocation_server(direct_invocation_server_addr) .unwrap(); println!("[+] RPC direct invocation server shut down"); }); } // ------------------------------------------------------------------------ // Start untrusted worker rpc server. // i.e move sidechain block importing to trusted worker. if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { sidechain_start_untrusted_rpc_server( &config, enclave.clone(), sidechain_storage.clone(), tokio_handle, ); } // ------------------------------------------------------------------------ // Init parentchain specific stuff. Needed for parentchain communication. let parentchain_handler = Arc::new(ParentchainHandler::new(node_api.clone(), enclave.clone())); let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); let nonce = node_api.get_nonce_of(&tee_accountid).unwrap(); info!("Enclave nonce = {:?}", nonce); enclave .set_nonce(nonce) .expect("Could not set nonce of enclave. Returning here..."); let metadata = node_api.metadata.clone(); let runtime_spec_version = node_api.runtime_version.spec_version; let runtime_transaction_version = node_api.runtime_version.transaction_version; enclave .set_node_metadata( NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), ) .expect("Could not set the node metadata in the enclave"); // ------------------------------------------------------------------------ // Perform a remote attestation and get an unchecked extrinsic back. let trusted_url = config.trusted_worker_url_external(); if skip_ra { println!( "[!] skipping remote attestation. Registering enclave without attestation report." ); } else { println!("[!] 
creating remote attestation report and create enclave register extrinsic."); }; let uxt = enclave.perform_ra(&trusted_url, skip_ra).unwrap(); let mut xthex = hex::encode(uxt); xthex.insert_str(0, "0x"); // Account funds if let Err(x) = setup_account_funding(&node_api, &tee_accountid, xthex.clone(), is_development_mode) { error!("Starting worker failed: {:?}", x); // Return without registering the enclave. This will fail and the transaction will be banned for 30min. return } println!("[>] Register the enclave (send the extrinsic)"); let register_enclave_xt_hash = node_api.send_extrinsic(xthex, XtStatus::Finalized).unwrap(); println!("[<] Extrinsic got finalized. Hash: {:?}\n", register_enclave_xt_hash); let register_enclave_xt_header = node_api.get_header(register_enclave_xt_hash).unwrap().unwrap(); let we_are_primary_validateer = we_are_primary_validateer(&node_api, &register_enclave_xt_header).unwrap(); if we_are_primary_validateer { println!("[+] We are the primary validateer"); } else { println!("[+] We are NOT the primary validateer"); } initialization_handler.registered_on_parentchain(); // ------------------------------------------------------------------------ // initialize teeracle interval #[cfg(feature = "teeracle")] if WorkerModeProvider::worker_mode() == WorkerMode::Teeracle { start_interval_market_update( &node_api, run_config.teeracle_update_interval, enclave.as_ref(), &teeracle_tokio_handle, ); } if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { println!("*** [+] Finished syncing light client, syncing parentchain..."); // Syncing all parentchain blocks, this might take a while.. let mut last_synced_header = parentchain_handler.sync_parentchain(last_synced_header).unwrap(); // ------------------------------------------------------------------------ // Initialize the sidechain if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { last_synced_header = sidechain_init_block_production( enclave, &register_enclave_xt_header, we_are_primary_validateer, parentchain_handler.clone(), sidechain_storage, &last_synced_header, ) .unwrap(); } // ------------------------------------------------------------------------ // start parentchain syncing loop (subscribe to header updates) thread::Builder::new() .name("parentchain_sync_loop".to_owned()) .spawn(move || { if let Err(e) = subscribe_to_parentchain_new_headers(parentchain_handler, last_synced_header) { error!("Parentchain block syncing terminated with a failure: {:?}", e); } println!("[!] Parentchain block syncing has terminated"); }) .unwrap(); } // ------------------------------------------------------------------------ if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { spawn_worker_for_shard_polling(shard, node_api.clone(), initialization_handler); } // ------------------------------------------------------------------------ // subscribe to events and react on firing println!("*** Subscribing to events"); let (sender, receiver) = channel(); let sender2 = sender.clone(); let _eventsubscriber = thread::Builder::new() .name("eventsubscriber".to_owned()) .spawn(move || { node_api.subscribe_events(sender2).unwrap(); }) .unwrap(); println!("[+] Subscribed to events. waiting..."); let timeout = Duration::from_millis(10); loop { if let Ok(msg) = receiver.recv_timeout(timeout) { if let Ok(events) = parse_events(msg.clone()) { print_events(events, sender.clone()) } } } } /// Start polling loop to wait until we have a worker for a shard registered on /// the parentchain (TEEREX WorkerForShard). 
This is the pre-requisite to be /// considered initialized and ready for the next worker to start (in sidechain mode only). /// considered initialized and ready for the next worker to start. fn spawn_worker_for_shard_polling<InitializationHandler>( shard: &ShardIdentifier, node_api: ParentchainApi, initialization_handler: Arc<InitializationHandler>, ) where InitializationHandler: TrackInitialization + Sync + Send + 'static, { let shard_for_initialized = *shard; thread::spawn(move || { const POLL_INTERVAL_SECS: u64 = 2; loop { info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); if let Ok(Some(_)) = node_api.worker_for_shard(&shard_for_initialized, None) { // Set that the service is initialized. initialization_handler.worker_for_shard_registered(); println!("[+] Found `WorkerForShard` on parentchain state"); break } thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); } }); } type Events = Vec<frame_system::EventRecord<Event, Hash>>; fn parse_events(event: String) -> Result<Events, String> { let _unhex = Vec::from_hex(event).map_err(|_| "Decoding Events Failed".to_string())?; let mut _er_enc = _unhex.as_slice(); Events::decode(&mut _er_enc).map_err(|_| "Decoding Events Failed".to_string()) } fn print_events(events: Events, _sender: Sender<String>) { for evr in &events { debug!("Decoded: phase = {:?}, event = {:?}", evr.phase, evr.event); match &evr.event { Event::Balances(be) => { info!("[+] Received balances event"); debug!("{:?}", be); match &be { pallet_balances::Event::Transfer { from: transactor, to: dest, amount: value, } => { debug!(" Transactor: {:?}", transactor.to_ss58check()); debug!(" Destination: {:?}", dest.to_ss58check()); debug!(" Value: {:?}", value); }, _ => { trace!("Ignoring unsupported balances event"); }, } }, Event::Teerex(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teerex::Event::AddedEnclave(sender, worker_url) => { println!("[+] Received AddedEnclave event"); println!(" Sender (Worker): {:?}", sender); println!(" Registered URL: {:?}", str::from_utf8(worker_url).unwrap()); }, my_node_runtime::pallet_teerex::Event::Forwarded(shard) => { println!( "[+] Received trusted call for shard {}", shard.encode().to_base58() ); }, my_node_runtime::pallet_teerex::Event::ProcessedParentchainBlock( sender, block_hash, merkle_root, block_number, ) => { info!("[+] Received ProcessedParentchainBlock event"); debug!(" From: {:?}", sender); debug!(" Block Hash: {:?}", hex::encode(block_hash)); debug!(" Merkle Root: {:?}", hex::encode(merkle_root)); debug!(" Block Number: {:?}", block_number); }, my_node_runtime::pallet_teerex::Event::ShieldFunds(incognito_account) => { info!("[+] Received ShieldFunds event"); debug!(" For: {:?}", incognito_account); }, my_node_runtime::pallet_teerex::Event::UnshieldedFunds(incognito_account) => { info!("[+] Received UnshieldedFunds event"); debug!(" For: {:?}", incognito_account); }, _ => { trace!("Ignoring unsupported pallet_teerex event"); }, } }, #[cfg(feature = "teeracle")] Event::Teeracle(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teeracle::Event::ExchangeRateUpdated( source, currency, new_value, ) => { println!("[+] Received ExchangeRateUpdated event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); println!(" Exchange rate: {:?}", new_value); }, my_node_runtime::pallet_teeracle::Event::ExchangeRateDeleted( source, currency, ) => { println!("[+] Received ExchangeRateDeleted event"); println!(" Data source: {}", source); println!(" Currency: {}", 
currency); }, my_node_runtime::pallet_teeracle::Event::AddedToWhitelist( source, mrenclave, ) => { println!("[+] Received AddedToWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, my_node_runtime::pallet_teeracle::Event::RemovedFromWhitelist( source, mrenclave, ) => { println!("[+] Received RemovedFromWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, _ => { trace!("Ignoring unsupported pallet_teeracle event"); }, } }, #[cfg(feature = "sidechain")] Event::Sidechain(re) => match &re { my_node_runtime::pallet_sidechain::Event::ProposedSidechainBlock( sender, payload, ) => { info!("[+] Received ProposedSidechainBlock event"); debug!(" From: {:?}", sender); debug!(" Payload: {:?}", hex::encode(payload)); }, _ => { trace!("Ignoring unsupported pallet_sidechain event"); }, }, _ => { trace!("Ignoring event {:?}", evr); }, } } } /// Subscribe to the node API finalized heads stream and trigger a parent chain sync /// upon receiving a new header. fn subscribe_to_parentchain_new_headers<E: EnclaveBase + Sidechain>( parentchain_handler: Arc<ParentchainHandler<ParentchainApi, E>>, mut last_synced_header: Header, ) -> Result<(), Error> { let (sender, receiver) = channel(); //TODO: this should be implemented by parentchain_handler directly, and not via // exposed parentchain_api. Blocked by https://github.com/scs/substrate-api-client/issues/267. parentchain_handler .parentchain_api() .subscribe_finalized_heads(sender) .map_err(Error::ApiClient)?; loop { let new_header: Header = match receiver.recv() { Ok(header_str) => serde_json::from_str(&header_str).map_err(Error::Serialization), Err(e) => Err(Error::ApiSubscriptionDisconnected(e)), }?; println!( "[+] Received finalized header update ({}), syncing parent chain...", new_header.number ); last_synced_header = parentchain_handler.sync_parentchain(last_synced_header)?; } } /// Get the public signing key of the TEE. fn enclave_account<E: EnclaveBase>(enclave_api: &E) -> AccountId32 { let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); AccountId32::from(*tee_public.as_array_ref()) } /// Checks if we are the first validateer to register on the parentchain. fn
( node_api: &ParentchainApi, register_enclave_xt_header: &Header, ) -> Result<bool, Error> { let enclave_count_of_previous_block = node_api.enclave_count(Some(*register_enclave_xt_header.parent_hash()))?; Ok(enclave_count_of_previous_block == 0) }
we_are_primary_validateer
identifier_name
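The `we_are_primary_validateer` split above reduces to a single rule: read the registered-enclave count at the parent hash of the block that finalized our own registration extrinsic, and treat a count of zero as "we registered first". Below is a minimal, self-contained Rust sketch of just that rule; the function name and the sample counts are illustrative only, and the substrate-api-client query (`enclave_count`) is replaced by a plain integer argument, so this is not the worker's real API.

// Sketch only: the decision rule behind `we_are_primary_validateer`,
// with the node query replaced by a plain integer argument.
fn is_primary_validateer(enclave_count_of_previous_block: u64) -> bool {
    // Nobody was registered before our extrinsic was included => we are the first validateer.
    enclave_count_of_previous_block == 0
}

fn main() {
    assert!(is_primary_validateer(0));   // hypothetical: empty registry before our registration
    assert!(!is_primary_validateer(3));  // hypothetical: three enclaves were already registered
    println!("primary-validateer rule holds for the sample counts");
}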
main.rs
/* Copyright 2021 Integritee AG and Supercomputing Systems AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![cfg_attr(test, feature(assert_matches))] #[cfg(feature = "teeracle")] use crate::teeracle::start_interval_market_update; use crate::{ account_funding::{setup_account_funding, EnclaveAccountInfoProvider}, error::Error, globals::tokio_handle::{GetTokioHandle, GlobalTokioHandle}, initialized_service::{ start_is_initialized_server, InitializationHandler, IsInitialized, TrackInitialization, }, ocall_bridge::{ bridge_api::Bridge as OCallBridge, component_factory::OCallBridgeComponentFactory, }, parentchain_handler::{HandleParentchain, ParentchainHandler}, prometheus_metrics::{start_metrics_server, EnclaveMetricsReceiver, MetricsHandler}, sidechain_setup::{sidechain_init_block_production, sidechain_start_untrusted_rpc_server}, sync_block_broadcaster::SyncBlockBroadcaster, utils::{check_files, extract_shard}, worker::Worker, worker_peers_updater::WorkerPeersUpdater, }; use base58::ToBase58; use clap::{load_yaml, App}; use codec::{Decode, Encode}; use config::Config; use enclave::{ api::enclave_init, tls_ra::{enclave_request_state_provisioning, enclave_run_state_provisioning_server}, }; use itp_enclave_api::{ direct_request::DirectRequest, enclave_base::EnclaveBase, remote_attestation::{RemoteAttestation, TlsRemoteAttestation}, sidechain::Sidechain, teeracle_api::TeeracleApi, Enclave, }; use itp_node_api::{ api_client::{AccountApi, PalletTeerexApi, ParentchainApi}, metadata::NodeMetadata, node_api_factory::{CreateNodeApi, NodeApiFactory}, }; use itp_settings::{ files::SIDECHAIN_STORAGE_PATH, worker_mode::{ProvideWorkerMode, WorkerMode, WorkerModeProvider}, }; use its_peer_fetch::{ block_fetch_client::BlockFetcher, untrusted_peer_fetch::UntrustedPeerFetcher, }; use its_primitives::types::block::SignedBlock as SignedSidechainBlock; use its_storage::{interface::FetchBlocks, BlockPruner, SidechainStorageLock}; use log::*; use my_node_runtime::{Event, Hash, Header}; use sgx_types::*; use sp_core::crypto::{AccountId32, Ss58Codec}; use sp_keyring::AccountKeyring; use std::{ path::PathBuf, str, sync::{ mpsc::{channel, Sender}, Arc, }, thread, time::Duration, }; use substrate_api_client::{utils::FromHexString, Header as HeaderTrait, XtStatus}; use teerex_primitives::ShardIdentifier; mod account_funding; mod config; mod enclave; mod error; mod globals; mod initialized_service; mod ocall_bridge; mod parentchain_handler; mod prometheus_metrics; mod setup; mod sidechain_setup; mod sync_block_broadcaster; mod sync_state; #[cfg(feature = "teeracle")] mod teeracle; mod tests; mod utils; mod worker; mod worker_peers_updater; const VERSION: &str = env!("CARGO_PKG_VERSION"); pub type EnclaveWorker = Worker<Config, NodeApiFactory, Enclave, InitializationHandler<WorkerModeProvider>>; fn main() { // Setup logging env_logger::init(); let yml = load_yaml!("cli.yml"); let matches = App::from_yaml(yml).get_matches(); let config = Config::from(&matches); GlobalTokioHandle::initialize(); // log this information, don't println because some python 
scripts for GA rely on the // stdout from the service #[cfg(feature = "production")] info!("*** Starting service in SGX production mode"); #[cfg(not(feature = "production"))] info!("*** Starting service in SGX debug mode"); info!("*** Running worker in mode: {:?} \n", WorkerModeProvider::worker_mode()); let clean_reset = matches.is_present("clean-reset"); if clean_reset { setup::purge_files_from_cwd().unwrap(); } // build the entire dependency tree let tokio_handle = Arc::new(GlobalTokioHandle {}); let sidechain_blockstorage = Arc::new( SidechainStorageLock::<SignedSidechainBlock>::new(PathBuf::from(&SIDECHAIN_STORAGE_PATH)) .unwrap(), ); let node_api_factory = Arc::new(NodeApiFactory::new(config.node_url(), AccountKeyring::Alice.pair())); let enclave = Arc::new(enclave_init(&config).unwrap()); let initialization_handler = Arc::new(InitializationHandler::default()); let worker = Arc::new(EnclaveWorker::new( config.clone(), enclave.clone(), node_api_factory.clone(), initialization_handler.clone(), Vec::new(), )); let sync_block_broadcaster = Arc::new(SyncBlockBroadcaster::new(tokio_handle.clone(), worker.clone())); let peer_updater = Arc::new(WorkerPeersUpdater::new(worker)); let untrusted_peer_fetcher = UntrustedPeerFetcher::new(node_api_factory.clone()); let peer_sidechain_block_fetcher = Arc::new(BlockFetcher::<SignedSidechainBlock, _>::new(untrusted_peer_fetcher)); let enclave_metrics_receiver = Arc::new(EnclaveMetricsReceiver {}); // initialize o-call bridge with a concrete factory implementation OCallBridge::initialize(Arc::new(OCallBridgeComponentFactory::new( node_api_factory.clone(), sync_block_broadcaster, enclave.clone(), sidechain_blockstorage.clone(), peer_updater, peer_sidechain_block_fetcher, tokio_handle.clone(), enclave_metrics_receiver, ))); if let Some(run_config) = &config.run_config { let shard = extract_shard(&run_config.shard, enclave.as_ref()); println!("Worker Config: {:?}", config); if clean_reset { setup::initialize_shard_and_keys(enclave.as_ref(), &shard).unwrap(); } let node_api = node_api_factory.create_api().expect("Failed to create parentchain node API"); if run_config.request_state { sync_state::sync_state::<_, _, WorkerModeProvider>( &node_api, &shard, enclave.as_ref(), run_config.skip_ra, ); } start_worker::<_, _, _, _, WorkerModeProvider>( config, &shard, enclave, sidechain_blockstorage, node_api, tokio_handle, initialization_handler, ); } else if let Some(smatches) = matches.subcommand_matches("request-state") { println!("*** Requesting state from a registered worker \n"); let node_api = node_api_factory.create_api().expect("Failed to create parentchain node API"); sync_state::sync_state::<_, _, WorkerModeProvider>( &node_api, &extract_shard(&smatches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), enclave.as_ref(), smatches.is_present("skip-ra"), ); } else if matches.is_present("shielding-key") { setup::generate_shielding_key_file(enclave.as_ref()); } else if matches.is_present("signing-key") { setup::generate_signing_key_file(enclave.as_ref()); } else if matches.is_present("dump-ra") { info!("*** Perform RA and dump cert to disk"); enclave.dump_ra_to_disk().unwrap(); } else if matches.is_present("mrenclave") { println!("{}", enclave.get_mrenclave().unwrap().encode().to_base58()); } else if let Some(sub_matches) = matches.subcommand_matches("init-shard") { setup::init_shard( enclave.as_ref(), &extract_shard(&sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref()), ); } else if let Some(sub_matches) = 
matches.subcommand_matches("test") { if sub_matches.is_present("provisioning-server") { println!("*** Running Enclave MU-RA TLS server\n"); enclave_run_state_provisioning_server( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url(), sub_matches.is_present("skip-ra"), ); println!("[+] Done!"); } else if sub_matches.is_present("provisioning-client") { println!("*** Running Enclave MU-RA TLS client\n"); let shard = extract_shard( &sub_matches.value_of("shard").map(|s| s.to_string()), enclave.as_ref(), ); enclave_request_state_provisioning( enclave.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &config.mu_ra_url_external(), &shard, sub_matches.is_present("skip-ra"), ) .unwrap(); println!("[+] Done!"); } else { tests::run_enclave_tests(sub_matches); } } else { println!("For options: use --help"); } } /// FIXME: needs some discussion (restructuring?) #[allow(clippy::too_many_arguments)] fn start_worker<E, T, D, InitializationHandler, WorkerModeProvider>( config: Config, shard: &ShardIdentifier, enclave: Arc<E>, sidechain_storage: Arc<D>, node_api: ParentchainApi, tokio_handle_getter: Arc<T>, initialization_handler: Arc<InitializationHandler>, ) where T: GetTokioHandle, E: EnclaveBase + DirectRequest + Sidechain + RemoteAttestation + TlsRemoteAttestation + TeeracleApi + Clone, D: BlockPruner + FetchBlocks<SignedSidechainBlock> + Sync + Send + 'static, InitializationHandler: TrackInitialization + IsInitialized + Sync + Send + 'static, WorkerModeProvider: ProvideWorkerMode, { println!("Integritee Worker v{}", VERSION); info!("starting worker on shard {}", shard.encode().to_base58()); // ------------------------------------------------------------------------ // check for required files check_files(); // ------------------------------------------------------------------------ // initialize the enclave let mrenclave = enclave.get_mrenclave().unwrap(); println!("MRENCLAVE={}", mrenclave.to_base58()); println!("MRENCLAVE in hex {:?}", hex::encode(mrenclave)); // ------------------------------------------------------------------------ // let new workers call us for key provisioning println!("MU-RA server listening on {}", config.mu_ra_url()); let run_config = config.run_config.clone().expect("Run config missing"); let skip_ra = run_config.skip_ra; let is_development_mode = run_config.dev; let ra_url = config.mu_ra_url(); let enclave_api_key_prov = enclave.clone(); thread::spawn(move || { enclave_run_state_provisioning_server( enclave_api_key_prov.as_ref(), sgx_quote_sign_type_t::SGX_UNLINKABLE_SIGNATURE, &ra_url, skip_ra, ); info!("State provisioning server stopped."); }); let tokio_handle = tokio_handle_getter.get_handle(); #[cfg(feature = "teeracle")] let teeracle_tokio_handle = tokio_handle.clone(); // ------------------------------------------------------------------------ // Get the public key of our TEE. let tee_accountid = enclave_account(enclave.as_ref()); println!("Enclave account {:} ", &tee_accountid.to_ss58check()); // ------------------------------------------------------------------------ // Start `is_initialized` server. 
let untrusted_http_server_port = config .try_parse_untrusted_http_server_port() .expect("untrusted http server port to be a valid port number"); let initialization_handler_clone = initialization_handler.clone(); tokio_handle.spawn(async move { if let Err(e) = start_is_initialized_server(initialization_handler_clone, untrusted_http_server_port) .await { error!("Unexpected error in `is_initialized` server: {:?}", e); } }); // ------------------------------------------------------------------------ // Start prometheus metrics server. if config.enable_metrics_server { let enclave_wallet = Arc::new(EnclaveAccountInfoProvider::new(node_api.clone(), tee_accountid.clone())); let metrics_handler = Arc::new(MetricsHandler::new(enclave_wallet)); let metrics_server_port = config .try_parse_metrics_server_port() .expect("metrics server port to be a valid port number"); tokio_handle.spawn(async move { if let Err(e) = start_metrics_server(metrics_handler, metrics_server_port).await { error!("Unexpected error in Prometheus metrics server: {:?}", e); } }); } // ------------------------------------------------------------------------ // Start trusted worker rpc server if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain || WorkerModeProvider::worker_mode() == WorkerMode::OffChainWorker { let direct_invocation_server_addr = config.trusted_worker_url_internal(); let enclave_for_direct_invocation = enclave.clone(); thread::spawn(move || { println!( "[+] Trusted RPC direct invocation server listening on {}", direct_invocation_server_addr ); enclave_for_direct_invocation .init_direct_invocation_server(direct_invocation_server_addr) .unwrap(); println!("[+] RPC direct invocation server shut down"); }); } // ------------------------------------------------------------------------ // Start untrusted worker rpc server. // i.e move sidechain block importing to trusted worker. if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { sidechain_start_untrusted_rpc_server( &config, enclave.clone(), sidechain_storage.clone(), tokio_handle, ); } // ------------------------------------------------------------------------ // Init parentchain specific stuff. Needed for parentchain communication. let parentchain_handler = Arc::new(ParentchainHandler::new(node_api.clone(), enclave.clone())); let last_synced_header = parentchain_handler.init_parentchain_components().unwrap(); let nonce = node_api.get_nonce_of(&tee_accountid).unwrap(); info!("Enclave nonce = {:?}", nonce); enclave .set_nonce(nonce) .expect("Could not set nonce of enclave. Returning here..."); let metadata = node_api.metadata.clone(); let runtime_spec_version = node_api.runtime_version.spec_version; let runtime_transaction_version = node_api.runtime_version.transaction_version; enclave .set_node_metadata( NodeMetadata::new(metadata, runtime_spec_version, runtime_transaction_version).encode(), ) .expect("Could not set the node metadata in the enclave"); // ------------------------------------------------------------------------ // Perform a remote attestation and get an unchecked extrinsic back. let trusted_url = config.trusted_worker_url_external(); if skip_ra { println!( "[!] skipping remote attestation. Registering enclave without attestation report." ); } else { println!("[!] 
creating remote attestation report and create enclave register extrinsic."); }; let uxt = enclave.perform_ra(&trusted_url, skip_ra).unwrap(); let mut xthex = hex::encode(uxt); xthex.insert_str(0, "0x"); // Account funds if let Err(x) = setup_account_funding(&node_api, &tee_accountid, xthex.clone(), is_development_mode) { error!("Starting worker failed: {:?}", x); // Return without registering the enclave. This will fail and the transaction will be banned for 30min. return } println!("[>] Register the enclave (send the extrinsic)"); let register_enclave_xt_hash = node_api.send_extrinsic(xthex, XtStatus::Finalized).unwrap(); println!("[<] Extrinsic got finalized. Hash: {:?}\n", register_enclave_xt_hash); let register_enclave_xt_header = node_api.get_header(register_enclave_xt_hash).unwrap().unwrap(); let we_are_primary_validateer = we_are_primary_validateer(&node_api, &register_enclave_xt_header).unwrap(); if we_are_primary_validateer { println!("[+] We are the primary validateer"); } else { println!("[+] We are NOT the primary validateer"); } initialization_handler.registered_on_parentchain(); // ------------------------------------------------------------------------ // initialize teeracle interval #[cfg(feature = "teeracle")] if WorkerModeProvider::worker_mode() == WorkerMode::Teeracle
if WorkerModeProvider::worker_mode() != WorkerMode::Teeracle { println!("*** [+] Finished syncing light client, syncing parentchain..."); // Syncing all parentchain blocks, this might take a while.. let mut last_synced_header = parentchain_handler.sync_parentchain(last_synced_header).unwrap(); // ------------------------------------------------------------------------ // Initialize the sidechain if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { last_synced_header = sidechain_init_block_production( enclave, &register_enclave_xt_header, we_are_primary_validateer, parentchain_handler.clone(), sidechain_storage, &last_synced_header, ) .unwrap(); } // ------------------------------------------------------------------------ // start parentchain syncing loop (subscribe to header updates) thread::Builder::new() .name("parentchain_sync_loop".to_owned()) .spawn(move || { if let Err(e) = subscribe_to_parentchain_new_headers(parentchain_handler, last_synced_header) { error!("Parentchain block syncing terminated with a failure: {:?}", e); } println!("[!] Parentchain block syncing has terminated"); }) .unwrap(); } // ------------------------------------------------------------------------ if WorkerModeProvider::worker_mode() == WorkerMode::Sidechain { spawn_worker_for_shard_polling(shard, node_api.clone(), initialization_handler); } // ------------------------------------------------------------------------ // subscribe to events and react on firing println!("*** Subscribing to events"); let (sender, receiver) = channel(); let sender2 = sender.clone(); let _eventsubscriber = thread::Builder::new() .name("eventsubscriber".to_owned()) .spawn(move || { node_api.subscribe_events(sender2).unwrap(); }) .unwrap(); println!("[+] Subscribed to events. waiting..."); let timeout = Duration::from_millis(10); loop { if let Ok(msg) = receiver.recv_timeout(timeout) { if let Ok(events) = parse_events(msg.clone()) { print_events(events, sender.clone()) } } } } /// Start polling loop to wait until we have a worker for a shard registered on /// the parentchain (TEEREX WorkerForShard). This is the pre-requisite to be /// considered initialized and ready for the next worker to start (in sidechain mode only). /// considered initialized and ready for the next worker to start. fn spawn_worker_for_shard_polling<InitializationHandler>( shard: &ShardIdentifier, node_api: ParentchainApi, initialization_handler: Arc<InitializationHandler>, ) where InitializationHandler: TrackInitialization + Sync + Send + 'static, { let shard_for_initialized = *shard; thread::spawn(move || { const POLL_INTERVAL_SECS: u64 = 2; loop { info!("Polling for worker for shard ({} seconds interval)", POLL_INTERVAL_SECS); if let Ok(Some(_)) = node_api.worker_for_shard(&shard_for_initialized, None) { // Set that the service is initialized. 
initialization_handler.worker_for_shard_registered(); println!("[+] Found `WorkerForShard` on parentchain state"); break } thread::sleep(Duration::from_secs(POLL_INTERVAL_SECS)); } }); } type Events = Vec<frame_system::EventRecord<Event, Hash>>; fn parse_events(event: String) -> Result<Events, String> { let _unhex = Vec::from_hex(event).map_err(|_| "Decoding Events Failed".to_string())?; let mut _er_enc = _unhex.as_slice(); Events::decode(&mut _er_enc).map_err(|_| "Decoding Events Failed".to_string()) } fn print_events(events: Events, _sender: Sender<String>) { for evr in &events { debug!("Decoded: phase = {:?}, event = {:?}", evr.phase, evr.event); match &evr.event { Event::Balances(be) => { info!("[+] Received balances event"); debug!("{:?}", be); match &be { pallet_balances::Event::Transfer { from: transactor, to: dest, amount: value, } => { debug!(" Transactor: {:?}", transactor.to_ss58check()); debug!(" Destination: {:?}", dest.to_ss58check()); debug!(" Value: {:?}", value); }, _ => { trace!("Ignoring unsupported balances event"); }, } }, Event::Teerex(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teerex::Event::AddedEnclave(sender, worker_url) => { println!("[+] Received AddedEnclave event"); println!(" Sender (Worker): {:?}", sender); println!(" Registered URL: {:?}", str::from_utf8(worker_url).unwrap()); }, my_node_runtime::pallet_teerex::Event::Forwarded(shard) => { println!( "[+] Received trusted call for shard {}", shard.encode().to_base58() ); }, my_node_runtime::pallet_teerex::Event::ProcessedParentchainBlock( sender, block_hash, merkle_root, block_number, ) => { info!("[+] Received ProcessedParentchainBlock event"); debug!(" From: {:?}", sender); debug!(" Block Hash: {:?}", hex::encode(block_hash)); debug!(" Merkle Root: {:?}", hex::encode(merkle_root)); debug!(" Block Number: {:?}", block_number); }, my_node_runtime::pallet_teerex::Event::ShieldFunds(incognito_account) => { info!("[+] Received ShieldFunds event"); debug!(" For: {:?}", incognito_account); }, my_node_runtime::pallet_teerex::Event::UnshieldedFunds(incognito_account) => { info!("[+] Received UnshieldedFunds event"); debug!(" For: {:?}", incognito_account); }, _ => { trace!("Ignoring unsupported pallet_teerex event"); }, } }, #[cfg(feature = "teeracle")] Event::Teeracle(re) => { debug!("{:?}", re); match &re { my_node_runtime::pallet_teeracle::Event::ExchangeRateUpdated( source, currency, new_value, ) => { println!("[+] Received ExchangeRateUpdated event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); println!(" Exchange rate: {:?}", new_value); }, my_node_runtime::pallet_teeracle::Event::ExchangeRateDeleted( source, currency, ) => { println!("[+] Received ExchangeRateDeleted event"); println!(" Data source: {}", source); println!(" Currency: {}", currency); }, my_node_runtime::pallet_teeracle::Event::AddedToWhitelist( source, mrenclave, ) => { println!("[+] Received AddedToWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, my_node_runtime::pallet_teeracle::Event::RemovedFromWhitelist( source, mrenclave, ) => { println!("[+] Received RemovedFromWhitelist event"); println!(" Data source: {}", source); println!(" Currency: {:?}", mrenclave); }, _ => { trace!("Ignoring unsupported pallet_teeracle event"); }, } }, #[cfg(feature = "sidechain")] Event::Sidechain(re) => match &re { my_node_runtime::pallet_sidechain::Event::ProposedSidechainBlock( sender, payload, ) => { info!("[+] Received ProposedSidechainBlock event"); 
debug!(" From: {:?}", sender); debug!(" Payload: {:?}", hex::encode(payload)); }, _ => { trace!("Ignoring unsupported pallet_sidechain event"); }, }, _ => { trace!("Ignoring event {:?}", evr); }, } } } /// Subscribe to the node API finalized heads stream and trigger a parent chain sync /// upon receiving a new header. fn subscribe_to_parentchain_new_headers<E: EnclaveBase + Sidechain>( parentchain_handler: Arc<ParentchainHandler<ParentchainApi, E>>, mut last_synced_header: Header, ) -> Result<(), Error> { let (sender, receiver) = channel(); //TODO: this should be implemented by parentchain_handler directly, and not via // exposed parentchain_api. Blocked by https://github.com/scs/substrate-api-client/issues/267. parentchain_handler .parentchain_api() .subscribe_finalized_heads(sender) .map_err(Error::ApiClient)?; loop { let new_header: Header = match receiver.recv() { Ok(header_str) => serde_json::from_str(&header_str).map_err(Error::Serialization), Err(e) => Err(Error::ApiSubscriptionDisconnected(e)), }?; println!( "[+] Received finalized header update ({}), syncing parent chain...", new_header.number ); last_synced_header = parentchain_handler.sync_parentchain(last_synced_header)?; } } /// Get the public signing key of the TEE. fn enclave_account<E: EnclaveBase>(enclave_api: &E) -> AccountId32 { let tee_public = enclave_api.get_ecc_signing_pubkey().unwrap(); trace!("[+] Got ed25519 account of TEE = {}", tee_public.to_ss58check()); AccountId32::from(*tee_public.as_array_ref()) } /// Checks if we are the first validateer to register on the parentchain. fn we_are_primary_validateer( node_api: &ParentchainApi, register_enclave_xt_header: &Header, ) -> Result<bool, Error> { let enclave_count_of_previous_block = node_api.enclave_count(Some(*register_enclave_xt_header.parent_hash()))?; Ok(enclave_count_of_previous_block == 0) }
{ start_interval_market_update( &node_api, run_config.teeracle_update_interval, enclave.as_ref(), &teeracle_tokio_handle, ); }
conditional_block
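The `conditional_block` middle above is the teeracle branch of the worker-mode dispatch in `start_worker`: a teeracle worker only starts the periodic exchange-rate update, every other mode first syncs the parentchain, and a sidechain worker additionally starts block production. The following Rust sketch restates that dispatch under stated assumptions: the enum variants mirror the `WorkerMode` values used in the snippet, but the stub functions are stand-ins with made-up signatures, not the real teeracle, parentchain, or sidechain APIs.

#[derive(Clone, Copy, PartialEq)]
enum WorkerMode {
    OffChainWorker,
    Sidechain,
    Teeracle,
}

// Stubs standing in for `start_interval_market_update`, `sync_parentchain`
// and `sidechain_init_block_production` from the snippet above.
fn start_interval_market_update() {
    println!("teeracle: starting interval exchange-rate updates");
}
fn sync_parentchain() {
    println!("syncing parentchain blocks");
}
fn init_sidechain_block_production() {
    println!("sidechain: starting block production");
}

fn dispatch(mode: WorkerMode) {
    if mode == WorkerMode::Teeracle {
        // Teeracle workers skip parentchain block syncing entirely.
        start_interval_market_update();
    } else {
        sync_parentchain();
        if mode == WorkerMode::Sidechain {
            init_sidechain_block_production();
        }
    }
}

fn main() {
    // Usage: run the dispatch once per mode to see which services each mode starts.
    for mode in [WorkerMode::OffChainWorker, WorkerMode::Sidechain, WorkerMode::Teeracle] {
        dispatch(mode);
    }
}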
products.page.ts
import { Component, OnInit } from '@angular/core'; import { ProductsService } from 'src/app/shared/services/products.service'; import { combineAll, first, ignoreElements, mapTo } from 'rxjs/operators'; import { ActivatedRoute, Router } from '@angular/router'; import { FollowUpService } from 'src/app/shared/services/follow-up.service'; import { Products } from 'src/app/shared/models/products.model'; import { AlertController } from '@ionic/angular'; import { ListsService } from 'src/app/shared/services/lists.service'; import {Location} from '@angular/common' import { Key } from 'protractor'; import { ProductToList } from 'src/app/shared/models/product_to_list.model'; // הערה @Component({ selector: 'app-products', templateUrl: './products.page.html', styleUrls: ['./products.page.scss'], }) export class ProductsPage implements OnInit { o = new Object() // contain all products from database include thair categories arrKind = new Array()// categories products arrProducts = new Array() // all products search: string // value of searchbar selectedsArray= [] // list of all categories that contain also selected products (matriza) allSelectedProducts = []// contain all products are selected idAccount: Number newProducts:Products[]= new Array() isPageForUpdateFollowList: boolean categoryProduct: number; nameProduct: string; currentItems =new Array() map= new Map() newListId = [] addProductsToList:boolean typeListId: number; typeListName: string; selectedItemsLength: Number selectedArrayId: Number[] =[] allChecked: boolean = false isFromBuyList: boolean oneTimes=[] listId: number oneTime: ProductToList[]=[] constructor(private _location: Location, private productService: ProductsService, private followUpService: FollowUpService, private listService: ListsService, private router: Router, private route: ActivatedRoute, private alertController: AlertController) { this.route.params.subscribe(params => { this.isFromBuyList = JSON.parse(params['isFromBuyList']) this.listId = +params['listId'] console.log(this.isFromBuyList) this.isPageForUpdateFollowList = params['isForFollowList']; this.addProductsToList=params['addProducts'] this.typeListId = +params['typeListId']
} }); } ngOnInit() { this.idAccount =+ localStorage.getItem('accountId') console.log(this.idAccount) this.getAllProducts() if(this.isPageForUpdateFollowList) this.getSortedFolowList() if(this.addProductsToList) this.GetAllProductsByTypeId() if(this.isFromBuyList) this.selectedsArray = this.oneTimes } // get all products of account from DB getAllProducts() { this.productService.getAllProducts(this.idAccount).subscribe(res => { this.o = Array.of(res) this.selectedItemsLength = this.selectedsArray.length Object.keys(res).forEach(category => {this.arrKind.push(category)//arrKind if(this.selectedItemsLength == 0) this.selectedsArray.push([]) }); this.selectedItemsLength = this.selectedsArray.length if(this.allSelectedProducts.length>0) this.setSelectedArray(this.allSelectedProducts.map(p=>p)) Object.values(res).forEach(element => {this.arrProducts.push(element)});// arrProducts console.log(this.arrProducts) // console.log(" nnncj"+this.arrProducts.map(p=>p.ProductId)) }); } // if enter from followUp page, bring follow products from DB getSortedFolowList() { this.followUpService.getSortedFolowList(this.idAccount).subscribe(res=> { this.selectedsArray=res; console.log(this.selectedsArray)}) } // if enter from createList page, bring already selected products from create page getProductsFromCreateList() { this.setSelectedArray(this.allSelectedProducts.map(p=>p)) } GetAllProductsByTypeId() { this.listService.GetAllProductsByTypeId(this.typeListId).subscribe(res=> { this.setSelectedArray(res.map(p=>p.ProductId)) }) } setSelectedArray(res:number[]) { this.productService.getProductsByIdProduct(res).subscribe(products=>{ this.selectedsArray = [] console.log(products) console.log(Object.values(products)) for (let i = 0; i < this.arrKind.length; i++) { if(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) this.selectedsArray.push(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) else this.selectedsArray.push([]) } console.log(this.selectedsArray) }) } selectText(index) { let text=''; if(this.selectedsArray[index].length>0) text=Object(this.selectedsArray[index]).map(s=>s.ProductName).toString() return text; } // filter func for searchbar getItems(ev) { let val = ev.target.value; if (!val || !val.trim()) { this.currentItems = []; return; } this.query(val) } query(val?: any) { this.currentItems = [] if (!val) { return this.arrProducts; } return this.arrProducts.forEach(arr => { arr.filter((item) => { for (let key in val) { let field = item.ProductName[key]; if (!(typeof field == 'string' && field.indexOf(val[key]) >= 0)) { return item.ProductName } else if (!(field == val[key])) { return item.ProductName} } this.currentItems.push(item) return null; }); }); } // add product for this account async presentAlertPromptNewProduct() { var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'הכנס מוצר', inputs: [{ type:"textarea", placeholder: 'מוצר חדש', name:'textarea', }], buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'המשך', handler: (alertData) => { this.nameProduct = alertData.textarea this.presentAlertPromptCategory() } } ] }); await alert.present(); } async presentAlertPromptCategory() { let alertInputs=[]; this.arrKind.forEach(element => { alertInputs.push({name:element, type:'radio', value:element, label: element}) }); var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'בחר קטגוריה', inputs:alertInputs, 
buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'הוסף', handler: (alertData) => { this.categoryProduct = alertData; let p=new Products(); p.ProductName=this.nameProduct; console.log(this.o[0][this.categoryProduct]) p.CategoryId=this.o[0][this.categoryProduct][0].CategoryId this.newProducts.push(p) } } ] }); console.log(alert.inputs.length) await alert.present(); } addForSaveList(product: Products)// מעדכן את המוצר שנבחר מתיבת החיפוש - מוסיף או מסיר אותו אם לא קיים { console.log(this.selectedsArray) for(let i=0; i<this.selectedsArray.length; i++) // תעבור על כל המוצרים שנבחרו (ממוינים לפי קטגוריות ואם יש קטגורין=ות שעוד אין להם מוצרים שנבחרו אז הם מערכים ריקים) { var j =0; console.log(this.selectedsArray[i]) if(this.selectedsArray[i].length > 0)// אם נבחרו מוצרים מהקטגוריה הנוכחית { if(this.selectedsArray[i][0].CategoryName == product.CategoryName)//תבדוק אם המוצר שבחר מאותו קטגוריה { for(j=0; j<this.selectedsArray[i].length && this.selectedsArray[i][j].ProductId != product.ProductId; j++);//אם הם מאותו קטגוריה תבדוק אולי המוצר כבר קיים if(j === this.selectedsArray[i].length)// אם המוצר לא קיים אז תוסיף אותו { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } else// אם קיים כבר אז תמחק אותו { this.selectedsArray[i].splice(j,1) console.log(this.selectedsArray) this.showAlert("המוצר הוסר") break } } } else // אם לא נבחרו מוצרים מהקטגוריה של המוצר עוד if(this.arrProducts[i][0].CategoryName == product.CategoryName) { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } } this.currentItems = [] } async showAlert(message: string) { const alert = await this.alertController.create({ cssClass: 'my-custom-class', message: message, buttons: ['אישור'] }); await alert.present(); } // update follow up list saveList() { // this.allSelectedProducts=[] לבדוק עם מוצרים מהרשימה אם מתווסף או מה for (let i = 0; i < this.selectedsArray.length; i++)// over all Categories for checking which Categories are selected items { if (this.selectedsArray[i] != null)// if seleced items in this Categories so { Object(this.selectedsArray[i]).forEach(pr=> { if(!this.allSelectedProducts.find(item=> item == pr.ProductId)) this.allSelectedProducts.push(pr.ProductId) console.log(this.allSelectedProducts) }); // push products the costumer is choose to array } } console.log(this.allSelectedProducts) Object(this.selectedsArray).forEach(arrProducts => { this.selectedArrayId.push(...arrProducts.map(element => element=element.ProductId )); }); console.log("selectedrrayId: "+this.selectedArrayId ) const arr= this.allSelectedProducts.filter(idProduct=>{ return !this.selectedArrayId.includes(idProduct)}) console.log("in array1 and not array 2"+ arr) console.log("allselectedProducts: "+this.allSelectedProducts) arr.forEach(element => { this.allSelectedProducts.splice(this.allSelectedProducts.indexOf[element],1)}); console.log("allselectedProducts after: "+this.allSelectedProducts) // send for adding this.productService.addPersonalItems(this.newProducts,this.idAccount).subscribe((newProducts)=> { newProducts.forEach(element => {if(element) this.newListId.push(element)}); this.allSelectedProducts.push(...this.newListId) if(this.isPageForUpdateFollowList) { if(this.allChecked) { const arrAll= new Array() for(var arr of this.arrProducts) { for(let p of arr) arrAll.push(p.ProductId) } this.allSelectedProducts = arrAll } this.followUpService.saveList(this.allSelectedProducts, this.idAccount). 
subscribe((res) => { this.router.navigateByUrl('follow-list');}); } else if(this.addProductsToList) { this.router.navigate(['show-list',{"status":"true", "allSelectedProducts":JSON.stringify(this.allSelectedProducts),"typeListId":this.typeListId, "typeListName":this.typeListName}]); } else if(this.isFromBuyList == true) { this.allSelectedProducts.map(p => { var p1 = new ProductToList() p1.ListId = this.listId p1.ProductId = p this.oneTime.push(p1) }); this.listService.SaveOneProductsToList(this.idAccount, this.oneTime).subscribe(res=> this.router.navigate(['buy-list', {"listId":this.listId}]) ) } else { this.router.navigate(['create-list',{"productsList": this.allSelectedProducts}]); } }); } compareById(o1,o2) { for(let p of o2) if(p.ProductId==o1.ProductId) return true; return false; } showSelected() { console.log(this.selectedsArray) } }
this.typeListName = params['typeListName'] if(params['productsInList'] != undefined) { this.allSelectedProducts =(params['productsInList'].split(',')).map(x=>+x);
random_line_split
products.page.ts
import { Component, OnInit } from '@angular/core'; import { ProductsService } from 'src/app/shared/services/products.service'; import { combineAll, first, ignoreElements, mapTo } from 'rxjs/operators'; import { ActivatedRoute, Router } from '@angular/router'; import { FollowUpService } from 'src/app/shared/services/follow-up.service'; import { Products } from 'src/app/shared/models/products.model'; import { AlertController } from '@ionic/angular'; import { ListsService } from 'src/app/shared/services/lists.service'; import {Location} from '@angular/common' import { Key } from 'protractor'; import { ProductToList } from 'src/app/shared/models/product_to_list.model'; // הערה @Component({ selector: 'app-products', templateUrl: './products.page.html', styleUrls: ['./products.page.scss'], }) export class ProductsPage implements OnInit { o = new Object() // contain all products from database include thair categories arrKind = new Array()// categories products arrProducts = new Array() // all products search: string // value of searchbar selectedsArray= [] // list of all categories that contain also selected products (matriza) allSelectedProducts = []// contain all products are selected idAccount: Number newProducts:Products[]= new Array() isPageForUpdateFollowList: boolean categoryProduct: number; nameProduct: string; currentItems =new Array() map= new Map() newListId = [] addProductsToList:boolean typeListId: number; typeListName: string; selectedItemsLength: Number selectedArrayId: Number[] =[] allChecked: boolean = false isFromBuyList: boolean oneTimes=[] listId: number oneTime: ProductToList[]=[] constructor(private _location: Location, private productService: ProductsService, private followUpService: FollowUpService, private listService: ListsService, private router: Router, private route: ActivatedRoute, private alertController: AlertController) { this.route.params.subscribe(params => { this.isFromBuyList = JSON.parse(params['isFromBuyList']) this.listId = +params['listId'] console.log(this.isFromBuyList) this.isPageForUpdateFollowList = params['isForFollowList']; this.addProductsToList=params['addProducts'] this.typeListId = +params['typeListId'] this.typeListName = params['typeListName'] if(params['productsInList'] != undefined) { this.allSelectedProducts =(params['productsInList'].split(',')).map(x=>+x); } }); } ngOnInit() { this.idAccount =+ localStorage.getItem('accountId') console.log(this.idAccount) this.getAllProducts() if(this.isPageForUpdateFollowList) this.getSortedFolowList() if(this.addProductsToList) this.GetAllProductsByTypeId() if(this.isFromBuyList) this.selectedsArray = this.oneTimes } // get all products of account from DB getAllProducts() { this.productService.getAllProducts(this.idAccount).subscribe(res => { this.o = Array.of(res) this.selectedItemsLength = this.selectedsArray.length Object.keys(res).forEach(category => {this.arrKind.push(category)//arrKind if(this.selectedItemsLength == 0) this.selectedsArray.push([]) }); this.selectedItemsLength = this.selectedsArray.length if(this.allSelectedProducts.length>0) this.setSelectedArray(this.allSelectedProducts.map(p=>p)) Object.values(res).forEach(element => {this.arrProducts.push(element)});// arrProducts console.log(this.arrProducts) // console.log(" nnncj"+this.arrProducts.map(p=>p.ProductId)) }); } // if enter from followUp page, bring follow products from DB getSortedFolowList() { this.followUpService.getSortedFolowList(this.idAccount).subscribe(res=> { this.selectedsArray=res; console.log(this.selectedsArray)}) } 
// if enter from createList page, bring already selected products from create page getProductsFromCreateList() { this.setSelectedArray(this.allSelectedProducts.map(p=>p)) } GetAllProductsByTypeId() { this.listService.GetAllProductsByTypeId(this.typeListId).subscribe(res=> { this.setSelectedArray(res.map(p=>p.ProductId)) }) } setS
:number[]) { this.productService.getProductsByIdProduct(res).subscribe(products=>{ this.selectedsArray = [] console.log(products) console.log(Object.values(products)) for (let i = 0; i < this.arrKind.length; i++) { if(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) this.selectedsArray.push(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) else this.selectedsArray.push([]) } console.log(this.selectedsArray) }) } selectText(index) { let text=''; if(this.selectedsArray[index].length>0) text=Object(this.selectedsArray[index]).map(s=>s.ProductName).toString() return text; } // filter func for searchbar getItems(ev) { let val = ev.target.value; if (!val || !val.trim()) { this.currentItems = []; return; } this.query(val) } query(val?: any) { this.currentItems = [] if (!val) { return this.arrProducts; } return this.arrProducts.forEach(arr => { arr.filter((item) => { for (let key in val) { let field = item.ProductName[key]; if (!(typeof field == 'string' && field.indexOf(val[key]) >= 0)) { return item.ProductName } else if (!(field == val[key])) { return item.ProductName} } this.currentItems.push(item) return null; }); }); } // add product for this account async presentAlertPromptNewProduct() { var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'הכנס מוצר', inputs: [{ type:"textarea", placeholder: 'מוצר חדש', name:'textarea', }], buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'המשך', handler: (alertData) => { this.nameProduct = alertData.textarea this.presentAlertPromptCategory() } } ] }); await alert.present(); } async presentAlertPromptCategory() { let alertInputs=[]; this.arrKind.forEach(element => { alertInputs.push({name:element, type:'radio', value:element, label: element}) }); var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'בחר קטגוריה', inputs:alertInputs, buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'הוסף', handler: (alertData) => { this.categoryProduct = alertData; let p=new Products(); p.ProductName=this.nameProduct; console.log(this.o[0][this.categoryProduct]) p.CategoryId=this.o[0][this.categoryProduct][0].CategoryId this.newProducts.push(p) } } ] }); console.log(alert.inputs.length) await alert.present(); } addForSaveList(product: Products)// מעדכן את המוצר שנבחר מתיבת החיפוש - מוסיף או מסיר אותו אם לא קיים { console.log(this.selectedsArray) for(let i=0; i<this.selectedsArray.length; i++) // תעבור על כל המוצרים שנבחרו (ממוינים לפי קטגוריות ואם יש קטגורין=ות שעוד אין להם מוצרים שנבחרו אז הם מערכים ריקים) { var j =0; console.log(this.selectedsArray[i]) if(this.selectedsArray[i].length > 0)// אם נבחרו מוצרים מהקטגוריה הנוכחית { if(this.selectedsArray[i][0].CategoryName == product.CategoryName)//תבדוק אם המוצר שבחר מאותו קטגוריה { for(j=0; j<this.selectedsArray[i].length && this.selectedsArray[i][j].ProductId != product.ProductId; j++);//אם הם מאותו קטגוריה תבדוק אולי המוצר כבר קיים if(j === this.selectedsArray[i].length)// אם המוצר לא קיים אז תוסיף אותו { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } else// אם קיים כבר אז תמחק אותו { this.selectedsArray[i].splice(j,1) console.log(this.selectedsArray) this.showAlert("המוצר הוסר") break } } } else // אם לא נבחרו מוצרים מהקטגוריה של המוצר עוד if(this.arrProducts[i][0].CategoryName == product.CategoryName) { 
this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } } this.currentItems = [] } async showAlert(message: string) { const alert = await this.alertController.create({ cssClass: 'my-custom-class', message: message, buttons: ['אישור'] }); await alert.present(); } // update follow up list saveList() { // this.allSelectedProducts=[] לבדוק עם מוצרים מהרשימה אם מתווסף או מה for (let i = 0; i < this.selectedsArray.length; i++)// over all Categories for checking which Categories are selected items { if (this.selectedsArray[i] != null)// if seleced items in this Categories so { Object(this.selectedsArray[i]).forEach(pr=> { if(!this.allSelectedProducts.find(item=> item == pr.ProductId)) this.allSelectedProducts.push(pr.ProductId) console.log(this.allSelectedProducts) }); // push products the costumer is choose to array } } console.log(this.allSelectedProducts) Object(this.selectedsArray).forEach(arrProducts => { this.selectedArrayId.push(...arrProducts.map(element => element=element.ProductId )); }); console.log("selectedrrayId: "+this.selectedArrayId ) const arr= this.allSelectedProducts.filter(idProduct=>{ return !this.selectedArrayId.includes(idProduct)}) console.log("in array1 and not array 2"+ arr) console.log("allselectedProducts: "+this.allSelectedProducts) arr.forEach(element => { this.allSelectedProducts.splice(this.allSelectedProducts.indexOf[element],1)}); console.log("allselectedProducts after: "+this.allSelectedProducts) // send for adding this.productService.addPersonalItems(this.newProducts,this.idAccount).subscribe((newProducts)=> { newProducts.forEach(element => {if(element) this.newListId.push(element)}); this.allSelectedProducts.push(...this.newListId) if(this.isPageForUpdateFollowList) { if(this.allChecked) { const arrAll= new Array() for(var arr of this.arrProducts) { for(let p of arr) arrAll.push(p.ProductId) } this.allSelectedProducts = arrAll } this.followUpService.saveList(this.allSelectedProducts, this.idAccount). subscribe((res) => { this.router.navigateByUrl('follow-list');}); } else if(this.addProductsToList) { this.router.navigate(['show-list',{"status":"true", "allSelectedProducts":JSON.stringify(this.allSelectedProducts),"typeListId":this.typeListId, "typeListName":this.typeListName}]); } else if(this.isFromBuyList == true) { this.allSelectedProducts.map(p => { var p1 = new ProductToList() p1.ListId = this.listId p1.ProductId = p this.oneTime.push(p1) }); this.listService.SaveOneProductsToList(this.idAccount, this.oneTime).subscribe(res=> this.router.navigate(['buy-list', {"listId":this.listId}]) ) } else { this.router.navigate(['create-list',{"productsList": this.allSelectedProducts}]); } }); } compareById(o1,o2) { for(let p of o2) if(p.ProductId==o1.ProductId) return true; return false; } showSelected() { console.log(this.selectedsArray) } }
electedArray(res
identifier_name
products.page.ts
import { Component, OnInit } from '@angular/core'; import { ProductsService } from 'src/app/shared/services/products.service'; import { combineAll, first, ignoreElements, mapTo } from 'rxjs/operators'; import { ActivatedRoute, Router } from '@angular/router'; import { FollowUpService } from 'src/app/shared/services/follow-up.service'; import { Products } from 'src/app/shared/models/products.model'; import { AlertController } from '@ionic/angular'; import { ListsService } from 'src/app/shared/services/lists.service'; import {Location} from '@angular/common' import { Key } from 'protractor'; import { ProductToList } from 'src/app/shared/models/product_to_list.model'; // הערה @Component({ selector: 'app-products', templateUrl: './products.page.html', styleUrls: ['./products.page.scss'], }) export class ProductsPage implements OnInit { o = new Object() // contain all products from database include thair categories arrKind = new Array()// categories products arrProducts = new Array() // all products search: string // value of searchbar selectedsArray= [] // list of all categories that contain also selected products (matriza) allSelectedProducts = []// contain all products are selected idAccount: Number newProducts:Products[]= new Array() isPageForUpdateFollowList: boolean categoryProduct: number; nameProduct: string; currentItems =new Array() map= new Map() newListId = [] addProductsToList:boolean typeListId: number; typeListName: string; selectedItemsLength: Number selectedArrayId: Number[] =[] allChecked: boolean = false isFromBuyList: boolean oneTimes=[] listId: number oneTime: ProductToList[]=[] constructor(private _location: Location, private productService: ProductsService, private followUpService: FollowUpService, private listService: ListsService, private router: Router, private route: ActivatedRoute, private alertController: AlertController) { this.route.params.subscribe(params => { this.isFromBuyList = JSON.parse(params['isFromBuyList']) this.listId = +params['listId'] console.log(this.isFromBuyList) this.isPageForUpdateFollowList = params['isForFollowList']; this.addProductsToList=params['addProducts'] this.typeListId = +params['typeListId'] this.typeListName = params['typeListName'] if(params['productsInList'] != undefined) { this.allSelectedProducts =(params['productsInList'].split(',')).map(x=>+x); } }); } ngOnInit() { this.idAccount =+ localStorage.getItem('accountId') console.log(this.idAccount) this.getAllProducts() if(this.isPageForUpdateFollowList) this.getSortedFolowList() if(this.addProductsToList) this.GetAllProductsByTypeId() if(this.isFromBuyList) this.selectedsArray = this.oneTimes } // get all products of account from DB getAllProducts() { this.productService.getAllProducts(this.idAccount).subscribe(res => { this.o = Array.of(res) this.selectedItemsLength = this.selectedsArray.length Object.keys(res).forEach(category => {this.arrKind.push(category)//arrKind if(this.selectedItemsLength == 0) this.selectedsArray.push([]) }); this.selectedItemsLength = this.selectedsArray.length if(this.allSelectedProducts.length>0) this.setSelectedArray(this.allSelectedProducts.map(p=>p)) Object.values(res).forEach(element => {this.arrProducts.push(element)});// arrProducts console.log(this.arrProducts) // console.log(" nnncj"+this.arrProducts.map(p=>p.ProductId)) }); } // if enter from followUp page, bring follow products from DB getSortedFolowList() { this.followUpService.getSortedFolowList(this.idAccount).subscribe(res=> { this.selectedsArray=res; console.log(this.selectedsArray)}) } 
// if enter from createList page, bring already selected products from create page getProductsFromCreateList() { this.setSelectedArray(this.allSelectedProducts.map(p=>p)) } GetAllProductsByTypeId() { this.listService.GetAllProductsByTypeId(this.typeListId).subscribe(res=> { this.setSelectedArray(res.map(p=>p.ProductId)) }) } setSelectedArray(res:number[]) { this.productService.getProductsByIdProduct(res).subscribe(products=>{ this.selectedsArray = [] console.log(products) console.log(Object.values(products)) for (let i = 0; i < this.arrKind.length; i++) { if(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) this.selectedsArray.push(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) else this.selectedsArray.push([]) } console.log(this.selectedsArray) }) } selectText(index) { let text=''; if(this.selectedsArray[index].length>0) text=Object(this.selectedsArray[index]).map(s=>s.ProductName).toString() return text; } // filter func for searchbar getItems(ev) { let val = ev.target.value; if (!val || !val.trim()) { this.currentItems = []; return; } this.query(val) } query(val?: any) { this.currentItems = [] if (!val) { return this.arrProducts; } return this.arrProducts.forEach(arr => { arr.filter((item) => { for (let key in val) { let field = item.ProductName[key]; if (!(typeof field == 'string' && field.indexOf(val[key]) >= 0)) { return item.ProductName } else if (!(field == val[key])) { return item.ProductName} } this.currentItems.push(item) return null; }); }); } // add product for this account async presentAlertPromptNewProduct() { var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'הכנס מוצר', inputs: [{ type:"textarea", placeholder: 'מוצר חדש', name:'textarea', }], buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'המשך', handler: (alertData) => { this.nameProduct = alertData.textarea this.presentAlertPromptCategory() } } ] }); await alert.present(); } async presentAlertPromptCategory() { let alertInputs=[]; this.arrKind.forEach(element => { alertInputs.push({name:element, type:'radio', value:element, label: element}) }); var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'בחר קטגוריה', inputs:alertInputs, buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'הוסף', handler: (alertData) => { this.categoryProduct = alertData; let p=new Products(); p.ProductName=this.nameProduct; console.log(this.o[0][this.categoryProduct]) p.CategoryId=this.o[0][this.categoryProduct][0].CategoryId this.newProducts.push(p) } } ] }); console.log(alert.inputs.length) await alert.present(); } addForSaveList(product: Products)// מעדכן את המוצר שנבחר מתיבת החיפוש - מוסיף או מסיר אותו אם לא קיים { console.log(this.selectedsArray) for(let i=0; i<this.selectedsArray.length; i++) // תעבור על כל המוצרים שנבחרו (ממוינים לפי קטגוריות ואם יש קטגורין=ות שעוד אין להם מוצרים שנבחרו אז הם מערכים ריקים) { var j =0; console.log(this.selectedsArray[i]) if(this.selectedsArray[i].length > 0)// אם נבחרו מוצרים מהקטגוריה הנוכחית { if(this.selectedsArray[i][0].CategoryName == product.CategoryName)//תבדוק אם המוצר שבחר מאותו קטגוריה { for(j=0; j<this.selectedsArray[i].length && this.selectedsArray[i][j].ProductId != product.ProductId; j++);//אם הם מאותו קטגוריה תבדוק אולי המוצר כבר קיים if(j === this.selectedsArray[i].length)// אם המוצר לא קיים אז תוסיף אותו { 
this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } else// אם קיים כבר אז תמחק אותו { this.selectedsArray[i].splice(j,1) console.log(this.selectedsArray) this.showAlert("המוצר הוסר") break } } } else // אם לא נבחרו מוצרים מהקטגוריה של המוצר עוד if(this.arrProducts[i][0].CategoryName == product.CategoryName) { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } } this.currentItems = [] } async showAlert(message: string) { const alert = await this.alertController.create({ cssClass: 'my-custom-class', message: message, buttons: ['אישור'] }); await alert.present(); } // update
this.selectedsArray.length; i++)// over all Categories for checking which Categories are selected items { if (this.selectedsArray[i] != null)// if seleced items in this Categories so { Object(this.selectedsArray[i]).forEach(pr=> { if(!this.allSelectedProducts.find(item=> item == pr.ProductId)) this.allSelectedProducts.push(pr.ProductId) console.log(this.allSelectedProducts) }); // push products the costumer is choose to array } } console.log(this.allSelectedProducts) Object(this.selectedsArray).forEach(arrProducts => { this.selectedArrayId.push(...arrProducts.map(element => element=element.ProductId )); }); console.log("selectedrrayId: "+this.selectedArrayId ) const arr= this.allSelectedProducts.filter(idProduct=>{ return !this.selectedArrayId.includes(idProduct)}) console.log("in array1 and not array 2"+ arr) console.log("allselectedProducts: "+this.allSelectedProducts) arr.forEach(element => { this.allSelectedProducts.splice(this.allSelectedProducts.indexOf[element],1)}); console.log("allselectedProducts after: "+this.allSelectedProducts) // send for adding this.productService.addPersonalItems(this.newProducts,this.idAccount).subscribe((newProducts)=> { newProducts.forEach(element => {if(element) this.newListId.push(element)}); this.allSelectedProducts.push(...this.newListId) if(this.isPageForUpdateFollowList) { if(this.allChecked) { const arrAll= new Array() for(var arr of this.arrProducts) { for(let p of arr) arrAll.push(p.ProductId) } this.allSelectedProducts = arrAll } this.followUpService.saveList(this.allSelectedProducts, this.idAccount). subscribe((res) => { this.router.navigateByUrl('follow-list');}); } else if(this.addProductsToList) { this.router.navigate(['show-list',{"status":"true", "allSelectedProducts":JSON.stringify(this.allSelectedProducts),"typeListId":this.typeListId, "typeListName":this.typeListName}]); } else if(this.isFromBuyList == true) { this.allSelectedProducts.map(p => { var p1 = new ProductToList() p1.ListId = this.listId p1.ProductId = p this.oneTime.push(p1) }); this.listService.SaveOneProductsToList(this.idAccount, this.oneTime).subscribe(res=> this.router.navigate(['buy-list', {"listId":this.listId}]) ) } else { this.router.navigate(['create-list',{"productsList": this.allSelectedProducts}]); } }); } compareById(o1,o2) { for(let p of o2) if(p.ProductId==o1.ProductId) return true; return false; } showSelected() { console.log(this.selectedsArray) } }
follow up list saveList() { // this.allSelectedProducts=[] check against the products already in the list whether items get added or not for (let i = 0; i <
conditional_block
products.page.ts
import { Component, OnInit } from '@angular/core'; import { ProductsService } from 'src/app/shared/services/products.service'; import { combineAll, first, ignoreElements, mapTo } from 'rxjs/operators'; import { ActivatedRoute, Router } from '@angular/router'; import { FollowUpService } from 'src/app/shared/services/follow-up.service'; import { Products } from 'src/app/shared/models/products.model'; import { AlertController } from '@ionic/angular'; import { ListsService } from 'src/app/shared/services/lists.service'; import {Location} from '@angular/common' import { Key } from 'protractor'; import { ProductToList } from 'src/app/shared/models/product_to_list.model'; // הערה @Component({ selector: 'app-products', templateUrl: './products.page.html', styleUrls: ['./products.page.scss'], }) export class ProductsPage implements OnInit { o = new Object() // contain all products from database include thair categories arrKind = new Array()// categories products arrProducts = new Array() // all products search: string // value of searchbar selectedsArray= [] // list of all categories that contain also selected products (matriza) allSelectedProducts = []// contain all products are selected idAccount: Number newProducts:Products[]= new Array() isPageForUpdateFollowList: boolean categoryProduct: number; nameProduct: string; currentItems =new Array() map= new Map() newListId = [] addProductsToList:boolean typeListId: number; typeListName: string; selectedItemsLength: Number selectedArrayId: Number[] =[] allChecked: boolean = false isFromBuyList: boolean oneTimes=[] listId: number oneTime: ProductToList[]=[] constructor(private _location: Location, private productService: ProductsService, private followUpService: FollowUpService, private listService: ListsService, private router: Router, private route: ActivatedRoute, private alertController: AlertController) { this.route.params.subscribe(params => { this.isFromBuyList = JSON.parse(params['isFromBuyList']) this.listId = +params['listId'] console.log(this.isFromBuyList) this.isPageForUpdateFollowList = params['isForFollowList']; this.addProductsToList=params['addProducts'] this.typeListId = +params['typeListId'] this.typeListName = params['typeListName'] if(params['productsInList'] != undefined) { this.allSelectedProducts =(params['productsInList'].split(',')).map(x=>+x); } }); } ngOnInit() { this.idAccount =+ localStorage.getItem('accountId') console.log(this.idAccount) this.getAllProducts() if(this.isPageForUpdateFollowList) this.getSortedFolowList() if(this.addProductsToList) this.GetAllProductsByTypeId() if(this.isFromBuyList) this.selectedsArray = this.oneTimes } // get all products of account from DB getAllProducts() { this.productService.getAllProducts(this.idAccount).subscribe(res => { this.o = Array.of(res) this.selectedItemsLength = this.selectedsArray.length Object.keys(res).forEach(category => {this.arrKind.push(category)//arrKind if(this.selectedItemsLength == 0) this.selectedsArray.push([]) }); this.selectedItemsLength = this.selectedsArray.length if(this.allSelectedProducts.length>0) this.setSelectedArray(this.allSelectedProducts.map(p=>p)) Object.values(res).forEach(element => {this.arrProducts.push(element)});// arrProducts console.log(this.arrProducts) // console.log(" nnncj"+this.arrProducts.map(p=>p.ProductId)) }); } // if enter from followUp page, bring follow products from DB getSortedFolowList() { this.followUpService.getSortedFolowList(this.idAccount).subscribe(res=> { this.selectedsArray=res; console.log(this.selectedsArray)}) } 
// if entered from the createList page, bring the products already selected on the create page getProductsFromCreateList() {
GetAllProductsByTypeId() { this.listService.GetAllProductsByTypeId(this.typeListId).subscribe(res=> { this.setSelectedArray(res.map(p=>p.ProductId)) }) } setSelectedArray(res:number[]) { this.productService.getProductsByIdProduct(res).subscribe(products=>{ this.selectedsArray = [] console.log(products) console.log(Object.values(products)) for (let i = 0; i < this.arrKind.length; i++) { if(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) this.selectedsArray.push(Object.values(products).find(group=> group[0].CategoryName == this.arrKind[i])) else this.selectedsArray.push([]) } console.log(this.selectedsArray) }) } selectText(index) { let text=''; if(this.selectedsArray[index].length>0) text=Object(this.selectedsArray[index]).map(s=>s.ProductName).toString() return text; } // filter function for the search bar getItems(ev) { let val = ev.target.value; if (!val || !val.trim()) { this.currentItems = []; return; } this.query(val) } query(val?: any) { this.currentItems = [] if (!val) { return this.arrProducts; } return this.arrProducts.forEach(arr => { arr.filter((item) => { for (let key in val) { let field = item.ProductName[key]; if (!(typeof field == 'string' && field.indexOf(val[key]) >= 0)) { return item.ProductName } else if (!(field == val[key])) { return item.ProductName} } this.currentItems.push(item) return null; }); }); } // add a product for this account async presentAlertPromptNewProduct() { var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'הכנס מוצר', inputs: [{ type:"textarea", placeholder: 'מוצר חדש', name:'textarea', }], buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'המשך', handler: (alertData) => { this.nameProduct = alertData.textarea this.presentAlertPromptCategory() } } ] }); await alert.present(); } async presentAlertPromptCategory() { let alertInputs=[]; this.arrKind.forEach(element => { alertInputs.push({name:element, type:'radio', value:element, label: element}) }); var alert = await this.alertController.create( { cssClass: 'my-custom-class', header: 'בחר קטגוריה', inputs:alertInputs, buttons: [ { text: 'ביטול', role: 'cancel', cssClass: 'secondary', handler: () => {console.log('Confirm Cancel');} }, { text: 'הוסף', handler: (alertData) => { this.categoryProduct = alertData; let p=new Products(); p.ProductName=this.nameProduct; console.log(this.o[0][this.categoryProduct]) p.CategoryId=this.o[0][this.categoryProduct][0].CategoryId this.newProducts.push(p) } } ] }); console.log(alert.inputs.length) await alert.present(); } addForSaveList(product: Products)// updates the product chosen from the search box - adds it if missing, or removes it if it already exists { console.log(this.selectedsArray) for(let i=0; i<this.selectedsArray.length; i++) // go over all selected products (grouped by category; categories with no selected products yet are empty arrays) { var j =0; console.log(this.selectedsArray[i]) if(this.selectedsArray[i].length > 0)// if products from the current category were selected { if(this.selectedsArray[i][0].CategoryName == product.CategoryName)// check whether the chosen product belongs to the same category { for(j=0; j<this.selectedsArray[i].length && this.selectedsArray[i][j].ProductId != product.ProductId; j++);// if it is the same category, check whether the product already exists if(j === this.selectedsArray[i].length)// if the product does not exist yet, add it { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } else// if it already exists, remove it { this.selectedsArray[i].splice(j,1)
console.log(this.selectedsArray) this.showAlert("המוצר הוסר") break } } } else // if no products from this product's category have been selected yet if(this.arrProducts[i][0].CategoryName == product.CategoryName) { this.selectedsArray[i].push(product) this.showAlert("המוצר נוסף") break } } this.currentItems = [] } async showAlert(message: string) { const alert = await this.alertController.create({ cssClass: 'my-custom-class', message: message, buttons: ['אישור'] }); await alert.present(); } // update the follow-up list saveList() { // this.allSelectedProducts=[] TODO: check, with products already in the list, whether it gets added or not for (let i = 0; i < this.selectedsArray.length; i++)// go over all categories to check which ones have selected items { if (this.selectedsArray[i] != null)// if there are selected items in this category { Object(this.selectedsArray[i]).forEach(pr=> { if(!this.allSelectedProducts.find(item=> item == pr.ProductId)) this.allSelectedProducts.push(pr.ProductId) console.log(this.allSelectedProducts) }); // push the products the customer chose into the array } } console.log(this.allSelectedProducts) Object(this.selectedsArray).forEach(arrProducts => { this.selectedArrayId.push(...arrProducts.map(element => element.ProductId )); }); console.log("selectedArrayId: "+this.selectedArrayId ) const arr= this.allSelectedProducts.filter(idProduct=>{ return !this.selectedArrayId.includes(idProduct)}) console.log("in array 1 and not in array 2: "+ arr) console.log("allSelectedProducts: "+this.allSelectedProducts) arr.forEach(element => { this.allSelectedProducts.splice(this.allSelectedProducts.indexOf(element),1)}); console.log("allSelectedProducts after: "+this.allSelectedProducts) // send for adding this.productService.addPersonalItems(this.newProducts,this.idAccount).subscribe((newProducts)=> { newProducts.forEach(element => {if(element) this.newListId.push(element)}); this.allSelectedProducts.push(...this.newListId) if(this.isPageForUpdateFollowList) { if(this.allChecked) { const arrAll= new Array() for(const products of this.arrProducts) { for(let p of products) arrAll.push(p.ProductId) } this.allSelectedProducts = arrAll } this.followUpService.saveList(this.allSelectedProducts, this.idAccount). subscribe((res) => { this.router.navigateByUrl('follow-list');}); } else if(this.addProductsToList) { this.router.navigate(['show-list',{"status":"true", "allSelectedProducts":JSON.stringify(this.allSelectedProducts),"typeListId":this.typeListId, "typeListName":this.typeListName}]); } else if(this.isFromBuyList == true) { this.allSelectedProducts.map(p => { var p1 = new ProductToList() p1.ListId = this.listId p1.ProductId = p this.oneTime.push(p1) }); this.listService.SaveOneProductsToList(this.idAccount, this.oneTime).subscribe(res=> this.router.navigate(['buy-list', {"listId":this.listId}]) ) } else { this.router.navigate(['create-list',{"productsList": this.allSelectedProducts}]); } }); } compareById(o1,o2) { for(let p of o2) if(p.ProductId==o1.ProductId) return true; return false; } showSelected() { console.log(this.selectedsArray) } }
this.setSelectedArray(this.allSelectedProducts.map(p=>p)) }
identifier_body
material.rs
use std::ops::{Add, Div, Mul, Sub}; use crate::color::Color; use crate::hittable::HitRecord; use crate::ray::Ray; use crate::rtweekend; use crate::vec::Vec3; /// Generic material trait. pub trait Material<T: Copy> { /// Scatter an incoming light ray on a material. /// /// Returns a scattered ray and the attenuation color if there is reflection. /// Otherwise, none is returned. /// /// * `ray` - Incoming light ray. /// * `rec` - Previous hit record of the ray on some object. fn scatter(&self, ray: &Ray<T>, rec: &HitRecord<T>) -> Option<(Ray<T>, Color)>; } /// Lambertian (diffuse) material. /// /// In our diffuse reflection model, a lambertian material will always both scatter and attenuate /// by its own reflectance (albedo). /// /// Should only be used for smooth matte surfaces, not rough matte ones. /// See https://www.cs.cmu.edu/afs/cs/academic/class/15462-f09/www/lec/lec8.pdf for explanation. pub struct Lambertian { /// Color of the object. albedo: Color, } impl Lambertian { /// Create a new diffuse material from a given intrinsic object color. pub fn new(albedo: Color) -> Self { Lambertian { albedo } } } impl Material<f64> for Lambertian { fn scatter(&self, _ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // Diffuse reflection: True Lambertian reflection. // We aim for a Lambertian distribution of the reflected rays, which has a distribution of // cos(phi) instead of cos³(phi) for random vectors inside the unit sphere. // To achieve this, we pick a random point on the surface of the unit sphere, which is done // by picking a random point inside the sphere and then normalizing that point. let random_unit_vec = rtweekend::random_vec_in_unit_sphere().normalized(); // Diffuse reflection: send out a new ray from the hit position point pointing towards a // random point on the surface of the sphere tangent to that hit point. // Possible problem: the recursion depth may be too deep, so we blow up the stack. Avoid // this by limiting the number of child rays. let scatter_direction = rec.normal + random_unit_vec; let scatter = Ray::new(rec.point, scatter_direction); Some((scatter, self.albedo)) } } /// Metal (specular) material. /// /// For smooth metal surfaces, light is not randomly scattered. Instead, the angle of the incident /// ray is equal to that of the specular outgoing ray. /// /// V N ^ ^ /// \ ^ / | /// \ | / | B /// v|/ | /// S ------------x------------ /// \ | /// \ | B /// v | /// /// The elements of the figure are: /// * S: Metal surface /// * V: Incident ray /// * N: Surface normal
/// /// In our current design, N is a unit vector, but the same does not have to be true for V. /// Furthermore, V points inwards, so the sign has to be changed. /// All this is encoded in the reflect() function. pub struct Metal { /// Color of the object. albedo: Color, /// Fuziness of the specular reflections. fuzz: f64, } impl Metal { /// Create a new metallic material from a given intrinsic object color. /// /// * `albedo`: Intrinsic surface color. /// * `fuzz`: Fuzziness factor for specular reflection in the range [0.0, 1.0]. pub fn new(albedo: Color, fuzz: f64) -> Self { Metal { albedo, fuzz: rtweekend::clamp(fuzz, 0.0, 1.0), } } /// Returns the reflected ray. /// /// This basically just encodes the V + B + B term for the specular reflection. We flip the /// sign and simplify the term so it becomes V - 2B. pub fn reflect<T: Copy>(ray: &Vec3<T>, normal: &Vec3<T>) -> Vec3<T> where T: Add<Output = T> + Div<Output = T> + Mul<Output = T> + Mul<f64, Output = T> + Sub<Output = T> + From<f64> + Into<f64>, { let b = *normal * Vec3::dot(ray, normal); *ray - b * T::from(2.0) } } impl Material<f64> for Metal { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // specular reflection let direction = Metal::reflect(&ray.direction().normalized(), &rec.normal); // apply fuzzing let direction = direction + rtweekend::random_vec_in_unit_sphere() * self.fuzz; let scatter = Ray::new(rec.point, direction); if Vec3::dot(&scatter.direction(), &rec.normal) <= 0.0 { None } else { Some((scatter, self.albedo)) } } } /// Clear (dielectrics) material. /// /// Examples of dielectric materials are water, glass or diamond. /// When such a material it hit by a light ray, the ray is split into a reflected and a refracted /// (transmitted) ray. /// /// Refraction for dielectrics is described by Snell's law: /// /// η⋅sinθ = η′⋅sinθ′ /// /// where /// * θ/θ′: angles from the surface normal /// * η/η′: refractive indices (e.g. 1.0 for air, 1.3-1.7 for glass) /// /// R N /// \ ^ /// \ | /// η v| /// S ------------x------------ /// η′ |\ /// | \ /// | \ /// v v /// N´ R´ /// /// In the illustration above, R is the incident ray and N is the surface normal. θ is thus the /// angle between R and N, while θ′ is the angle between N´ and R´. In the illustration, the angles /// θ and θ′ are exactly the same, so the ray R would pass from air (η = 1.0) through air /// (η′ = 1.0). /// /// To calculate the angle θ′, we solve for sinθ′: /// /// sinθ′ = (η / η′)⋅sinθ /// /// We split R´ into two parts: one that is perpendicular to N´ and one that is parallel to N´: /// /// R´ = R′⊥ + R′∥ /// /// Solving for those parts yields: /// /// R′⊥ = (η / η′)⋅(R + cosθ⋅n) /// R′∥ = - sqrt(1 - |R′⊥|²)⋅n /// /// The next step is solving for cosθ. The dot product of two vectors can be expressed in terms of /// the cosine of the angle between them: /// /// a⋅b = |a||b| cosθ /// /// or, assuming unit vectors: /// /// a⋅b = cosθ /// /// Thus, we can rewrite R′⊥ as: /// /// R′⊥ = (η / η′)⋅(R + (-R⋅n)n) /// /// Sometimes, the refraction ratio η / η′ is too high (e.g. when a ray passes through glass and /// enters air), so a real solution to Snell's law does not exist. An example: /// /// sinθ′ = (η / η′)⋅sinθ /// /// given η = 1.5 (glass) and η´ = 1.0 (air): /// /// sinθ′ = (1.5 / 1.0)⋅sinθ /// /// Since sinθ′ can at maximum be 1.0, sinθ must at maximum be (1.0 / 1.5), otherwise the equation /// can no longer be satisfied. 
We can solve for sinθ using the following: /// /// sinθ = sqrt(1 - cos²θ) /// cosθ = R⋅n /// /// which yields: /// /// sinθ = sqrt(1 - (R⋅n)²) /// /// In case of sinθ > (1.0 / refraction ratio), we cannot refract and thus must reflect. This is /// called "total internal reflection". pub struct Dielectric { /// Refraction index. refraction: f64, } impl Dielectric { /// Create a new metallic material from a given intrinsic object color. /// /// * `refraction`: Refraction index. pub fn new(refraction: f64) -> Self { Dielectric { refraction } } /// Returns the refracted (trasmitted) ray. /// /// Based on Snell's law. /// /// * `ray`: Incident ray. /// * `normal`: Surface normal (in eta direction). /// * `refraction_ratio`: Refractive ratio (η over η´). pub fn refract(ray: &Vec3<f64>, normal: &Vec3<f64>, refraction_ratio: f64) -> Vec3<f64> { // the part of the refracted ray which is perpendicular to R´ let mut cos_theta = Vec3::dot(&(-(*ray)), normal); if cos_theta > 1.0 { cos_theta = 1.0; } let perpendicular = (*ray + *normal * cos_theta) * refraction_ratio; // the part that is parallel to R´ let parallel = *normal * (-((1.0 - perpendicular.length_squared()).abs()).sqrt()); perpendicular + parallel } /// Returns the reflectance on a dielectric surface at a given angle. /// /// Based on the polynomial approximation by Christophe Schlick. /// /// * `angle` - Angle of incoming ray. /// * `refraction_ratio`: - Refractive ratio (η over η´). pub fn reflectance(angle: f64, refraction_ratio: f64) -> f64 { let r0 = (1.0 - refraction_ratio) / (1.0 + refraction_ratio); let r0 = r0 * r0; r0 + (1.0 - r0) * (1.0 - angle).powf(5.0) } } impl Material<f64> for Dielectric { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // assume the material where the ray originates from is air let eta = 1.0; let eta_prime = self.refraction; let refraction_ratio = if rec.front_face { eta / eta_prime } else { eta_prime }; // Total internal reflection: if // // (η / η′)⋅sinθ > 1.0 // // we must not refract (and have to reflect) instead! let r = ray.direction().normalized(); // cosθ = R⋅n let mut cos_theta = Vec3::dot(&(-r), &rec.normal); if cos_theta > 1.0 { cos_theta = 1.0; } // sinθ = sqrt(1 - cos²θ) let sin_theta = (1.0 - cos_theta * cos_theta).sqrt(); // Snell's law let can_refract = refraction_ratio * sin_theta <= 1.0; // Schlick approximation let can_refract = can_refract && (Dielectric::reflectance(cos_theta, refraction_ratio)) < rtweekend::random(0.0..1.0); // direction of the scattered ray let direction = if !can_refract { // must reflect Metal::reflect(&r, &rec.normal) } else { // can refract Dielectric::refract(&r, &rec.normal, refraction_ratio) }; let scatter = Ray::new(rec.point, direction); // attenuation is always 1 since air/glass/diamond do not absorb let attenuation = Color::new3(1.0, 1.0, 1.0); Some((scatter, attenuation)) } }
/// /// Additionally, B is an additional vector for illustration purposes. /// The reflected ray (in between N and B) has the same angle as the incident ray V with regards to // the surface. It can be computed as V + B + B.
random_line_split
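The Metal comments in material.rs above derive specular reflection as V + 2B and then simplify it in reflect() to V - 2(V·N)N. Below is a minimal, self-contained sketch of that same formula using plain [f64; 3] arrays instead of the crate's Vec3 type (the helper names here are ad hoc illustrations, not part of the crate), checked on the textbook case of a ray bouncing off a horizontal surface.

// Standalone check of the specular reflection formula R = V - 2(V.N)N,
// using plain [f64; 3] vectors instead of the crate's Vec3 type.
fn dot(a: [f64; 3], b: [f64; 3]) -> f64 {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}

fn reflect(v: [f64; 3], n: [f64; 3]) -> [f64; 3] {
    let d = 2.0 * dot(v, n);
    [v[0] - d * n[0], v[1] - d * n[1], v[2] - d * n[2]]
}

fn main() {
    // Incident ray going down-right onto a horizontal surface with normal +Y:
    // it should bounce up-right at the same angle.
    let v = [1.0, -1.0, 0.0];
    let n = [0.0, 1.0, 0.0];
    assert_eq!(reflect(v, n), [1.0, 1.0, 0.0]);
    println!("reflected: {:?}", reflect(v, n));
}

With the normal pointing up, the incident direction (1, -1, 0) comes back as (1, 1, 0): the tangential component is kept and the normal component is flipped, which is exactly what the crate's reflect() computes.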
material.rs
use std::ops::{Add, Div, Mul, Sub}; use crate::color::Color; use crate::hittable::HitRecord; use crate::ray::Ray; use crate::rtweekend; use crate::vec::Vec3; /// Generic material trait. pub trait Material<T: Copy> { /// Scatter an incoming light ray on a material. /// /// Returns a scattered ray and the attenuation color if there is reflection. /// Otherwise, none is returned. /// /// * `ray` - Incoming light ray. /// * `rec` - Previous hit record of the ray on some object. fn scatter(&self, ray: &Ray<T>, rec: &HitRecord<T>) -> Option<(Ray<T>, Color)>; } /// Lambertian (diffuse) material. /// /// In our diffuse reflection model, a lambertian material will always both scatter and attenuate /// by its own reflectance (albedo). /// /// Should only be used for smooth matte surfaces, not rough matte ones. /// See https://www.cs.cmu.edu/afs/cs/academic/class/15462-f09/www/lec/lec8.pdf for explanation. pub struct Lambertian { /// Color of the object. albedo: Color, } impl Lambertian { /// Create a new diffuse material from a given intrinsic object color. pub fn new(albedo: Color) -> Self { Lambertian { albedo } } } impl Material<f64> for Lambertian { fn scatter(&self, _ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // Diffuse reflection: True Lambertian reflection. // We aim for a Lambertian distribution of the reflected rays, which has a distribution of // cos(phi) instead of cos³(phi) for random vectors inside the unit sphere. // To achieve this, we pick a random point on the surface of the unit sphere, which is done // by picking a random point inside the sphere and then normalizing that point. let random_unit_vec = rtweekend::random_vec_in_unit_sphere().normalized(); // Diffuse reflection: send out a new ray from the hit position point pointing towards a // random point on the surface of the sphere tangent to that hit point. // Possible problem: the recursion depth may be too deep, so we blow up the stack. Avoid // this by limiting the number of child rays. let scatter_direction = rec.normal + random_unit_vec; let scatter = Ray::new(rec.point, scatter_direction); Some((scatter, self.albedo)) } } /// Metal (specular) material. /// /// For smooth metal surfaces, light is not randomly scattered. Instead, the angle of the incident /// ray is equal to that of the specular outgoing ray. /// /// V N ^ ^ /// \ ^ / | /// \ | / | B /// v|/ | /// S ------------x------------ /// \ | /// \ | B /// v | /// /// The elements of the figure are: /// * S: Metal surface /// * V: Incident ray /// * N: Surface normal /// /// Additionally, B is an additional vector for illustration purposes. /// The reflected ray (in between N and B) has the same angle as the incident ray V with regards to // the surface. It can be computed as V + B + B. /// /// In our current design, N is a unit vector, but the same does not have to be true for V. /// Furthermore, V points inwards, so the sign has to be changed. /// All this is encoded in the reflect() function. pub struct Metal { /// Color of the object. albedo: Color, /// Fuziness of the specular reflections. fuzz: f64, } impl Metal { /// Create a new metallic material from a given intrinsic object color. /// /// * `albedo`: Intrinsic surface color. /// * `fuzz`: Fuzziness factor for specular reflection in the range [0.0, 1.0]. pub fn new(albedo: Color, fuzz: f64) -> Self { Metal { albedo, fuzz: rtweekend::clamp(fuzz, 0.0, 1.0), } } /// Returns the reflected ray. /// /// This basically just encodes the V + B + B term for the specular reflection. 
We flip the /// sign and simplify the term so it becomes V - 2B. pub fn reflect<T: Copy>(ray: &Vec3<T>, normal: &Vec3<T>) -> Vec3<T> where T: Add<Output = T> + Div<Output = T> + Mul<Output = T> + Mul<f64, Output = T> + Sub<Output = T> + From<f64> + Into<f64>, { let b = *normal * Vec3::dot(ray, normal); *ray - b * T::from(2.0) } } impl Material<f64> for Metal { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // specular reflection let direction = Metal::reflect(&ray.direction().normalized(), &rec.normal); // apply fuzzing let direction = direction + rtweekend::random_vec_in_unit_sphere() * self.fuzz; let scatter = Ray::new(rec.point, direction); if Vec3::dot(&scatter.direction(), &rec.normal) <= 0.0 { None } else { Some((scatter, self.albedo)) } } } /// Clear (dielectrics) material. /// /// Examples of dielectric materials are water, glass or diamond. /// When such a material it hit by a light ray, the ray is split into a reflected and a refracted /// (transmitted) ray. /// /// Refraction for dielectrics is described by Snell's law: /// /// η⋅sinθ = η′⋅sinθ′ /// /// where /// * θ/θ′: angles from the surface normal /// * η/η′: refractive indices (e.g. 1.0 for air, 1.3-1.7 for glass) /// /// R N /// \ ^ /// \ | /// η v| /// S ------------x------------ /// η′ |\ /// | \ /// | \ /// v v /// N´ R´ /// /// In the illustration above, R is the incident ray and N is the surface normal. θ is thus the /// angle between R and N, while θ′ is the angle between N´ and R´. In the illustration, the angles /// θ and θ′ are exactly the same, so the ray R would pass from air (η = 1.0) through air /// (η′ = 1.0). /// /// To calculate the angle θ′, we solve for sinθ′: /// /// sinθ′ = (η / η′)⋅sinθ /// /// We split R´ into two parts: one that is perpendicular to N´ and one that is parallel to N´: /// /// R´ = R′⊥ + R′∥ /// /// Solving for those parts yields: /// /// R′⊥ = (η / η′)⋅(R + cosθ⋅n) /// R′∥ = - sqrt(1 - |R′⊥|²)⋅n /// /// The next step is solving for cosθ. The dot product of two vectors can be expressed in terms of /// the cosine of the angle between them: /// /// a⋅b = |a||b| cosθ /// /// or, assuming unit vectors: /// /// a⋅b = cosθ /// /// Thus, we can rewrite R′⊥ as: /// /// R′⊥ = (η / η′)⋅(R + (-R⋅n)n) /// /// Sometimes, the refraction ratio η / η′ is too high (e.g. when a ray passes through glass and /// enters air), so a real solution to Snell's law does not exist. An example: /// /// sinθ′ = (η / η′)⋅sinθ /// /// given η = 1.5 (glass) and η´ = 1.0 (air): /// /// sinθ′ = (1.5 / 1.0)⋅sinθ /// /// Since sinθ′ can at maximum be 1.0, sinθ must at maximum be (1.0 / 1.5), otherwise the equation /// can no longer be satisfied. We can solve for sinθ using the following: /// /// sinθ = sqrt(1 - cos²θ) /// cosθ = R⋅n /// /// which yields: /// /// sinθ = sqrt(1 - (R⋅n)²) /// /// In case of sinθ > (1.0 / refraction ratio), we cannot refract and thus must reflect. This is /// called "total internal reflection". pub struct Dielectric { /// Refraction index. refraction: f64, } impl Dielectric { /// Create a new metallic material from a given intrinsic object color. /// /// * `refraction`: Refraction index. pub fn new(refraction: f64) -> Self { Dielectric { refraction } } /// Returns the refracted (trasmitted) ray. /// /// Based on Snell's law. /// /// * `ray`: Incident ray. /// * `normal`: Surface normal (in eta direction). /// * `refraction_ratio`: Refractive ratio (η over η´). 
pub fn refract(ray: &Vec3<f64>, normal: &Vec3<f64>, refraction_ratio: f64) -> Vec3<f64> { // the part of the refracted ray which is perpendicular to R´ let mut cos_theta = Vec3::dot(&(-(*ray)), normal); if cos_theta > 1.0 { cos_theta = 1.0; } let perpendicular = (*ray + *normal * cos_theta) * refraction_ratio; // the part that is parallel to R´ let parallel = *normal * (-((1.0 - perpendicular.length_squared()).abs()).sqrt()); perpendicular + parallel } /// Returns the reflectance on a dielectric surface at a given angle. /// /// Based on the polynomial approximation by Christophe Schlick. /// /// * `angle` - Angle of incoming ray. /// * `refraction_ratio`: - Refractive ratio (η over η´). pub fn reflectance(angle: f64, refraction_ratio: f64) -> f64 { let r0 = (1.0 - refraction_ratio) / (1.0 + refraction_ratio); let r0 = r0 * r0; r0 + (1.0 - r0) * (1.0 - angle).powf(5.0) } } impl Material<f64> for Dielectric { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // assume the material where the ray originates from is air let eta = 1.0; let eta_prime = self.refraction; let refraction_ratio = if rec.front_face { eta / eta_prime } else { eta_prime }; // Total internal reflection: if // // (η / η′)⋅sinθ > 1.0 // // we must not refract (
let r = ray.direction().normalized(); // cosθ = R⋅n let mut cos_theta = Vec3::dot(&(-r), &rec.normal); if cos_theta > 1.0 { cos_theta = 1.0; } // sinθ = sqrt(1 - cos²θ) let sin_theta = (1.0 - cos_theta * cos_theta).sqrt(); // Snell's law let can_refract = refraction_ratio * sin_theta <= 1.0; // Schlick approximation let can_refract = can_refract && (Dielectric::reflectance(cos_theta, refraction_ratio)) < rtweekend::random(0.0..1.0); // direction of the scattered ray let direction = if !can_refract { // must reflect Metal::reflect(&r, &rec.normal) } else { // can refract Dielectric::refract(&r, &rec.normal, refraction_ratio) }; let scatter = Ray::new(rec.point, direction); // attenuation is always 1 since air/glass/diamond do not absorb let attenuation = Color::new3(1.0, 1.0, 1.0); Some((scatter, attenuation)) } }
and have to reflect) instead!
conditional_block
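The Dielectric comments above work through Snell's law, sinθ′ = (η / η′)⋅sinθ, and note that refraction becomes impossible once (η / η′)⋅sinθ exceeds 1. The sketch below reproduces that check numerically for a ray leaving glass into air (indices assumed to be 1.5 and 1.0); it mirrors the can_refract test in scatter() but is standalone.

// Numeric check of Snell's law and the total-internal-reflection condition
// described above, for a ray leaving glass (eta = 1.5) into air (eta' = 1.0).
fn can_refract(cos_theta: f64, refraction_ratio: f64) -> bool {
    // sin(theta) = sqrt(1 - cos^2(theta)); refraction is only possible while
    // (eta / eta') * sin(theta) stays <= 1.0.
    let sin_theta = (1.0 - cos_theta * cos_theta).sqrt();
    refraction_ratio * sin_theta <= 1.0
}

fn main() {
    let refraction_ratio = 1.5 / 1.0; // glass -> air

    // Near-normal incidence (theta ~ 25 degrees): refraction is possible.
    let cos_near_normal = (25.0_f64).to_radians().cos();
    assert!(can_refract(cos_near_normal, refraction_ratio));

    // Steep incidence (theta ~ 60 degrees): sin(theta) > 1/1.5, so the ray
    // must be reflected instead ("total internal reflection").
    let cos_steep = (60.0_f64).to_radians().cos();
    assert!(!can_refract(cos_steep, refraction_ratio));
}

The critical angle for this ratio is arcsin(1/1.5) ≈ 41.8°, so the 25° ray refracts while the 60° ray is totally internally reflected.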
material.rs
use std::ops::{Add, Div, Mul, Sub}; use crate::color::Color; use crate::hittable::HitRecord; use crate::ray::Ray; use crate::rtweekend; use crate::vec::Vec3; /// Generic material trait. pub trait Material<T: Copy> { /// Scatter an incoming light ray on a material. /// /// Returns a scattered ray and the attenuation color if there is reflection. /// Otherwise, none is returned. /// /// * `ray` - Incoming light ray. /// * `rec` - Previous hit record of the ray on some object. fn scatter(&self, ray: &Ray<T>, rec: &HitRecord<T>) -> Option<(Ray<T>, Color)>; } /// Lambertian (diffuse) material. /// /// In our diffuse reflection model, a lambertian material will always both scatter and attenuate /// by its own reflectance (albedo). /// /// Should only be used for smooth matte surfaces, not rough matte ones. /// See https://www.cs.cmu.edu/afs/cs/academic/class/15462-f09/www/lec/lec8.pdf for explanation. pub struct Lambertian { /// Color of the object. albedo: Color, } impl Lambertian { /// Create a new diffuse material from a given intrinsic object color. pub fn new(albedo: Color) -> Self
} impl Material<f64> for Lambertian { fn scatter(&self, _ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // Diffuse reflection: True Lambertian reflection. // We aim for a Lambertian distribution of the reflected rays, which has a distribution of // cos(phi) instead of cos³(phi) for random vectors inside the unit sphere. // To achieve this, we pick a random point on the surface of the unit sphere, which is done // by picking a random point inside the sphere and then normalizing that point. let random_unit_vec = rtweekend::random_vec_in_unit_sphere().normalized(); // Diffuse reflection: send out a new ray from the hit position point pointing towards a // random point on the surface of the sphere tangent to that hit point. // Possible problem: the recursion depth may be too deep, so we blow up the stack. Avoid // this by limiting the number of child rays. let scatter_direction = rec.normal + random_unit_vec; let scatter = Ray::new(rec.point, scatter_direction); Some((scatter, self.albedo)) } } /// Metal (specular) material. /// /// For smooth metal surfaces, light is not randomly scattered. Instead, the angle of the incident /// ray is equal to that of the specular outgoing ray. /// /// V N ^ ^ /// \ ^ / | /// \ | / | B /// v|/ | /// S ------------x------------ /// \ | /// \ | B /// v | /// /// The elements of the figure are: /// * S: Metal surface /// * V: Incident ray /// * N: Surface normal /// /// Additionally, B is an additional vector for illustration purposes. /// The reflected ray (in between N and B) has the same angle as the incident ray V with regards to // the surface. It can be computed as V + B + B. /// /// In our current design, N is a unit vector, but the same does not have to be true for V. /// Furthermore, V points inwards, so the sign has to be changed. /// All this is encoded in the reflect() function. pub struct Metal { /// Color of the object. albedo: Color, /// Fuziness of the specular reflections. fuzz: f64, } impl Metal { /// Create a new metallic material from a given intrinsic object color. /// /// * `albedo`: Intrinsic surface color. /// * `fuzz`: Fuzziness factor for specular reflection in the range [0.0, 1.0]. pub fn new(albedo: Color, fuzz: f64) -> Self { Metal { albedo, fuzz: rtweekend::clamp(fuzz, 0.0, 1.0), } } /// Returns the reflected ray. /// /// This basically just encodes the V + B + B term for the specular reflection. We flip the /// sign and simplify the term so it becomes V - 2B. pub fn reflect<T: Copy>(ray: &Vec3<T>, normal: &Vec3<T>) -> Vec3<T> where T: Add<Output = T> + Div<Output = T> + Mul<Output = T> + Mul<f64, Output = T> + Sub<Output = T> + From<f64> + Into<f64>, { let b = *normal * Vec3::dot(ray, normal); *ray - b * T::from(2.0) } } impl Material<f64> for Metal { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // specular reflection let direction = Metal::reflect(&ray.direction().normalized(), &rec.normal); // apply fuzzing let direction = direction + rtweekend::random_vec_in_unit_sphere() * self.fuzz; let scatter = Ray::new(rec.point, direction); if Vec3::dot(&scatter.direction(), &rec.normal) <= 0.0 { None } else { Some((scatter, self.albedo)) } } } /// Clear (dielectrics) material. /// /// Examples of dielectric materials are water, glass or diamond. /// When such a material it hit by a light ray, the ray is split into a reflected and a refracted /// (transmitted) ray. 
/// /// Refraction for dielectrics is described by Snell's law: /// /// η⋅sinθ = η′⋅sinθ′ /// /// where /// * θ/θ′: angles from the surface normal /// * η/η′: refractive indices (e.g. 1.0 for air, 1.3-1.7 for glass) /// /// R N /// \ ^ /// \ | /// η v| /// S ------------x------------ /// η′ |\ /// | \ /// | \ /// v v /// N´ R´ /// /// In the illustration above, R is the incident ray and N is the surface normal. θ is thus the /// angle between R and N, while θ′ is the angle between N´ and R´. In the illustration, the angles /// θ and θ′ are exactly the same, so the ray R would pass from air (η = 1.0) through air /// (η′ = 1.0). /// /// To calculate the angle θ′, we solve for sinθ′: /// /// sinθ′ = (η / η′)⋅sinθ /// /// We split R´ into two parts: one that is perpendicular to N´ and one that is parallel to N´: /// /// R´ = R′⊥ + R′∥ /// /// Solving for those parts yields: /// /// R′⊥ = (η / η′)⋅(R + cosθ⋅n) /// R′∥ = - sqrt(1 - |R′⊥|²)⋅n /// /// The next step is solving for cosθ. The dot product of two vectors can be expressed in terms of /// the cosine of the angle between them: /// /// a⋅b = |a||b| cosθ /// /// or, assuming unit vectors: /// /// a⋅b = cosθ /// /// Thus, we can rewrite R′⊥ as: /// /// R′⊥ = (η / η′)⋅(R + (-R⋅n)n) /// /// Sometimes, the refraction ratio η / η′ is too high (e.g. when a ray passes through glass and /// enters air), so a real solution to Snell's law does not exist. An example: /// /// sinθ′ = (η / η′)⋅sinθ /// /// given η = 1.5 (glass) and η´ = 1.0 (air): /// /// sinθ′ = (1.5 / 1.0)⋅sinθ /// /// Since sinθ′ can at maximum be 1.0, sinθ must at maximum be (1.0 / 1.5), otherwise the equation /// can no longer be satisfied. We can solve for sinθ using the following: /// /// sinθ = sqrt(1 - cos²θ) /// cosθ = R⋅n /// /// which yields: /// /// sinθ = sqrt(1 - (R⋅n)²) /// /// In case of sinθ > (1.0 / refraction ratio), we cannot refract and thus must reflect. This is /// called "total internal reflection". pub struct Dielectric { /// Refraction index. refraction: f64, } impl Dielectric { /// Create a new metallic material from a given intrinsic object color. /// /// * `refraction`: Refraction index. pub fn new(refraction: f64) -> Self { Dielectric { refraction } } /// Returns the refracted (trasmitted) ray. /// /// Based on Snell's law. /// /// * `ray`: Incident ray. /// * `normal`: Surface normal (in eta direction). /// * `refraction_ratio`: Refractive ratio (η over η´). pub fn refract(ray: &Vec3<f64>, normal: &Vec3<f64>, refraction_ratio: f64) -> Vec3<f64> { // the part of the refracted ray which is perpendicular to R´ let mut cos_theta = Vec3::dot(&(-(*ray)), normal); if cos_theta > 1.0 { cos_theta = 1.0; } let perpendicular = (*ray + *normal * cos_theta) * refraction_ratio; // the part that is parallel to R´ let parallel = *normal * (-((1.0 - perpendicular.length_squared()).abs()).sqrt()); perpendicular + parallel } /// Returns the reflectance on a dielectric surface at a given angle. /// /// Based on the polynomial approximation by Christophe Schlick. /// /// * `angle` - Angle of incoming ray. /// * `refraction_ratio`: - Refractive ratio (η over η´). 
pub fn reflectance(angle: f64, refraction_ratio: f64) -> f64 { let r0 = (1.0 - refraction_ratio) / (1.0 + refraction_ratio); let r0 = r0 * r0; r0 + (1.0 - r0) * (1.0 - angle).powf(5.0) } } impl Material<f64> for Dielectric { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // assume the material where the ray originates from is air let eta = 1.0; let eta_prime = self.refraction; let refraction_ratio = if rec.front_face { eta / eta_prime } else { eta_prime }; // Total internal reflection: if // // (η / η′)⋅sinθ > 1.0 // // we must not refract (and have to reflect) instead! let r = ray.direction().normalized(); // cosθ = R⋅n let mut cos_theta = Vec3::dot(&(-r), &rec.normal); if cos_theta > 1.0 { cos_theta = 1.0; } // sinθ = sqrt(1 - cos²θ) let sin_theta = (1.0 - cos_theta * cos_theta).sqrt(); // Snell's law let can_refract = refraction_ratio * sin_theta <= 1.0; // Schlick approximation let can_refract = can_refract && (Dielectric::reflectance(cos_theta, refraction_ratio)) < rtweekend::random(0.0..1.0); // direction of the scattered ray let direction = if !can_refract { // must reflect Metal::reflect(&r, &rec.normal) } else { // can refract Dielectric::refract(&r, &rec.normal, refraction_ratio) }; let scatter = Ray::new(rec.point, direction); // attenuation is always 1 since air/glass/diamond do not absorb let attenuation = Color::new3(1.0, 1.0, 1.0); Some((scatter, attenuation)) } }
{ Lambertian { albedo } }
identifier_body
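reflectance() above implements Schlick's approximation r0 + (1 - r0)(1 - cosθ)^5 with r0 = ((1 - ratio) / (1 + ratio))². A quick standalone evaluation (refraction ratio assumed to be 1.0 / 1.5, i.e. air into glass) shows the behaviour scatter() relies on: a few percent reflection head-on, rising towards total reflection at grazing angles.

// Standalone evaluation of Schlick's approximation for a ray entering glass
// from air (refraction ratio assumed to be 1.0 / 1.5).
fn reflectance(cos_theta: f64, refraction_ratio: f64) -> f64 {
    let r0 = (1.0 - refraction_ratio) / (1.0 + refraction_ratio);
    let r0 = r0 * r0;
    r0 + (1.0 - r0) * (1.0 - cos_theta).powf(5.0)
}

fn main() {
    let ratio = 1.0 / 1.5;

    // Head-on (cos = 1.0): only the base reflectance r0 = 0.04 remains.
    println!("normal incidence:  {:.3}", reflectance(1.0, ratio));

    // Grazing (cos -> 0.0): reflectance climbs towards 1.0, which is why the
    // scatter() code above reflects more rays at shallow viewing angles.
    println!("grazing incidence: {:.3}", reflectance(0.0, ratio));
}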
material.rs
use std::ops::{Add, Div, Mul, Sub}; use crate::color::Color; use crate::hittable::HitRecord; use crate::ray::Ray; use crate::rtweekend; use crate::vec::Vec3; /// Generic material trait. pub trait Material<T: Copy> { /// Scatter an incoming light ray on a material. /// /// Returns a scattered ray and the attenuation color if there is reflection. /// Otherwise, none is returned. /// /// * `ray` - Incoming light ray. /// * `rec` - Previous hit record of the ray on some object. fn scatter(&self, ray: &Ray<T>, rec: &HitRecord<T>) -> Option<(Ray<T>, Color)>; } /// Lambertian (diffuse) material. /// /// In our diffuse reflection model, a lambertian material will always both scatter and attenuate /// by its own reflectance (albedo). /// /// Should only be used for smooth matte surfaces, not rough matte ones. /// See https://www.cs.cmu.edu/afs/cs/academic/class/15462-f09/www/lec/lec8.pdf for explanation. pub struct Lambertian { /// Color of the object. albedo: Color, } impl Lambertian { /// Create a new diffuse material from a given intrinsic object color. pub fn new(albedo: Color) -> Self { Lambertian { albedo } } } impl Material<f64> for Lambertian { fn scatter(&self, _ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // Diffuse reflection: True Lambertian reflection. // We aim for a Lambertian distribution of the reflected rays, which has a distribution of // cos(phi) instead of cos³(phi) for random vectors inside the unit sphere. // To achieve this, we pick a random point on the surface of the unit sphere, which is done // by picking a random point inside the sphere and then normalizing that point. let random_unit_vec = rtweekend::random_vec_in_unit_sphere().normalized(); // Diffuse reflection: send out a new ray from the hit position point pointing towards a // random point on the surface of the sphere tangent to that hit point. // Possible problem: the recursion depth may be too deep, so we blow up the stack. Avoid // this by limiting the number of child rays. let scatter_direction = rec.normal + random_unit_vec; let scatter = Ray::new(rec.point, scatter_direction); Some((scatter, self.albedo)) } } /// Metal (specular) material. /// /// For smooth metal surfaces, light is not randomly scattered. Instead, the angle of the incident /// ray is equal to that of the specular outgoing ray. /// /// V N ^ ^ /// \ ^ / | /// \ | / | B /// v|/ | /// S ------------x------------ /// \ | /// \ | B /// v | /// /// The elements of the figure are: /// * S: Metal surface /// * V: Incident ray /// * N: Surface normal /// /// Additionally, B is an additional vector for illustration purposes. /// The reflected ray (in between N and B) has the same angle as the incident ray V with regards to // the surface. It can be computed as V + B + B. /// /// In our current design, N is a unit vector, but the same does not have to be true for V. /// Furthermore, V points inwards, so the sign has to be changed. /// All this is encoded in the reflect() function. pub struct M
{ /// Color of the object. albedo: Color, /// Fuziness of the specular reflections. fuzz: f64, } impl Metal { /// Create a new metallic material from a given intrinsic object color. /// /// * `albedo`: Intrinsic surface color. /// * `fuzz`: Fuzziness factor for specular reflection in the range [0.0, 1.0]. pub fn new(albedo: Color, fuzz: f64) -> Self { Metal { albedo, fuzz: rtweekend::clamp(fuzz, 0.0, 1.0), } } /// Returns the reflected ray. /// /// This basically just encodes the V + B + B term for the specular reflection. We flip the /// sign and simplify the term so it becomes V - 2B. pub fn reflect<T: Copy>(ray: &Vec3<T>, normal: &Vec3<T>) -> Vec3<T> where T: Add<Output = T> + Div<Output = T> + Mul<Output = T> + Mul<f64, Output = T> + Sub<Output = T> + From<f64> + Into<f64>, { let b = *normal * Vec3::dot(ray, normal); *ray - b * T::from(2.0) } } impl Material<f64> for Metal { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // specular reflection let direction = Metal::reflect(&ray.direction().normalized(), &rec.normal); // apply fuzzing let direction = direction + rtweekend::random_vec_in_unit_sphere() * self.fuzz; let scatter = Ray::new(rec.point, direction); if Vec3::dot(&scatter.direction(), &rec.normal) <= 0.0 { None } else { Some((scatter, self.albedo)) } } } /// Clear (dielectrics) material. /// /// Examples of dielectric materials are water, glass or diamond. /// When such a material it hit by a light ray, the ray is split into a reflected and a refracted /// (transmitted) ray. /// /// Refraction for dielectrics is described by Snell's law: /// /// η⋅sinθ = η′⋅sinθ′ /// /// where /// * θ/θ′: angles from the surface normal /// * η/η′: refractive indices (e.g. 1.0 for air, 1.3-1.7 for glass) /// /// R N /// \ ^ /// \ | /// η v| /// S ------------x------------ /// η′ |\ /// | \ /// | \ /// v v /// N´ R´ /// /// In the illustration above, R is the incident ray and N is the surface normal. θ is thus the /// angle between R and N, while θ′ is the angle between N´ and R´. In the illustration, the angles /// θ and θ′ are exactly the same, so the ray R would pass from air (η = 1.0) through air /// (η′ = 1.0). /// /// To calculate the angle θ′, we solve for sinθ′: /// /// sinθ′ = (η / η′)⋅sinθ /// /// We split R´ into two parts: one that is perpendicular to N´ and one that is parallel to N´: /// /// R´ = R′⊥ + R′∥ /// /// Solving for those parts yields: /// /// R′⊥ = (η / η′)⋅(R + cosθ⋅n) /// R′∥ = - sqrt(1 - |R′⊥|²)⋅n /// /// The next step is solving for cosθ. The dot product of two vectors can be expressed in terms of /// the cosine of the angle between them: /// /// a⋅b = |a||b| cosθ /// /// or, assuming unit vectors: /// /// a⋅b = cosθ /// /// Thus, we can rewrite R′⊥ as: /// /// R′⊥ = (η / η′)⋅(R + (-R⋅n)n) /// /// Sometimes, the refraction ratio η / η′ is too high (e.g. when a ray passes through glass and /// enters air), so a real solution to Snell's law does not exist. An example: /// /// sinθ′ = (η / η′)⋅sinθ /// /// given η = 1.5 (glass) and η´ = 1.0 (air): /// /// sinθ′ = (1.5 / 1.0)⋅sinθ /// /// Since sinθ′ can at maximum be 1.0, sinθ must at maximum be (1.0 / 1.5), otherwise the equation /// can no longer be satisfied. We can solve for sinθ using the following: /// /// sinθ = sqrt(1 - cos²θ) /// cosθ = R⋅n /// /// which yields: /// /// sinθ = sqrt(1 - (R⋅n)²) /// /// In case of sinθ > (1.0 / refraction ratio), we cannot refract and thus must reflect. This is /// called "total internal reflection". 
pub struct Dielectric { /// Refraction index. refraction: f64, } impl Dielectric { /// Create a new metallic material from a given intrinsic object color. /// /// * `refraction`: Refraction index. pub fn new(refraction: f64) -> Self { Dielectric { refraction } } /// Returns the refracted (trasmitted) ray. /// /// Based on Snell's law. /// /// * `ray`: Incident ray. /// * `normal`: Surface normal (in eta direction). /// * `refraction_ratio`: Refractive ratio (η over η´). pub fn refract(ray: &Vec3<f64>, normal: &Vec3<f64>, refraction_ratio: f64) -> Vec3<f64> { // the part of the refracted ray which is perpendicular to R´ let mut cos_theta = Vec3::dot(&(-(*ray)), normal); if cos_theta > 1.0 { cos_theta = 1.0; } let perpendicular = (*ray + *normal * cos_theta) * refraction_ratio; // the part that is parallel to R´ let parallel = *normal * (-((1.0 - perpendicular.length_squared()).abs()).sqrt()); perpendicular + parallel } /// Returns the reflectance on a dielectric surface at a given angle. /// /// Based on the polynomial approximation by Christophe Schlick. /// /// * `angle` - Angle of incoming ray. /// * `refraction_ratio`: - Refractive ratio (η over η´). pub fn reflectance(angle: f64, refraction_ratio: f64) -> f64 { let r0 = (1.0 - refraction_ratio) / (1.0 + refraction_ratio); let r0 = r0 * r0; r0 + (1.0 - r0) * (1.0 - angle).powf(5.0) } } impl Material<f64> for Dielectric { fn scatter(&self, ray: &Ray<f64>, rec: &HitRecord<f64>) -> Option<(Ray<f64>, Color)> { // assume the material where the ray originates from is air let eta = 1.0; let eta_prime = self.refraction; let refraction_ratio = if rec.front_face { eta / eta_prime } else { eta_prime }; // Total internal reflection: if // // (η / η′)⋅sinθ > 1.0 // // we must not refract (and have to reflect) instead! let r = ray.direction().normalized(); // cosθ = R⋅n let mut cos_theta = Vec3::dot(&(-r), &rec.normal); if cos_theta > 1.0 { cos_theta = 1.0; } // sinθ = sqrt(1 - cos²θ) let sin_theta = (1.0 - cos_theta * cos_theta).sqrt(); // Snell's law let can_refract = refraction_ratio * sin_theta <= 1.0; // Schlick approximation let can_refract = can_refract && (Dielectric::reflectance(cos_theta, refraction_ratio)) < rtweekend::random(0.0..1.0); // direction of the scattered ray let direction = if !can_refract { // must reflect Metal::reflect(&r, &rec.normal) } else { // can refract Dielectric::refract(&r, &rec.normal, refraction_ratio) }; let scatter = Ray::new(rec.point, direction); // attenuation is always 1 since air/glass/diamond do not absorb let attenuation = Color::new3(1.0, 1.0, 1.0); Some((scatter, attenuation)) } }
etal
identifier_name
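The Lambertian scatter() shown above relies on rtweekend::random_vec_in_unit_sphere(), which is not part of this file. A common way to implement such a helper is rejection sampling; the sketch below is only an assumption about how it might look (it uses the external rand crate and plain arrays, and may differ from the project's actual helper).

// Hypothetical rejection-sampling sketch of random_vec_in_unit_sphere();
// the real rtweekend helper may be implemented differently.
use rand::Rng;

fn random_vec_in_unit_sphere() -> [f64; 3] {
    let mut rng = rand::thread_rng();
    loop {
        // Draw a candidate uniformly from the enclosing [-1, 1]^3 cube...
        let v = [
            rng.gen_range(-1.0..1.0),
            rng.gen_range(-1.0..1.0),
            rng.gen_range(-1.0..1.0),
        ];
        // ...and keep it only if it falls inside the unit sphere, so the
        // accepted points are uniform over the sphere's volume.
        if v[0] * v[0] + v[1] * v[1] + v[2] * v[2] < 1.0 {
            return v;
        }
    }
}

fn main() {
    let v = random_vec_in_unit_sphere();
    println!("sample inside unit sphere: {:?}", v);
}

Normalizing an accepted sample, as scatter() does, then yields a point on the sphere's surface, giving the cos(phi) distribution discussed in the Lambertian comments.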
singleVisit.component.ts
import { Component, OnDestroy, AfterViewInit, OnInit } from '@angular/core'; import {GoogleMaps, GoogleMap,GoogleMapsEvent,LatLng,CameraPosition,MarkerOptions,Marker} from '@ionic-native/google-maps'; import { Router } from '@angular/router'; import { SelectVisitServices} from '../services/selectVisit.service'; import * as _ from 'lodash'; import {HeaderService} from '../../shared/headerComponent/header.services'; import {TranslateService} from 'ng2-translate/ng2-translate'; import * as moment from 'moment'; import {ISingleVisitAddMarkerParams} from '../services/selectVisit.interface'; declare var window:any; declare var google:any; @Component({ templateUrl: './singleVisit.component.html', styleUrls:['./singleVisit.scss'] }) export class SingleVisitComponent implements OnDestroy, AfterViewInit, OnInit{ public errorMessage: string; private mapInstance:GoogleMap = null; public displayFooterSection = true; private markerIconBasePath; private officeLocationMarkerUrl: string; private visitLocationMarkerUrl: Array<string>; public visitStartDate: string; public visitEndDate: string; public selectedMapCityName; private displayVisitLocation: any = []; private displayAccentureOfficeLocation: any = []; private defaultShowInfoWindow; private _apiLoadingPromise: Promise<any>; private visitLocationWidth; private visitLocationHeight; private accentureOfficeWidth; private accentureOfficeHeight; private mapZoomLevel = 3; private googleMapDesktopAPIKey = 'AIzaSyD5yFBB69RwOsb6sVRsQEapd9ynwCuoBYo'; private googleMapInfoWindow; constructor( private googleMaps: GoogleMaps, private selectvisit: SelectVisitServices, private headerService: HeaderService, private _translate: TranslateService, private _router: Router) { if(window.cordova){ this.markerIconBasePath = 'www/resources/images/singlevisit/'; this.visitLocationWidth = 17; this.visitLocationHeight = 19; this.accentureOfficeWidth = 11; this.accentureOfficeHeight = 15; }else{ this.markerIconBasePath = './resources/images/singlevisit/'; this.visitLocationWidth = 27; this.visitLocationHeight = 29; this.accentureOfficeWidth = 21; this.accentureOfficeHeight = 15; } this.officeLocationMarkerUrl = this.markerIconBasePath+'AccentureOffice.png'; this.visitLocationMarkerUrl = [ this.markerIconBasePath+'pin_1.png', this.markerIconBasePath+'pin_2.png', this.markerIconBasePath+'pin_3.png', this.markerIconBasePath+'pin_4.png', this.markerIconBasePath+'pin_5.png', this.markerIconBasePath+'pin_6.png', this.markerIconBasePath+'pin_0.png']; } ngOnInit(){ let obj = { title:this._translate.instant('visitLocation'), isHomeBtn: false, isBackBtn: true, isMenuBtn: true }; this.headerService.setHeaderObject(obj); this.visitStartDate = this.selectvisit.selectedVisitObj.VisitStartDate; this.visitEndDate = this.selectvisit.selectedVisitObj.VisitEndDate; this.defaultShowInfoWindow = false; if(this.headerService.isSideMenuClick){ this.displayFooterSection = false; } } // Load map only after view is initialized ngAfterViewInit() { this.loadMap(); } loadMap() { this.selectvisit.getVisitLocationDetails(this.selectvisit.selectedVisitObj.VisitID) .subscribe(locations => this.successLocationCallback(locations), error => this.errorMessage = <any>error); } private successLocationCallback(locations: any[]){ // create a new map by passing HTMLElement let element: HTMLElement = document.getElementById('flatMapContainerPhone'); if(window.cordova){ this.mapInstance = this.googleMaps.create(element); // listen to MAP_READY event // You must wait for this event to fire before adding something to the map 
or modifying it in anyway this.mapInstance.one(GoogleMapsEvent.MAP_READY).then( () => { console.log('Map is ready!'); this.fetchMapCoordinatesAndDisplay(locations); } ); }else{ let me = this; if( !this.isApiLoaded() ){ this.loadApi(); this._apiLoadingPromise.then( () => { me.__onGoogleLoaded(locations); }); }else{ me.__onGoogleLoaded(locations); } } }; private fetchMapCoordinatesAndDisplay(locations){ this.headerService.setGoogleMapInstance(this.mapInstance); // Now you can add elements to the map like the marker this.filterVisitAndOfficeLocations(locations|| []); this.addVisitLocationMarker(); this.addAccentureLocationMarker(); this.zoomToCurrentLocation(); } private _loadGoogleMapAPIScript(){ let script = (<any>document).createElement('script'); script.async = true; script.defer = true; script.src = `https://maps.googleapis.com/maps/api/js?callback=__onGoogleLoaded&key=${this.googleMapDesktopAPIKey}`; script.type = 'text/javascript'; (<any>document).getElementsByTagName('head')[0].appendChild(script); } isApiLoaded(): boolean { return (<any>window).google ? true : false; } loadApi(): void{ if(!this._apiLoadingPromise){ this._apiLoadingPromise = new Promise( (resolve) => { (<any>window)['__onGoogleLoaded'] = (ev) => { console.log('google maps api loaded'); resolve('google maps api loaded'); } this._loadGoogleMapAPIScript(); }); } } public __onGoogleLoaded(locations){ let mapProp = { center: new google.maps.LatLng(51.508742,-0.120850), zoom: this.mapZoomLevel, tilt: 30 }; let element: HTMLElement = document.getElementById('flatMapContainerPhone'); this.mapInstance = new google.maps.Map(element, mapProp); this.fetchMapCoordinatesAndDisplay(locations); } private fetchPositionLatLong(latitude, longtitude){ let position; if(window.cordova){ position = new LatLng(latitude, longtitude); }else{ position = new google.maps.LatLng(latitude, longtitude); } return position; } private filterVisitAndOfficeLocations(clientvisitLocations: any[]): void { let me = this; //visit Location let clientLocationGrouped = _.groupBy(clientvisitLocations[0], function(locationObj: any){ return locationObj.CityName; }); _.forEach(clientLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); me.displayVisitLocation.push(locationObj); }); //Accenture Offices let visitLocationGrouped = _.groupBy(clientvisitLocations[1], function(locationObj: any){ return locationObj.CityName; }); let accentureOffices = []; _.forEach(visitLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); accentureOffices.push(locationObj); }); accentureOffices.map(function(locationObj, key){ let filteredData = _.filter(me.displayVisitLocation, function(o) { return o.CityName === locationObj.CityName; }); if(!filteredData.length){ me.displayAccentureOfficeLocation.push(locationObj); } }); } private addVisitLocationMarker() : void{ let me = this, counter = 0; me.displayVisitLocation.map(function(locationObj, key){ if(!counter){ me.selectedMapCityName = locationObj.CityName + ' '+ moment(locationObj.StartDate).format('Do MMM YYYY'); } // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longtitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: (counter<6) ? 
me.visitLocationMarkerUrl[counter] : me.visitLocationMarkerUrl[6], detail: locationObj, width: me.visitLocationWidth, height: me.visitLocationHeight }); counter++; }); } private addAccentureLocationMarker(): void{ let me = this; _.forEach(me.displayAccentureOfficeLocation, function(locationObj, key){ // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: me.officeLocationMarkerUrl, width: me.accentureOfficeWidth, height: me.accentureOfficeHeight }); }); }; private zoomToCurrentLocation(){ if(window.cordova){ //for mobile this.mapInstance.getMyLocation().then(res => { console.log('Give it to me' + res.latLng); let position = { target: res.latLng, zoom: this.mapZoomLevel, tilt: 30 }; if(this.mapInstance){ this.mapInstance.animateCamera(position); } }); }else{ //for desktop // Try HTML5 geolocation. let me = this; if (navigator.geolocation) { navigator.geolocation.getCurrentPosition(function(position) { let pos = new google.maps.LatLng(position.coords.latitude, position.coords.longitude); if(me.mapInstance){ me.mapInstance.setCenter(pos); } }, function() { //user denied permission }); } } } private addMapMarker(markerObj: ISingleVisitAddMarkerParams){ let me = this; if(this.mapInstance){ if(window.cordova){ //for mobile // create new marker let markerOptions: MarkerOptions = { position: markerObj.position, title: markerObj.title, icon: { url: markerObj.icon, size: { width: markerObj.width, height: markerObj.height } } }; this.mapInstance.addMarker(markerOptions) .then((marker: Marker) => { if(typeof markerObj.detail !== 'undefined'){ marker.set('custominfo', markerObj.detail); } if(!me.defaultShowInfoWindow)
marker.addEventListener('click').subscribe((mobj)=>{ marker.showInfoWindow(); me.displayClickedMarkerDetails(mobj.get('custominfo')); }); }); }else{ //for desktop let image = { url: markerObj.icon, // image is 512 x 512 scaledSize : new google.maps.Size(markerObj.width, markerObj.height) }; let markerOptions: any = { position: markerObj.position, title: markerObj.title, icon: image, map: this.mapInstance }; let marker = new google.maps.Marker(markerOptions); if(!this.googleMapInfoWindow){ this.googleMapInfoWindow = new google.maps.InfoWindow(); } if(!me.defaultShowInfoWindow){ this.googleMapInfoWindow.setContent(markerObj.detail.CityName); this.googleMapInfoWindow.open(this.mapInstance, marker); me.defaultShowInfoWindow = true; } google.maps.event.addListener(marker, 'click', (function(detailObj, marker) { return function() { if(me.googleMapInfoWindow){ me.googleMapInfoWindow.close(); } me.googleMapInfoWindow.setContent(detailObj.CityName); me.googleMapInfoWindow.open(this.mapInstance, marker); me.displayClickedMarkerDetails(detailObj); } })(markerObj.detail, marker)); } } }; private displayClickedMarkerDetails(clickMarkerObj: any){ if(clickMarkerObj){ this.selectedMapCityName = clickMarkerObj.CityName + ' '+ moment(clickMarkerObj.StartDate).format('Do MMM YYYY'); } } ngOnDestroy() { if(this.mapInstance){ if(window.cordova){ //for mobile this.mapInstance.remove(); } this.mapInstance = null; } } goToWelcomeView(){ this._router.navigate(['/visit/welcomevisit']); } }
{ marker.showInfoWindow(); me.defaultShowInfoWindow = true; }
conditional_block
singleVisit.component.ts
import { Component, OnDestroy, AfterViewInit, OnInit } from '@angular/core'; import {GoogleMaps, GoogleMap,GoogleMapsEvent,LatLng,CameraPosition,MarkerOptions,Marker} from '@ionic-native/google-maps'; import { Router } from '@angular/router'; import { SelectVisitServices} from '../services/selectVisit.service'; import * as _ from 'lodash'; import {HeaderService} from '../../shared/headerComponent/header.services'; import {TranslateService} from 'ng2-translate/ng2-translate'; import * as moment from 'moment'; import {ISingleVisitAddMarkerParams} from '../services/selectVisit.interface'; declare var window:any; declare var google:any; @Component({ templateUrl: './singleVisit.component.html', styleUrls:['./singleVisit.scss'] }) export class SingleVisitComponent implements OnDestroy, AfterViewInit, OnInit{ public errorMessage: string; private mapInstance:GoogleMap = null; public displayFooterSection = true; private markerIconBasePath; private officeLocationMarkerUrl: string; private visitLocationMarkerUrl: Array<string>; public visitStartDate: string; public visitEndDate: string; public selectedMapCityName; private displayVisitLocation: any = []; private displayAccentureOfficeLocation: any = []; private defaultShowInfoWindow; private _apiLoadingPromise: Promise<any>; private visitLocationWidth; private visitLocationHeight; private accentureOfficeWidth; private accentureOfficeHeight; private mapZoomLevel = 3; private googleMapDesktopAPIKey = 'AIzaSyD5yFBB69RwOsb6sVRsQEapd9ynwCuoBYo'; private googleMapInfoWindow; constructor( private googleMaps: GoogleMaps, private selectvisit: SelectVisitServices, private headerService: HeaderService, private _translate: TranslateService, private _router: Router) { if(window.cordova){ this.markerIconBasePath = 'www/resources/images/singlevisit/'; this.visitLocationWidth = 17; this.visitLocationHeight = 19; this.accentureOfficeWidth = 11; this.accentureOfficeHeight = 15; }else{ this.markerIconBasePath = './resources/images/singlevisit/'; this.visitLocationWidth = 27; this.visitLocationHeight = 29; this.accentureOfficeWidth = 21; this.accentureOfficeHeight = 15; } this.officeLocationMarkerUrl = this.markerIconBasePath+'AccentureOffice.png'; this.visitLocationMarkerUrl = [ this.markerIconBasePath+'pin_1.png', this.markerIconBasePath+'pin_2.png', this.markerIconBasePath+'pin_3.png', this.markerIconBasePath+'pin_4.png', this.markerIconBasePath+'pin_5.png', this.markerIconBasePath+'pin_6.png', this.markerIconBasePath+'pin_0.png']; } ngOnInit(){ let obj = { title:this._translate.instant('visitLocation'), isHomeBtn: false, isBackBtn: true, isMenuBtn: true }; this.headerService.setHeaderObject(obj); this.visitStartDate = this.selectvisit.selectedVisitObj.VisitStartDate; this.visitEndDate = this.selectvisit.selectedVisitObj.VisitEndDate; this.defaultShowInfoWindow = false; if(this.headerService.isSideMenuClick){ this.displayFooterSection = false; } } // Load map only after view is initialized ngAfterViewInit() { this.loadMap(); } loadMap() { this.selectvisit.getVisitLocationDetails(this.selectvisit.selectedVisitObj.VisitID) .subscribe(locations => this.successLocationCallback(locations), error => this.errorMessage = <any>error); } private successLocationCallback(locations: any[]){ // create a new map by passing HTMLElement let element: HTMLElement = document.getElementById('flatMapContainerPhone'); if(window.cordova){ this.mapInstance = this.googleMaps.create(element); // listen to MAP_READY event // You must wait for this event to fire before adding something to the map 
or modifying it in anyway this.mapInstance.one(GoogleMapsEvent.MAP_READY).then( () => { console.log('Map is ready!'); this.fetchMapCoordinatesAndDisplay(locations); } ); }else{ let me = this; if( !this.isApiLoaded() ){ this.loadApi(); this._apiLoadingPromise.then( () => { me.__onGoogleLoaded(locations); }); }else{ me.__onGoogleLoaded(locations); } } }; private fetchMapCoordinatesAndDisplay(locations){ this.headerService.setGoogleMapInstance(this.mapInstance); // Now you can add elements to the map like the marker this.filterVisitAndOfficeLocations(locations|| []); this.addVisitLocationMarker(); this.addAccentureLocationMarker(); this.zoomToCurrentLocation(); } private _loadGoogleMapAPIScript(){ let script = (<any>document).createElement('script'); script.async = true; script.defer = true; script.src = `https://maps.googleapis.com/maps/api/js?callback=__onGoogleLoaded&key=${this.googleMapDesktopAPIKey}`; script.type = 'text/javascript'; (<any>document).getElementsByTagName('head')[0].appendChild(script); } isApiLoaded(): boolean { return (<any>window).google ? true : false; } loadApi(): void{ if(!this._apiLoadingPromise){ this._apiLoadingPromise = new Promise( (resolve) => { (<any>window)['__onGoogleLoaded'] = (ev) => { console.log('google maps api loaded'); resolve('google maps api loaded'); } this._loadGoogleMapAPIScript(); }); } } public __onGoogleLoaded(locations){ let mapProp = { center: new google.maps.LatLng(51.508742,-0.120850), zoom: this.mapZoomLevel, tilt: 30 }; let element: HTMLElement = document.getElementById('flatMapContainerPhone'); this.mapInstance = new google.maps.Map(element, mapProp); this.fetchMapCoordinatesAndDisplay(locations); } private fetchPositionLatLong(latitude, longtitude){ let position; if(window.cordova){ position = new LatLng(latitude, longtitude); }else{ position = new google.maps.LatLng(latitude, longtitude); } return position; } private filterVisitAndOfficeLocations(clientvisitLocations: any[]): void { let me = this; //visit Location let clientLocationGrouped = _.groupBy(clientvisitLocations[0], function(locationObj: any){ return locationObj.CityName; }); _.forEach(clientLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); me.displayVisitLocation.push(locationObj); }); //Accenture Offices
let visitLocationGrouped = _.groupBy(clientvisitLocations[1], function(locationObj: any){ return locationObj.CityName; }); let accentureOffices = []; _.forEach(visitLocationGrouped, function(paramObj, key){ //by default, display first marker position let locationObj = _.head(paramObj); accentureOffices.push(locationObj); }); accentureOffices.map(function(locationObj, key){ let filteredData = _.filter(me.displayVisitLocation, function(o) { return o.CityName === locationObj.CityName; }); if(!filteredData.length){ me.displayAccentureOfficeLocation.push(locationObj); } }); } private addVisitLocationMarker() : void{ let me = this, counter = 0; me.displayVisitLocation.map(function(locationObj, key){ if(!counter){ me.selectedMapCityName = locationObj.CityName + ' '+ moment(locationObj.StartDate).format('Do MMM YYYY'); } // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longtitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: (counter<6) ? me.visitLocationMarkerUrl[counter] : me.visitLocationMarkerUrl[6], detail: locationObj, width: me.visitLocationWidth, height: me.visitLocationHeight }); counter++; }); } private addAccentureLocationMarker(): void{ let me = this; _.forEach(me.displayAccentureOfficeLocation, function(locationObj, key){ // create LatLng object let position = me.fetchPositionLatLong(locationObj.Latitude, locationObj.Longitude); //add the marker me.addMapMarker({ position: position, title: locationObj.CityName, icon: me.officeLocationMarkerUrl, width: me.accentureOfficeWidth, height: me.accentureOfficeHeight }); }); }; private zoomToCurrentLocation(){ if(window.cordova){ //for mobile this.mapInstance.getMyLocation().then(res => { console.log('Give it to me' + res.latLng); let position = { target: res.latLng, zoom: this.mapZoomLevel, tilt: 30 }; if(this.mapInstance){ this.mapInstance.animateCamera(position); } }); }else{ //for desktop // Try HTML5 geolocation. 
let me = this; if (navigator.geolocation) { navigator.geolocation.getCurrentPosition(function(position) { let pos = new google.maps.LatLng(position.coords.latitude, position.coords.longitude); if(me.mapInstance){ me.mapInstance.setCenter(pos); } }, function() { //user denied permission }); } } } private addMapMarker(markerObj: ISingleVisitAddMarkerParams){ let me = this; if(this.mapInstance){ if(window.cordova){ //for mobile // create new marker let markerOptions: MarkerOptions = { position: markerObj.position, title: markerObj.title, icon: { url: markerObj.icon, size: { width: markerObj.width, height: markerObj.height } } }; this.mapInstance.addMarker(markerOptions) .then((marker: Marker) => { if(typeof markerObj.detail !== 'undefined'){ marker.set('custominfo', markerObj.detail); } if(!me.defaultShowInfoWindow){ marker.showInfoWindow(); me.defaultShowInfoWindow = true; } marker.addEventListener('click').subscribe((mobj)=>{ marker.showInfoWindow(); me.displayClickedMarkerDetails(mobj.get('custominfo')); }); }); }else{ //for desktop let image = { url: markerObj.icon, // image is 512 x 512 scaledSize : new google.maps.Size(markerObj.width, markerObj.height) }; let markerOptions: any = { position: markerObj.position, title: markerObj.title, icon: image, map: this.mapInstance }; let marker = new google.maps.Marker(markerOptions); if(!this.googleMapInfoWindow){ this.googleMapInfoWindow = new google.maps.InfoWindow(); } if(!me.defaultShowInfoWindow){ this.googleMapInfoWindow.setContent(markerObj.detail.CityName); this.googleMapInfoWindow.open(this.mapInstance, marker); me.defaultShowInfoWindow = true; } google.maps.event.addListener(marker, 'click', (function(detailObj, marker) { return function() { if(me.googleMapInfoWindow){ me.googleMapInfoWindow.close(); } me.googleMapInfoWindow.setContent(detailObj.CityName); me.googleMapInfoWindow.open(this.mapInstance, marker); me.displayClickedMarkerDetails(detailObj); } })(markerObj.detail, marker)); } } }; private displayClickedMarkerDetails(clickMarkerObj: any){ if(clickMarkerObj){ this.selectedMapCityName = clickMarkerObj.CityName + ' '+ moment(clickMarkerObj.StartDate).format('Do MMM YYYY'); } } ngOnDestroy() { if(this.mapInstance){ if(window.cordova){ //for mobile this.mapInstance.remove(); } this.mapInstance = null; } } goToWelcomeView(){ this._router.navigate(['/visit/welcomevisit']); } }
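The desktop branch of the component above works only because the Maps JavaScript API is loaded exactly once, via a global callback, before any map is constructed. Below is a minimal standalone sketch of that loader pattern, assuming a browser environment; the callback name __onGoogleLoaded mirrors the component, while loadGoogleMapsApi and the apiKey parameter are illustrative names and not part of the original code.

let apiLoadingPromise: Promise<void> | null = null;

function loadGoogleMapsApi(apiKey: string): Promise<void> {
  // Already loaded on an earlier call: nothing to do.
  if ((window as any).google && (window as any).google.maps) {
    return Promise.resolve();
  }
  // Cache the in-flight promise so the <script> tag is appended only once.
  if (!apiLoadingPromise) {
    apiLoadingPromise = new Promise<void>((resolve) => {
      // Google Maps invokes this global callback once the library is ready.
      (window as any).__onGoogleLoaded = () => resolve();
      const script = document.createElement('script');
      script.async = true;
      script.defer = true;
      script.src = 'https://maps.googleapis.com/maps/api/js?callback=__onGoogleLoaded&key=' + apiKey;
      document.head.appendChild(script);
    });
  }
  return apiLoadingPromise;
}

// Usage (illustrative): loadGoogleMapsApi('YOUR_KEY').then(() => { /* new google.maps.Map(element, mapProp) */ });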
main.js
//新建二维数组用来表示table的坐标的 x坐标是第一层的数组 y坐标是第二层的数组 var arrs; //数组中有三种状态 墙1 空白0 图形2 //行 init(); function init(){ arrs = new Array(); for (var i = 0; i < 24; i++) { arrs[i] = new Array(); //列 for (var j = 0; j < 14; j++) { //当是第一行或者是最后一行或者是第一列或者是最后一列 为墙赋值为1 if (i == 0 || i == 23 || j == 0 || j == 13) { arrs[i][j] = 1; } } } } //先拿到所有的单元格 数组 var tds = document.getElementsByTagName("td"); //控制难度的时间 datatime = 500; //随机生成颜色的方法 function randomColor(){ var r,g,b; r=Math.floor(Math.random()*166+90); g=Math.floor(Math.random()*166+90); b=Math.floor(Math.random()*166+90); return "rgba("+r+","+g+","+b+",1)" } //绘图的方法 function draw() { //先遍历二维数组 for (var i = 0; i < arrs.length; i++) { for (var j = 0; j < arrs[i].length; j++) { //当二维数组里面的值是1 为墙 if (arrs[i][j] == 1) { //设置墙的背景为红色 js tds[i * 14 + j].style.background = "blue"; //jQuery中的$("td")是拿到的第一个td元素 //$("td")[i * 14 + j].css("background", "red"); 不起作用 拿到的不是数组 //当值为0 为空白 设置为白色 } else if (arrs[i][j] == 0) { tds[i * 14 + j].style.background = "none"; //$("td")[i * 14 + j].css("background", "white"); //当值为2 设置为蓝色 } else if (arrs[i][j] == 2) { // tds[i * 14 + j].style.background = randomColor(); tds[i * 14 + j].style.background = "red"; //$("td")[i * 14 + j].css("background", "blue"); } } } } //初始的块 var squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //定义一个二维数组用来保存随机生成的块的坐标 移动的块 var target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //随机数变量 var index; // console.log(squArr) //随机生成图形的方法 需要根据你传过来的值来生成不一样的图形 function randomBlock() { //初始化初值 squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //生成图形 Math.floor向下取整 Math.random生成随机数0-1不包含1 index = Math.floor(Math.random() * 5); //根据随机生成的数进行判断 // switch(index){ // case 0: // // 田字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][6] = 2; // // arrs[2][7] = 2; // squArr = [[1,6],[1,7],[2,6],[2,7]]; // // for (var i = 0; i < squArr.length; i++) { // // arrs[squArr[i][0]][squArr[i][1]] = 2; // // } // break; // case 1: // // 一字形 // // arrs[1][5] = 2; // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[1][8] = 2; // // for (var i = 5; i <= 8; i++) { // // arrs[1][i] = 2; // // } // squArr = [[1][5],[1][6],[1][7],[1][8]]; // break; // case 2: // // T字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][7] = 2; // // arrs[3][7] = 2; // squArr = [[1][6],[1][7],[2][7],[3][7]]; // break; // case 3: // // |-字形 // // arrs[1][6] = 2; // // arrs[2][6] = 2; // // arrs[3][6] = 2; // // arrs[2][7] = 2; // squArr = [[1][6],[2][6],[3][6],[2][7]]; // break; // case 4: // // z字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][7] = 2; // // arrs[2][8] = 2; // squArr = [[1][6],[1][7],[2][7],[2][8]]; // break; // // } switch(index){ case 0: //田字形 //arrs[1][6]=2; //arrs[1][7]=2; //arrs[2][6]=2; //arrs[2][7]=2; squArr = [[1,6],[1,7],[2,6],[2,7]]; Chgcount = 0; break; case 1: //一字型 // for(var i=5;i<=8;i++){ // arrs[1][i]=2; // } squArr = [[1,5],[1,6],[1,7],[1,8]]; Chgcount = 0; break; case 2: //7字形 // arrs[1][6]=2; // arrs[1][7]=2; // arrs[2][7]=2; // arrs[3][7]=2; squArr = [[1,6],[1,7],[2,7],[3,7]]; Chgcount = 0; break; case 3: // |-型 // arrs[1][6]=2; // arrs[2][6]=2; // arrs[3][6]=2; // arrs[2][7]=2; squArr = [[1,6],[2,6],[3,6],[2,7]]; Chgcount = 0; break; case 4: //z字形 // arrs[1][6]=2; // arrs[1][7]=2; // arrs[2][7]=2; // arrs[2][8]=2; squArr = [[1,6],[1,7],[2,7],[2,8]]; Chgcount = 0; break; } } //给块赋值的方法 function block() { for (var i = 0; i < squArr.length; i++) { arrs[squArr[i][0]][squArr[i][1]] = 2; } //重绘 draw(); } //隐藏原来的块 function hidden() { //遍历squArr //把原来的块隐藏 for (var i = 0; i < squArr.length; i++) { //原本的块隐藏 // 
console.log(squArr[i][0]+","+squArr[i][1]) arrs[squArr[i][0]][squArr[i][1]] = 0; } } //移动给对应坐标 function move(direction) { for (var i = 0; i < squArr.length; i++) { if (direction == "down") { target[i] = [squArr[i][0] + 1, squArr[i][1]]; } else if (direction == "left") { target[i] = [squArr[i][0], squArr[i][1] - 1]; } else if (direction == "right") { target[i] = [squArr[i][0], squArr[i][1] + 1]; } else if (direction == "up") { //形状变化 给移动的数组赋变形后的值 change(); //判断是否能
移动 能移动则把移动完数组的值赋给squarr if(canMove()){ squArr = target; //绘出来 block(); } } } } //拿到显示分数的盒子 var scoreBox = document.getElementById("score"); //显示的分数值 var score = 0; //消除的方法 function clear(){ //遍历整个数组 for(var i=22;i>=1;i--){ var isClear = true; for(var j=12;j>=1;j--){ if(arrs[i][j] != 2){ isClear = false; break; } } if(isClear){ score += 10; //显示分数 scoreBox.innerText = score; //当分数大于100 难道增加 if(score>=50){ datatime = 200; } for(var k=i;k>1;k--){ for(var j=12;j>=1;j--){ arrs[k][j] = arrs[k-1][j]; draw(); } } i++; } // if(isClear == true){ // for(var j=0;j<arrs[i].length;j++){ // arrs[i][j] = 0; // draw(); // } // // } } //draw(); } //形状变化的方法 //定义一个变量来记录变化的次数 var Chgcount; function change(){ //根据块的坐标发生改变的 squArr来发生改变 target来保存改变完的坐标 //隐藏原来的坐标 hidden(); //改变位置 根据随机数生成的图形来变化 switch(index){ case 0: //田字形 不需要改变 //target = squArr; break; case 1: //一字形 第一个点是不会改变的 根据第一点来变化 //squArr = [[1,5],[1,6],[1,7],[1,8]]; // target[0][0] = squArr[0][0]+0; // target[0][1] = squArr[0][1]-0; // target[1][0] = squArr[1][0]+1; // target[1][1] = squArr[1][1]-1; // target[2][0] = squArr[2][0]+2; // target[2][1] = squArr[2][1]-2; // target[3][0] = squArr[3][0]+3; // target[3][1] = squArr[3][1]-3; if(Chgcount%2){ //当为真的时候 值为1的时候变横条 否则变竖条 //隐藏开始的位置 hidden(); //给变化后的位置赋值 for(var i=0;i<4;i++){ target[i][0] =squArr[0][0]; target[i][1] =squArr[0][1]+i; } }else{ //隐藏开始的位置 hidden(); //给变化后的位置赋值 for(var i=0;i<4;i++){ target[i][0] =squArr[0][0]+i; target[i][1] =squArr[0][1]; } } break; case 2: //7字形 有四种形态 //squArr = [[1,6],[1,7],[2,7],[3,7]]; switch(Chgcount%4){ case 0: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]+1; target[1][0] = squArr[2][0]; target[1][1] = squArr[2][1]+1; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]; target[3][1] = squArr[2][1]-1; break; case 1: hidden(); target[0][0] = squArr[2][0]+1; target[0][1] = squArr[2][1]+1; target[1][0] = squArr[2][0]+1; target[1][1] = squArr[2][1]; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]-1; target[3][1] = squArr[2][1]; break; case 2: hidden(); target[0][0] = squArr[2][0]+1; target[0][1] = squArr[2][1]-1; target[1][0] = squArr[2][0]; target[1][1] = squArr[2][1]-1; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]; target[3][1] = squArr[2][1]+1; break; case 3: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]-1; target[1][0] = squArr[2][0]-1; target[1][1] = squArr[2][1]; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]+1; target[3][1] = squArr[2][1]; break; } break; case 3: // |-字形 //squArr = [[1,6],[2,6],[3,6],[2,7]]; switch(Chgcount%4){ case 0: hidden(); target[0][0] = squArr[1][0]; target[0][1] = squArr[1][1]+1; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]; target[2][1] = squArr[1][1]-1; target[3][0] = squArr[1][0]+1; target[3][1] = squArr[1][1]; break; case 1: hidden(); target[0][0] = squArr[1][0]-1; target[0][1] = squArr[1][1]; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]+1; target[2][1] = squArr[1][1]; target[3][0] = squArr[1][0]; target[3][1] = squArr[1][1]-1; break; case 2: hidden(); target[0][0] = squArr[1][0]; target[0][1] = squArr[1][1]+1; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]; target[2][1] = squArr[1][1]-1; target[3][0] = squArr[1][0]-1; target[3][1] = squArr[1][1]; break; case 3: hidden(); target[0][0] = squArr[1][0]-1; target[0][1] = 
squArr[1][1]; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]+1; target[2][1] = squArr[1][1]; target[3][0] = squArr[1][0]; target[3][1] = squArr[1][1]+1; break; } break; case 4: // z字形 //squArr = [[1,6],[1,7],[2,7],[2,8]]; switch(Chgcount%2){ case 0: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]+1; target[1][0] = squArr[2][0]; target[1][1] = squArr[2][1]+1; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]+1; target[3][1] = squArr[2][1]; break; case 1: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]-1; target[1][0] = squArr[2][0]-1; target[1][1] = squArr[2][1]; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]; target[3][1] = squArr[2][1]+1; break; } break; } } //自动往下掉的方法 function autoDown() { //把原本的坐标还原 target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; hidden(); move("down"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } else { //多画一次 block(); clear(); //判断是否gameover var isGameOver = false; for(var i=0;i<target.length;i++){ if(target[i][0] == 1 || target[i][0] == 2){ isGameOver = true; break; } } if(isGameOver){ alert("Game over"); if(confirm("是否继续游戏?")){ isBegin = false; isGameOver = false; //清除定时器 clearInterval(timer); //初始化 init(); //随机生成块 randomBlock(); //给块的坐标位置赋值 // console.log(squArr) block(); //重新绘图 draw(); timer = setInterval("autoDown()", datatime); // isBegin = true; // } else { // alert("当前游戏已经开始"); // } } } randomBlock(); } //赋值 block(); //遍历二维数组 // for (var i = 0; i < arrs.length; i++) { // for (var j = 0; j < arrs[i].length; j++) { // } // } } function canMove() { var isCanMove = false; //先判断底下是否有东西 遍历移动的坐标 查看移动的坐标上是否值为1或2 for (var i = 0; i < 4; i++) { if (arrs[target[i][0]][target[i][1]] != 1 && arrs[target[i][0]][target[i][1]] != 2) { // squArr = [[-1, -1],[-1, -1],[-1, -1],[-1, -1]]; // //如果可以移动 那么我的坐标就变成了移动完的坐标 // squArr = target; isCanMove = true; } else { return false; } // if (tds[target[i][0] * 14 + target[i][1]].style.background != "blue" && tds.style.background != "red") { // squArr = [[-1, -1],[-1, -1],[-1, -1],[-1, -1]]; // //如果可以移动 那么我的坐标就变成了移动完的坐标 // squArr = target; } return isCanMove; } //调用绘图方法 draw(); //当点击开始的时候执行 $("#action").click(function() { if (isBegin == false) { console.log(arrs) //随机生成块 randomBlock(); //给块的坐标位置赋值 block(); //重新绘图 draw(); timer = setInterval("autoDown()", datatime); isBegin = true; } else { alert("当前游戏已经开始"); } }) var count = 0; //暂停按钮 $("#pause").click(function() { // clearInterval(timer); // hidden(); // isBegin = false; if (count%2 == 0) { $("#pause").text("继续"); clearInterval(timer); } else { timer = setInterval("autoDown()",datatime); $("#pause").text("暂停"); } count++; }) //是否开始游戏 var isBegin = false; //定时器设置 var timer; //由键盘控制移动和变化 //键盘按下事件 window.onkeydown = function(event) { //拿到对应的键盘对象 event = window.event || event; target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //拿到键盘上是哪个键 //得到键盘的ASCII码 event.keyCode //alert(event.keyCode); switch (event.keyCode) { //回车的时候开始游戏 case 13: if (isBegin == false) { //随机生成块 randomBlock(); //给块的坐标位置赋值 console.log(squArr) block(); //重新绘图 draw(); timer = setInterval("autoDown()", datatime); isBegin = true; } else { alert("当前游戏已经开始"); } break; //按左箭头的时候 case 37: console.log("我按了左箭头"); //把原本的坐标还原 hidden(); move("left"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } //赋值 block(); break; //按上箭头的时候 case 38: Chgcount++; move("up"); 
console.log("我按了上箭头"); break; //按右箭头的时候 case 39: console.log("我按了右箭头"); //把原本的坐标还原 hidden(); move("right"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } //赋值 block(); break; //按下箭头的时候 case 40: //console.log(squArr[3]); console.log("我按了下箭头"); autoDown(); break; } }
le.log("我按了左箭头"); //把原本的坐标还原 hidden(); move("left"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } //赋值 block(); break; //按上箭头的时候 case 38: Chgcount++; move("up"); console.log("我按了上箭头"); break; //按右箭头的时候 case 39: console.log("我按了右箭头"); //把原本的坐标还原 hidden(); move("right"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } //赋值 block(); break; //按下箭头的时候 case 40: //console.log(squArr[3]); console.log("我按了下箭头"); autoDown(); break; } }
block(); //重新绘图 draw(); timer = setInterval("autoDown()", datatime); isBegin = true; } else { alert("当前游戏已经开始"); } break; //按左箭头的时候 case 37: conso
conditional_block
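The draw() routine in the main.js sample above flattens the 24x14 board onto the sequential list of <td> cells with the index expression i * 14 + j. Below is a minimal Go sketch of that same row-major mapping; the constant and function names are illustrative and not part of the original file.

package main

import "fmt"

// cols mirrors the 14-column board hard-coded in the script (24 rows x 14 columns).
const cols = 14

// cellIndex maps a board coordinate (i, j) to the position of the matching
// <td> element in the flat node list returned by getElementsByTagName("td").
func cellIndex(i, j int) int {
	return i*cols + j
}

func main() {
	// The wall cell at (0, 13) is td number 13; the first playable cell
	// (1, 1) is td number 15, matching tds[i * 14 + j] in draw().
	fmt.Println(cellIndex(0, 13), cellIndex(1, 1))
}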
main.js
//新建二维数组用来表示table的坐标的 x坐标是第一层的数组 y坐标是第二层的数组 var arrs; //数组中有三种状态 墙1 空白0 图形2 //行 init(); function init(){ arrs = new Array(); for (var i = 0; i < 24; i++) { arrs[i] = new Array(); //列 for (var j = 0; j < 14; j++) { //当是第一行或者是最后一行或者是第一列或者是最后一列 为墙赋值为1 if (i == 0 || i == 23 || j == 0 || j == 13) { arrs[i][j] = 1; } } } } //先拿到所有的单元格 数组 var tds = document.getElementsByTagName("td"); //控制难度的时间 datatime = 500; //随机生成颜色的方法 function randomColor(){ var r,g,b; r=Math.floor(Math.random()*166+90); g=Math.floor(Math.random()*166+90); b=Math.floor(Math.random()*166+90); return "rgba("+r+","+g+","+b+",1)" } //绘图的方法 function draw() { //先遍历二维数组 for (var i = 0; i < arrs.length; i++) { for (var j = 0; j < arrs[i].length; j++) { //当二维数组里面的值是1 为墙 if (arrs[i][j] == 1) { //设置墙的背景为红色 js tds[i * 14 + j].style.background = "blue"; //jQuery中的$("td")是拿到的第一个td元素 //$("td")[i * 14 + j].css("background", "red"); 不起作用 拿到的不是数组 //当值为0 为空白 设置为白色 } else if (arrs[i][j] == 0) { tds[i * 14 + j].style.background = "none"; //$("td")[i * 14 + j].css("background", "white"); //当值为2 设置为蓝色 } else if (arrs[i][j] == 2) { // tds[i * 14 + j].style.background = randomColor(); tds[i * 14 + j].style.background = "red"; //$("td")[i * 14 + j].css("background", "blue"); } } } } //初始的块 var squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //定义一个二维数组用来保存随机生成的块的坐标 移动的块 var target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //随机数变量 var index; // console.log(squArr) //随机生成图形的方法 需要根据你传过来的值来生成不一样的图形 function randomBlock() { //初始化初值 squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //生成图形 Math.floor向下取整 Math.random生成随机数0-1不包含1 index = Math.floor(Math.random() * 5); //根据随机生成的数进行判断 // switch(index){ // case 0: // // 田字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][6] = 2; // // arrs[2][7] = 2; // squArr = [[1,6],[1,7],[2,6],[2,7]]; // // for (var i = 0; i < squArr.length; i++) { // // arrs[squArr[i][0]][squArr[i][1]] = 2; // // } // break; // case 1: // // 一字形 // // arrs[1][5] = 2; // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[1][8] = 2; // // for (var i = 5; i <= 8; i++) { // // arrs[1][i] = 2; // // } // squArr = [[1][5],[1][6],[1][7],[1][8]]; // break; // case 2: // // T字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][7] = 2; // // arrs[3][7] = 2; // squArr = [[1][6],[1][7],[2][7],[3][7]]; // break; // case 3: // // |-字形 // // arrs[1][6] = 2; // // arrs[2][6] = 2; // // arrs[3][6] = 2; // // arrs[2][7] = 2; // squArr = [[1][6],[2][6],[3][6],[2][7]]; // break; // case 4: // // z字形 // // arrs[1][6] = 2; // // arrs[1][7] = 2; // // arrs[2][7] = 2; // // arrs[2][8] = 2; // squArr = [[1][6],[1][7],[2][7],[2][8]]; // break; // // } switch(index){ case 0: //田字形 //arrs[1][6]=2; //arrs[1][7]=2; //arrs[2][6]=2; //arrs[2][7]=2; squArr = [[1,6],[1,7],[2,6],[2,7]]; Chgcount = 0; break; case 1: //一字型 // for(var i=5;i<=8;i++){ // arrs[1][i]=2; // } squArr = [[1,5],[1,6],[1,7],[1,8]]; Chgcount = 0; break; case 2: //7字形 // arrs[1][6]=2; // arrs[1][7]=2; // arrs[2][7]=2; // arrs[3][7]=2; squArr = [[1,6],[1,7],[2,7],[3,7]]; Chgcount = 0; break; case 3: // |-型 // arrs[1][6]=2; // arrs[2][6]=2; // arrs[3][6]=2; // arrs[2][7]=2; squArr = [[1,6],[2,6],[3,6],[2,7]]; Chgcount = 0; break; case 4: //z字形 // arrs[1][6]=2; // arrs[1][7]=2; // arrs[2][7]=2; // arrs[2][8]=2; squArr = [[1,6],[1,7],[2,7],[2,8]]; Chgcount = 0; break; } } //给块赋值的方法 function block() { for (var i = 0; i < squArr.length; i++) { arrs[squArr[i][0]][squArr[i][1]] = 2; } //重绘 draw(); } //隐藏原来的块 function hidden() { //遍历squArr //把原来的块隐藏 for (var i = 0; i < squArr.length; i++) { //原本的块隐藏 // 
console.log(squArr[i][0]+","+squArr[i][1]) arrs[squArr[i][0]][squArr[i][1]] = 0; } } //移动给对应坐标 function move(direction) { for (var i = 0; i < squArr.length; i++) { if (direction == "down") { target[i] = [squArr[i][0] + 1, squArr[i][1]]; } else if (direction == "left") { target[i] = [squArr[i][0], squArr[i][1] - 1]; } else if (direction == "right") { target[i] = [squArr[i][0], squArr[i][1] + 1]; } else if (direction == "up") { //形状变化 给移动的数组赋变形后的值 change(); //判断是否能移动 能移动则把移动完数组的值赋给squarr if(canMove()){ squArr = target; //绘出来 block(); } } } } //拿到显示分数的盒子 var scoreBox = document.getElementById("score"); //显示的分数值 var score = 0; //消除的方法 function clear(){ //遍历整个数组 for(var i=22;i>=1;i--){ var isClear = true; for(var j=12;j>=1;j--){ if(arrs[i][j] != 2){ isClear = false; break; } } if(isClear){ score += 10; //显示分数 scoreBox.innerText = score; //当分数大于100 难道增加 if(score>=50){ datatime = 200; } for(var k=i;k>1;k--){ for(var j=12;j>=1;j--){ arrs[k][j] = arrs[k-1][j]; draw(); } } i++; } // if(isClear == true){ // for(var j=0;j<arrs[i].length;j++){ // arrs[i][j] = 0; // draw(); // } // // } } //draw(); } //形状变化的方法 //定义一个变量来记录变化的次数 var Chgcount; function change(){ //根据块的坐标发生改变的 squArr来发生改变 target来保存改变完的坐标 //隐藏原来的坐标 hidden(); //改变位置 根据随机数生成的图形来变化 switch(index){ case 0: //田字形 不需要改变 //target = squArr; break; case 1: //一字形 第一个点是不会改变的 根据第一点来变化 //squArr = [[1,5],[1,6],[1,7],[1,8]]; // target[0][0] = squArr[0][0]+0; // target[0][1] = squArr[0][1]-0; // target[1][0] = squArr[1][0]+1; // target[1][1] = squArr[1][1]-1; // target[2][0] = squArr[2][0]+2; // target[2][1] = squArr[2][1]-2; // target[3][0] = squArr[3][0]+3; // target[3][1] = squArr[3][1]-3; if(Chgcount%2){ //当为真的时候 值为1的时候变横条 否则变竖条 //隐藏开始的位置 hidden(); //给变化后的位置赋值 for(var i=0;i<4;i++){ target[i][0] =squArr[0][0]; target[i][1] =squArr[0][1]+i; } }else{ //隐藏开始的位置 hidden(); //给变化后的位置赋值 for(var i=0;i<4;i++){ target[i][0] =squArr[0][0]+i; target[i][1] =squArr[0][1]; } } break; case 2: //7字形 有四种形态 //squArr = [[1,6],[1,7],[2,7],[3,7]]; switch(Chgcount%4){ case 0: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]+1; target[1][0] = squArr[2][0]; target[1][1] = squArr[2][1]+1; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]; target[3][1] = squArr[2][1]-1; break; case 1: hidden(); target[0][0] = squArr[2][0]+1; target[0][1] = squArr[2][1]+1; target[1][0] = squArr[2][0]+1; target[1][1] = squArr[2][1]; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]-1; target[3][1] = squArr[2][1]; break; case 2: hidden(); target[0][0] = squArr[2][0]+1; target[0][1] = squArr[2][1]-1; target[1][0] = squArr[2][0]; target[1][1] = squArr[2][1]-1; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]; target[3][1] = squArr[2][1]+1; break; case 3: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]-1; target[1][0] = squArr[2][0]-1; target[1][1] = squArr[2][1]; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]+1; target[3][1] = squArr[2][1]; break; } break; case 3: // |-字形 //squArr = [[1,6],[2,6],[3,6],[2,7]]; switch(Chgcount%4){ case 0: hidden(); target[0][0] = squArr[1][0]; target[0][1] = squArr[1][1]+1; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]; target[2][1] = squArr[1][1]-1; target[3][0] = squArr[1][0]+1; target[3][1] = squArr[1][1]; break; case 1: hidden(); target[0][0] = squArr[1][0]-1; target[0][1] = squArr[1][1]; target[1][0] = squArr[1][0]; target[1][1] = 
squArr[1][1]; target[2][0] = squArr[1][0]+1; target[2][1] = squArr[1][1]; target[3][0] = squArr[1][0]; target[3][1] = squArr[1][1]-1; break; case 2: hidden(); target[0][0] = squArr[1][0]; target[0][1] = squArr[1][1]+1; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]; target[2][1] = squArr[1][1]-1; target[3][0] = squArr[1][0]-1; target[3][1] = squArr[1][1]; break; case 3: hidden(); target[0][0] = squArr[1][0]-1; target[0][1] = squArr[1][1]; target[1][0] = squArr[1][0]; target[1][1] = squArr[1][1]; target[2][0] = squArr[1][0]+1; target[2][1] = squArr[1][1]; target[3][0] = squArr[1][0]; target[3][1] = squArr[1][1]+1; break; } break; case 4: // z字形 //squArr = [[1,6],[1,7],[2,7],[2,8]]; switch(Chgcount%2){ case 0: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]+1; target[1][0] = squArr[2][0]; target[1][1] = squArr[2][1]+1; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]+1; target[3][1] = squArr[2][1]; break; case 1: hidden(); target[0][0] = squArr[2][0]-1; target[0][1] = squArr[2][1]-1; target[1][0] = squArr[2][0]-1; target[1][1] = squArr[2][1]; target[2][0] = squArr[2][0]; target[2][1] = squArr[2][1]; target[3][0] = squArr[2][0]; target[3][1] = squArr[2][1]+1; break; } break; } } //自动往下掉的方法 function autoDown() { //把原本的坐标还原 target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; hidden(); move("down"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } else { //多画一次 block(); clear(); //判断是否gameover var isGameOver = false; for(var i=0;i<target.length;i++){ if(target[i][0] == 1 || target[i][0] == 2){ isGameOver = true; break; } } if(isGameOver){ alert("Game over"); if(confirm("是否继续游戏?")){ isBegin = false; isGameOver = false; //清除定时器 clearInterval(timer); //初始化 init(); //随机生成块 randomBlock(); //给块的坐标位置赋值 // console.log(squArr)
timer = setInterval("autoDown()", datatime); // isBegin = true; // } else { // alert("当前游戏已经开始"); // } } } randomBlock(); } //赋值 block(); //遍历二维数组 // for (var i = 0; i < arrs.length; i++) { // for (var j = 0; j < arrs[i].length; j++) { // } // } } function canMove() { var isCanMove = false; //先判断底下是否有东西 遍历移动的坐标 查看移动的坐标上是否值为1或2 for (var i = 0; i < 4; i++) { if (arrs[target[i][0]][target[i][1]] != 1 && arrs[target[i][0]][target[i][1]] != 2) { // squArr = [[-1, -1],[-1, -1],[-1, -1],[-1, -1]]; // //如果可以移动 那么我的坐标就变成了移动完的坐标 // squArr = target; isCanMove = true; } else { return false; } // if (tds[target[i][0] * 14 + target[i][1]].style.background != "blue" && tds.style.background != "red") { // squArr = [[-1, -1],[-1, -1],[-1, -1],[-1, -1]]; // //如果可以移动 那么我的坐标就变成了移动完的坐标 // squArr = target; } return isCanMove; } //调用绘图方法 draw(); //当点击开始的时候执行 $("#action").click(function() { if (isBegin == false) { console.log(arrs) //随机生成块 randomBlock(); //给块的坐标位置赋值 block(); //重新绘图 draw(); timer = setInterval("autoDown()", datatime); isBegin = true; } else { alert("当前游戏已经开始"); } }) var count = 0; //暂停按钮 $("#pause").click(function() { // clearInterval(timer); // hidden(); // isBegin = false; if (count%2 == 0) { $("#pause").text("继续"); clearInterval(timer); } else { timer = setInterval("autoDown()",datatime); $("#pause").text("暂停"); } count++; }) //是否开始游戏 var isBegin = false; //定时器设置 var timer; //由键盘控制移动和变化 //键盘按下事件 window.onkeydown = function(event) { //拿到对应的键盘对象 event = window.event || event; target = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //拿到键盘上是哪个键 //得到键盘的ASCII码 event.keyCode //alert(event.keyCode); switch (event.keyCode) { //回车的时候开始游戏 case 13: if (isBegin == false) { //随机生成块 randomBlock(); //给块的坐标位置赋值 console.log(squArr) block(); //重新绘图 draw(); timer = setInterval("autoDown()", datatime); isBegin = true; } else { alert("当前游戏已经开始"); } break; //按左箭头的时候 case 37: console.log("我按了左箭头"); //把原本的坐标还原 hidden(); move("left"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } //赋值 block(); break; //按上箭头的时候 case 38: Chgcount++; move("up"); console.log("我按了上箭头"); break; //按右箭头的时候 case 39: console.log("我按了右箭头"); //把原本的坐标还原 hidden(); move("right"); if (canMove()) { squArr = [[-1,-1],[-1,-1],[-1,-1],[-1,-1]]; //如果可以移动 那么我的坐标就变成了移动完的坐标 squArr = target; } //赋值 block(); break; //按下箭头的时候 case 40: //console.log(squArr[3]); console.log("我按了下箭头"); autoDown(); break; } }
block(); //redraw draw();
random_line_split
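The clear() function in the script scans rows 22..1 bottom-up, treats a row whose playable columns 1..12 all hold the value 2 as full, adds 10 points, and shifts every row above it down by one step before rechecking the same row index. The following is a hedged Go sketch of that pass, assuming the same 24x14 board layout (walls in row 0/23 and column 0/13); the function name is illustrative.

package main

import "fmt"

// clearFullRows removes every full row by copying the rows above it down one
// step, exactly as the nested loops in clear() do, and returns how many rows
// were cleared.
func clearFullRows(board *[24][14]int) int {
	cleared := 0
	for i := 22; i >= 1; i-- {
		full := true
		for j := 12; j >= 1; j-- {
			if board[i][j] != 2 {
				full = false
				break
			}
		}
		if !full {
			continue
		}
		cleared++
		for k := i; k > 1; k-- {
			for j := 12; j >= 1; j-- {
				board[k][j] = board[k-1][j] // pull the row above down
			}
		}
		i++ // recheck the same row index after the shift
	}
	return cleared
}

func main() {
	var board [24][14]int
	for j := 1; j <= 12; j++ {
		board[22][j] = 2 // fill the bottom playable row
	}
	fmt.Println(clearFullRows(&board)) // 1
}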
model.go
package word import "encoding/xml" //RelationshipTypeImage 图片文档映射关系类型 const RelationshipTypeImage = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/image" //PrefixRID rId前缀 const PrefixRID = "rId" //W word文档 type W struct { XMLName xml.Name `xml:"w:document"` Wpc string `xml:"xmlns:wpc,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingCanvas" MC string `xml:"xmlns:mc,attr"` //"http://schemas.openxmlformats.org/markup-compatibility/2006" O string `xml:"xmlns:o,attr"` //"urn:schemas-microsoft-com:office:office" R string `xml:"xmlns:r,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/relationships" M string `xml:"xmlns:m,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/math" V string `xml:"xmlns:v,attr"` //"urn:schemas-microsoft-com:vml" WP14 string `xml:"xmlns:wp14,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingDrawing" WP string `xml:"xmlns:wp,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing" W10 string `xml:"xmlns:w10,attr"` //"urn:schemas-microsoft-com:office:word" W string `xml:"xmlns:w,attr"` //"http://schemas.openxmlformats.org/wordprocessingml/2006/main" W14 string `xml:"xmlns:w14,attr"` //"http://schemas.microsoft.com/office/word/2010/wordml W15 string `xml:"xmlns:w15,attr"` //"http://schemas.microsoft.com/office/word/2012/wordml" WPG string `xml:"xmlns:wpg,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingGroup" WPI string `xml:"xmlns:wpi,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingInk" WNE string `xml:"xmlns:wne,attr"` //"http://schemas.microsoft.com/office/word/2006/wordml" WPS string `xml:"xmlns:wps,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingShape" Ignorable string `xml:"mc:Ignorable,attr"` //"w14 w15 wp14" Body *Body `xml:"w:body"` } //SetBody 添加文档主体 func (w *W) SetBody(b *Body) { w.Body = b } //Body Word文档主体 type Body struct { Sects []Sect } //AddSect 增加章节 func (b *Body) AddSect(s Sect) { b.Sects = append(b.Sects, s) } //Sect 章节 type Sect interface { Sfunc() } //P 段落 type P struct { XMLName xml.Name `xml:"w:p"` RsidR string `xml:"w:rsidR,attr,omitempty"` RsidRDefault string `xml:"w:rsidRDefault,attr,omitempty"` PPr *PPr `xml:"w:pPr,omitempty"` Rs []Run `xml:"w:r,omitempty"` } //Sfunc P sect func (p *P) Sfunc() { } //Run 段落内容 type Run interface { Rfunc() } //PPr 段落属性 type PPr struct { XMLName xml.Name `xml:"w:pPr"` SnapToGrid *SnapToGrid `xml:"w:snapToGrid,omitempty"` Spacing *Spacing `xml:"w:spacing,omitempty"` Ind *Ind `xml:"w:ind,omitempty"` Jc *Jc `xml:"w:jc,omitempty"` RPr *RPr `xml:"w:rPr,omitempty"` } //Ind 首行:缩进、行高 type Ind struct { XMLName xml.Name `xml:"w:ind"` FirstLineChars int64 `xml:"w:firstLineChars,attr"` //首行缩进字符数,100是一个字符 LeftChars int64 `xml:"w:leftChars,attr"` //左缩进字符数,100是一个字符 RightChars int64 `xml:"w:rightChars,attr"` //右缩进字符数,100是一个字符 // FirstLine int64 `xml:"w:firstLine,attr"` } // <w:ind w:firstLineChars="200" w:firstLine="420" /> //R 块 type R struct { XMLName xml.Name `xml:"w:r"` RPr *RPr `xml:"w:rPr,omitempty"` //块属性 T *T `xml:"w:t,omitempty"` //文字 Drawing *Drawing `xml:"w:drawing,omitempty"` //图片 } //Rfunc run sect func (r *R) Rfunc() { } //SnapToGrid 对齐网格 type SnapToGrid struct { XMLName xml.Name `xml:"w:snapToGrid"` Val string `xml:"w:val,attr,omitempty"` } //RPr 文字块属性 type RPr struct { XMLName xm
e `xml:"w:rPr"` RFonts *RFonts `xml:"w:rFonts,omitempty"` B *Bold `xml:"w:b,omitempty"` // BCs string `xml:"w:bCs,omitempty"` Color *Color `xml:"w:color"` Sz *Sz `xml:"w:sz"` SzCs *SzCs `xml:"w:szCs"` } //Bold 加粗 type Bold struct { XMLName xml.Name `xml:"w:b"` Val string `xml:"w:val,attr,omitempty"` } //RFonts <w:rFonts w:ascii="微软雅黑" w:eastAsia="微软雅黑" w:hAnsi="微软雅黑"/> type RFonts struct { XMLName xml.Name `xml:"w:rFonts"` ASCII string `xml:"w:ascii,attr,omitempty"` EastAsia string `xml:"w:eastAsia,attr,omitempty"` HAnsi string `xml:"w:hAnsi,attr,omitempty"` } //Color <w:color w:val="3A3838"/> type Color struct { XMLName xml.Name `xml:"w:color"` Val string `xml:"w:val,attr,omitempty"` } //Sz 字号大小,如:14号字为28,Word上的字号乘以2 <w:sz w:val="56"/> type Sz struct { XMLName xml.Name `xml:"w:sz"` Val int64 `xml:"w:val,attr,omitempty"` } //SzCs 未知 <w:szCs w:val="56"/> type SzCs struct { XMLName xml.Name `xml:"w:szCs"` Val int64 `xml:"w:val,attr,omitempty"` } // Spacing 行间距 // <w:spacing w:before="360" w:after="120" w:line="480" w:lineRule="auto" w:beforeAutospacing="0" w:afterAutospacing="0"/> // http://officeopenxml.com/WPspacing.php // Values are in twentieths of a point. A normal single-spaced paragaph has a w:line value of 240, or 12 points. // To specify units in hundreths of a line, use attributes 'afterLines'/'beforeLines'. // The space between adjacent paragraphs will be the greater of the 'line' spacing of each paragraph, the spacing // after the first paragraph, and the spacing before the second paragraph. So if the first paragraph specifies 240 // after and the second 80 before, and they are both single-spaced ('line' value of 240), then the space between // the paragraphs will be 240. // Specifies how the spacing between lines as specified in the line attribute is calculated. // Note: If the value of the lineRule attribute is atLeast or exactly, then the value of the line attribute is interpreted as 240th of a point. If the value of lineRule is auto, then the value of line is interpreted as 240th of a line. 
type Spacing struct { XMLName xml.Name `xml:"w:spacing"` Before int64 `xml:"w:before,attr,omitempty"` After int64 `xml:"w:after,attr,omitempty"` Line int64 `xml:"w:line,attr,omitempty"` LineRule LineRule `xml:"w:lineRule,attr,omitempty"` BeforeAutospacing int64 `xml:"w:beforeAutospacing"` AfterAutospacing int64 `xml:"w:afterAutospacing"` } //Jc 对齐方式 <w:jc w:val="left"/> type Jc struct { XMLName xml.Name `xml:"w:jc"` Val string `xml:"w:val,attr,omitempty"` } //T 文本 type T struct { XMLName xml.Name `xml:"w:t"` Space string `xml:"xml:space,attr,omitempty"` //"preserve" // Space string `xml:"w:space,attr,omitempty"` Text string `xml:",chardata"` } //Drawing 绘图 type Drawing struct { XMLName xml.Name `xml:"w:drawing"` Inline *Inline `xml:"wp:inline,omitempty"` //插入图片 Anchor *Anchor `xml:"wp:anchor,omitempty"` //插入形状 } //Inline 绘图边框 type Inline struct { XMLName xml.Name `xml:"wp:inline"` DistT int64 `xml:"distT,attr"` DistB int64 `xml:"distB,attr"` DistL int64 `xml:"distL,attr"` DistR int64 `xml:"distR,attr"` Extent *Extent `xml:"wp:extent"` EffectExtent *EffectExtent `xml:"wp:effectExtent"` DocPr *DocPr `xml:"wp:docPr"` CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"` Graphic *Graphic `xml:"a:graphic"` } //Extent 绘图范围 type Extent struct { XMLName xml.Name `xml:"wp:extent"` CX int64 `xml:"cx,attr"` CY int64 `xml:"cy,attr"` } //EffectExtent 绘图有效范围 type EffectExtent struct { XMLName xml.Name `xml:"wp:effectExtent"` L int64 `xml:"l,attr"` //左边距 T int64 `xml:"t,attr"` //上边距 R int64 `xml:"r,attr"` //右边距 B int64 `xml:"b,attr"` //下边距 } //WrapNone 不断行 type WrapNone struct { XMLName xml.Name `xml:"wp:wrapNone"` } //DocPr 文档属性,唯一就行,好像没鸟用 type DocPr struct { XMLName xml.Name `xml:"wp:docPr"` ID int64 `xml:"id,attr"` Name string `xml:"name,attr"` } //CNvGraphicFramePr 图形框架属性 type CNvGraphicFramePr struct { XMLName xml.Name `xml:"wp:cNvGraphicFramePr"` GraphicFrameLocks *GraphicFrameLocks `xml:"a:graphicFrameLocks"` } //GraphicFrameLocks 图形框架锁 type GraphicFrameLocks struct { XMLName xml.Name `xml:"a:graphicFrameLocks"` A string `xml:"xmlns:a,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/main" NoChangeAspect int64 `xml:"noChangeAspect,attr"` } //Graphic 图形 type Graphic struct { XMLName xml.Name `xml:"a:graphic"` A string `xml:"xmlns:a,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/main" GraphicData *GraphicData `xml:"a:graphicData"` } //GraphicData 图形数据 type GraphicData struct { XMLName xml.Name `xml:"a:graphicData"` //uri = "http://schemas.openxmlformats.org/drawingml/2006/picture" 插入图片 //uri = "http://schemas.microsoft.com/office/word/2010/wordprocessingShape" 插入形状 URI string `xml:"uri,attr"` Pic *Pic `xml:"pic:pic,omitempty"` //图片 Wsp *Wsp `xml:"wps:wsp,omitempty"` //形状 } //Pic 图形 type Pic struct { XMLName xml.Name `xml:"pic:pic"` NSPic string `xml:"xmlns:pic,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/picture" NvPicPr *NvPicPr `xml:"pic:nvPicPr"` BlipFill *BlipFill `xml:"pic:blipFill"` PicSpPr *PicSpPr `xml:"pic:spPr"` } //NvPicPr pic:nvPicPr type NvPicPr struct { XMLName xml.Name `xml:"pic:nvPicPr"` CNvPr *CNvPr `xml:"pic:cNvPr"` CNvPicPr string `xml:"pic:cNvPicPr"` } //CNvPr pic:cNvPr type CNvPr struct { XMLName xml.Name `xml:"pic:cNvPr"` ID int64 `xml:"id,attr"` Name string `xml:"name,attr"` } //BlipFill 填充 type BlipFill struct { XMLName xml.Name `xml:"pic:blipFill"` Blip *Blip `xml:"a:blip"` Stretch *Stretch `xml:"a:stretch"` } //Blip a:blip type Blip struct { XMLName xml.Name `xml:"a:blip"` Embed string `xml:"r:embed,attr"` //填充图片对应rel ID } 
//Stretch 拉伸 type Stretch struct { XMLName xml.Name `xml:"a:stretch"` FillRect string `xml:"a:fillRect"` } //PicSpPr pic:spPr type PicSpPr struct { XMLName xml.Name `xml:"pic:spPr"` Xfrm *Xfrm `xml:"a:xfrm"` PrstGeom *PrstGeom `xml:"a:prstGeom"` } //Xfrm a:xfrm type Xfrm struct { XMLName xml.Name `xml:"a:xfrm"` FlipV int64 `xml:"flipV,attr"` AOff *AOff `xml:"a:off"` AExt *AExt `xml:"a:ext"` } //AOff a:off type AOff struct { XMLName xml.Name `xml:"a:off"` X int64 `xml:"x,attr"` Y int64 `xml:"y,attr"` } //AExt "a:ext type AExt struct { XMLName xml.Name `xml:"a:ext"` CX int64 `xml:"cx,attr"` //图片宽度,36000为1毫米 CY int64 `xml:"cy,attr"` //图片高度,36000为1毫米 } //PrstGeom 几何形状,rect:矩形 type PrstGeom struct { XMLName xml.Name `xml:"a:prstGeom"` Prst string `xml:"prst,attr"` AVLst string `xml:"a:avLst"` } //Anchor 形状 type Anchor struct { XMLName xml.Name `xml:"wp:anchor"` DistT int64 `xml:"distT,attr"` DistB int64 `xml:"distB,attr"` DistL int64 `xml:"distL,attr"` DistR int64 `xml:"distR,attr"` SimplePos int64 `xml:"simplePos,attr"` //默认0 RelativeHeight int64 `xml:"relativeHeight,attr"` //默认0 BehindDoc int64 `xml:"behindDoc,attr"` //默认0 Locked int64 `xml:"locked,attr"` //默认0 LayoutInCell int64 `xml:"layoutInCell,attr"` //默认1 AllowOverlap int64 `xml:"allowOverlap,attr"` //默认1 AnchorID string `xml:"wp14:anchorId,attr"` //"69E31D9A" EditID string `xml:"wp14:editId,attr"` //"48F3AB62" WpSimplePos *SimplePos `xml:"wp:simplePos"` PositionH *PositionH `xml:"wp:positionH"` PositionV *PositionV `xml:"wp:positionV"` Extent *Extent `xml:"wp:extent"` EffectExtent *EffectExtent `xml:"wp:effectExtent"` WrapNone string `xml:"wp:wrapNone"` DocPr *DocPr `xml:"wp:docPr"` CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"` Graphic *Graphic `xml:"a:graphic"` } //SimplePos wp:simplePos type SimplePos struct { XMLName xml.Name `xml:"wp:simplePos"` X int64 `xml:"x,attr"` Y int64 `xml:"y,attr"` } //PositionH wp:positionH type PositionH struct { XMLName xml.Name `xml:"wp:positionH"` RelativeFrom string `xml:"relativeFrom,attr"` //column PosOffset *PosOffset `xml:"wp:posOffset"` } //PositionV wp:positionV type PositionV struct { XMLName xml.Name `xml:"wp:positionV"` RelativeFrom string `xml:"relativeFrom,attr"` //paragraph PosOffset *PosOffset `xml:"wp:posOffset"` } //PosOffset wp:posOffset type PosOffset struct { XMLName xml.Name `xml:"wp:posOffset"` Text string `xml:",chardata"` } //Wsp word形状数据,wps:wsp Word Processing Shape type Wsp struct { XMLName xml.Name `xml:"wps:wsp"` CNvCnPr string `xml:"wps:cNvCnPr"` WpsSpPr *WpsSpPr `xml:"wps:spPr"` BodyPr string `xml:"wps:bodyPr"` } //WpsSpPr wps:spPr type WpsSpPr struct { XMLName xml.Name `xml:"wps:spPr"` Xfrm *Xfrm `xml:"a:xfrm"` PrstGeom *PrstGeom `xml:"a:prstGeom"` Ln *Ln `xml:"a:ln"` } //Ln 线 type Ln struct { XMLName xml.Name `xml:"a:ln"` W int64 `xml:"w,attr"` //线宽 SolidFill *SolidFill `xml:"a:solidFill"` //填充 } //SolidFill 实心填充 type SolidFill struct { XMLName xml.Name `xml:"a:solidFill"` SrgbClr *SrgbClr `xml:"a:srgbClr"` } //SrgbClr 填充颜色 type SrgbClr struct { XMLName xml.Name `xml:"a:srgbClr"` Val string `xml:"val,attr"` //颜色 RGB } //WpsStyle 样式 type WpsStyle struct { XMLName xml.Name `xml:"wps:style"` LnRef *LnRef `xml:"a:lnRef"` FillRef *FillRef `xml:"a:fillRef"` EffectRef *EffectRef `xml:"a:effectRef"` FontRef *FontRef `xml:"a:fontRef"` } //LnRef a:lnRef type LnRef struct { XMLName xml.Name `xml:"a:lnRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //FillRef a:fillRef type FillRef struct { XMLName xml.Name `xml:"a:fillRef"` IDX int64 
`xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //EffectRef a:effectRef type EffectRef struct { XMLName xml.Name `xml:"a:effectRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //FontRef a:fontRef type FontRef struct { XMLName xml.Name `xml:"a:fontRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //SchemeClr a:schemeClr type SchemeClr struct { XMLName xml.Name `xml:"a:schemeClr"` Val string `xml:"val,attr"` } //Relationship 文档映射关系 type Relationship struct { XMLName xml.Name `xml:"Relationship"` ID string `xml:"Id,attr"` //rId9 Target string `xml:"Target,attr"` //media/image2.png Type string `xml:"Type,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/relationships/image" } //DocumentRels 文档映射关系 type DocumentRels struct { XMLName xml.Name `xml:"Relationships"` Relationships []*Relationship `xml:"Relationship"` }
l.Nam
identifier_name
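model.go above only defines the document types. As a rough illustration of how they compose, here is a minimal sketch of building a one-paragraph document and marshalling it with encoding/xml. It is written as if it sat inside the same word package, it touches only fields shown in the file, and the function name and text content are illustrative rather than part of the original source.

package word

import "encoding/xml"

// sampleDocument wires a W, a Body, one paragraph and one text run together
// and marshals the result to document.xml markup.
func sampleDocument() ([]byte, error) {
	doc := &W{
		R: "http://schemas.openxmlformats.org/officeDocument/2006/relationships",
		W: "http://schemas.openxmlformats.org/wordprocessingml/2006/main",
	}
	body := &Body{}
	body.AddSect(&P{
		Rs: []Run{
			&R{T: &T{Space: "preserve", Text: "Hello, OOXML"}},
		},
	})
	doc.SetBody(body)
	return xml.MarshalIndent(doc, "", "  ")
}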
model.go
package word import "encoding/xml" //RelationshipTypeImage 图片文档映射关系类型 const RelationshipTypeImage = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/image" //PrefixRID rId前缀 const PrefixRID = "rId" //W word文档 type W struct { XMLName xml.Name `xml:"w:document"` Wpc string `xml:"xmlns:wpc,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingCanvas" MC string `xml:"xmlns:mc,attr"` //"http://schemas.openxmlformats.org/markup-compatibility/2006" O string `xml:"xmlns:o,attr"` //"urn:schemas-microsoft-com:office:office" R string `xml:"xmlns:r,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/relationships" M string `xml:"xmlns:m,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/math" V string `xml:"xmlns:v,attr"` //"urn:schemas-microsoft-com:vml" WP14 string `xml:"xmlns:wp14,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingDrawing" WP string `xml:"xmlns:wp,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing" W10 string `xml:"xmlns:w10,attr"` //"urn:schemas-microsoft-com:office:word" W string `xml:"xmlns:w,attr"` //"http://schemas.openxmlformats.org/wordprocessingml/2006/main" W14 string `xml:"xmlns:w14,attr"` //"http://schemas.microsoft.com/office/word/2010/wordml W15 string `xml:"xmlns:w15,attr"` //"http://schemas.microsoft.com/office/word/2012/wordml" WPG string `xml:"xmlns:wpg,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingGroup" WPI string `xml:"xmlns:wpi,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingInk" WNE string `xml:"xmlns:wne,attr"` //"http://schemas.microsoft.com/office/word/2006/wordml" WPS string `xml:"xmlns:wps,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingShape" Ignorable string `xml:"mc:Ignorable,attr"` //"w14 w15 wp14" Body *Body `xml:"w:body"` } //SetBody 添加文档主体 func (w *W) SetBody(b *Body) { w.Body = b } //Body Word文档主体 type Body struct { Sects []Sect } //AddSect 增加章节 func (b *Body) AddSect(s Sect) { b.Sects = append(b.Sects, s) } //Sect 章节 type Sect interface { Sfunc() } //P 段落 type P struct { XMLName xml.Name `xml:"w:p"` RsidR string `xml:"w:rsidR,attr,omitempty"` RsidRDefault string `xml:"w:rsidRDefault,attr,omitempty"` PPr *PPr `xml:"w:pPr,omitempty"` Rs []Run `xml:"w:r,omitempty"` } //Sfunc P sect func (p *P) Sfunc() { } //Run 段落内容 type Run interface { Rfunc() } //PPr 段落属性 type PPr struct { XMLName xml.Name `xml:"w:pPr"` SnapToGrid *SnapToGrid `xml:"w:snapToGrid,omitempty"` Spacing *Spacing `xml:"w:spacing,omitempty"` Ind *Ind `xml:"w:ind,omitempty"` Jc *Jc `xml:"w:jc,omitempty"` RPr *RPr `xml:"w:rPr,omitempty"` } //Ind 首行:缩进、行高 type Ind struct { XMLName xml.Name `xml:"w:ind"` FirstLineChars int64 `xml:"w:firstLineChars,attr"` //首行缩进字符数,100是一个字符 LeftChars int64 `xml:"w:leftChars,attr"` //左缩进字符数,100是一个字符 RightChars int64 `xml:"w:rightChars,attr"` //右缩进字符数,100是一个字符 // FirstLine int64 `xml:"w:firstLine,attr"` } // <w:ind w:firstLineChars="200" w:firstLine="420" /> //R 块 type R struct { XMLName xml.Name `xml:"w:r"` RPr *RPr `xml:"w:rPr,omitempty"` //块属性 T *T `xml:"w:t,omitempty"` //文字 Drawing *Drawing `xml:"w:drawing,omitempty"` //图片 } //Rfunc run sect func (r *R) Rfunc() { } //SnapToGrid 对齐网格 type SnapToGrid struct { XMLName xml.Name `xml:"w:snapToGrid"` Val string `xml:"w:val,attr,omitempty"` } //RPr 文字块属性 type RPr struct { XMLName xml.Name `
:"w:rPr"` RFonts *RFonts `xml:"w:rFonts,omitempty"` B *Bold `xml:"w:b,omitempty"` // BCs string `xml:"w:bCs,omitempty"` Color *Color `xml:"w:color"` Sz *Sz `xml:"w:sz"` SzCs *SzCs `xml:"w:szCs"` } //Bold 加粗 type Bold struct { XMLName xml.Name `xml:"w:b"` Val string `xml:"w:val,attr,omitempty"` } //RFonts <w:rFonts w:ascii="微软雅黑" w:eastAsia="微软雅黑" w:hAnsi="微软雅黑"/> type RFonts struct { XMLName xml.Name `xml:"w:rFonts"` ASCII string `xml:"w:ascii,attr,omitempty"` EastAsia string `xml:"w:eastAsia,attr,omitempty"` HAnsi string `xml:"w:hAnsi,attr,omitempty"` } //Color <w:color w:val="3A3838"/> type Color struct { XMLName xml.Name `xml:"w:color"` Val string `xml:"w:val,attr,omitempty"` } //Sz 字号大小,如:14号字为28,Word上的字号乘以2 <w:sz w:val="56"/> type Sz struct { XMLName xml.Name `xml:"w:sz"` Val int64 `xml:"w:val,attr,omitempty"` } //SzCs 未知 <w:szCs w:val="56"/> type SzCs struct { XMLName xml.Name `xml:"w:szCs"` Val int64 `xml:"w:val,attr,omitempty"` } // Spacing 行间距 // <w:spacing w:before="360" w:after="120" w:line="480" w:lineRule="auto" w:beforeAutospacing="0" w:afterAutospacing="0"/> // http://officeopenxml.com/WPspacing.php // Values are in twentieths of a point. A normal single-spaced paragaph has a w:line value of 240, or 12 points. // To specify units in hundreths of a line, use attributes 'afterLines'/'beforeLines'. // The space between adjacent paragraphs will be the greater of the 'line' spacing of each paragraph, the spacing // after the first paragraph, and the spacing before the second paragraph. So if the first paragraph specifies 240 // after and the second 80 before, and they are both single-spaced ('line' value of 240), then the space between // the paragraphs will be 240. // Specifies how the spacing between lines as specified in the line attribute is calculated. // Note: If the value of the lineRule attribute is atLeast or exactly, then the value of the line attribute is interpreted as 240th of a point. If the value of lineRule is auto, then the value of line is interpreted as 240th of a line. 
type Spacing struct { XMLName xml.Name `xml:"w:spacing"` Before int64 `xml:"w:before,attr,omitempty"` After int64 `xml:"w:after,attr,omitempty"` Line int64 `xml:"w:line,attr,omitempty"` LineRule LineRule `xml:"w:lineRule,attr,omitempty"` BeforeAutospacing int64 `xml:"w:beforeAutospacing"` AfterAutospacing int64 `xml:"w:afterAutospacing"` } //Jc 对齐方式 <w:jc w:val="left"/> type Jc struct { XMLName xml.Name `xml:"w:jc"` Val string `xml:"w:val,attr,omitempty"` } //T 文本 type T struct { XMLName xml.Name `xml:"w:t"` Space string `xml:"xml:space,attr,omitempty"` //"preserve" // Space string `xml:"w:space,attr,omitempty"` Text string `xml:",chardata"` } //Drawing 绘图 type Drawing struct { XMLName xml.Name `xml:"w:drawing"` Inline *Inline `xml:"wp:inline,omitempty"` //插入图片 Anchor *Anchor `xml:"wp:anchor,omitempty"` //插入形状 } //Inline 绘图边框 type Inline struct { XMLName xml.Name `xml:"wp:inline"` DistT int64 `xml:"distT,attr"` DistB int64 `xml:"distB,attr"` DistL int64 `xml:"distL,attr"` DistR int64 `xml:"distR,attr"` Extent *Extent `xml:"wp:extent"` EffectExtent *EffectExtent `xml:"wp:effectExtent"` DocPr *DocPr `xml:"wp:docPr"` CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"` Graphic *Graphic `xml:"a:graphic"` } //Extent 绘图范围 type Extent struct { XMLName xml.Name `xml:"wp:extent"` CX int64 `xml:"cx,attr"` CY int64 `xml:"cy,attr"` } //EffectExtent 绘图有效范围 type EffectExtent struct { XMLName xml.Name `xml:"wp:effectExtent"` L int64 `xml:"l,attr"` //左边距 T int64 `xml:"t,attr"` //上边距 R int64 `xml:"r,attr"` //右边距 B int64 `xml:"b,attr"` //下边距 } //WrapNone 不断行 type WrapNone struct { XMLName xml.Name `xml:"wp:wrapNone"` } //DocPr 文档属性,唯一就行,好像没鸟用 type DocPr struct { XMLName xml.Name `xml:"wp:docPr"` ID int64 `xml:"id,attr"` Name string `xml:"name,attr"` } //CNvGraphicFramePr 图形框架属性 type CNvGraphicFramePr struct { XMLName xml.Name `xml:"wp:cNvGraphicFramePr"` GraphicFrameLocks *GraphicFrameLocks `xml:"a:graphicFrameLocks"` } //GraphicFrameLocks 图形框架锁 type GraphicFrameLocks struct { XMLName xml.Name `xml:"a:graphicFrameLocks"` A string `xml:"xmlns:a,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/main" NoChangeAspect int64 `xml:"noChangeAspect,attr"` } //Graphic 图形 type Graphic struct { XMLName xml.Name `xml:"a:graphic"` A string `xml:"xmlns:a,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/main" GraphicData *GraphicData `xml:"a:graphicData"` } //GraphicData 图形数据 type GraphicData struct { XMLName xml.Name `xml:"a:graphicData"` //uri = "http://schemas.openxmlformats.org/drawingml/2006/picture" 插入图片 //uri = "http://schemas.microsoft.com/office/word/2010/wordprocessingShape" 插入形状 URI string `xml:"uri,attr"` Pic *Pic `xml:"pic:pic,omitempty"` //图片 Wsp *Wsp `xml:"wps:wsp,omitempty"` //形状 } //Pic 图形 type Pic struct { XMLName xml.Name `xml:"pic:pic"` NSPic string `xml:"xmlns:pic,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/picture" NvPicPr *NvPicPr `xml:"pic:nvPicPr"` BlipFill *BlipFill `xml:"pic:blipFill"` PicSpPr *PicSpPr `xml:"pic:spPr"` } //NvPicPr pic:nvPicPr type NvPicPr struct { XMLName xml.Name `xml:"pic:nvPicPr"` CNvPr *CNvPr `xml:"pic:cNvPr"` CNvPicPr string `xml:"pic:cNvPicPr"` } //CNvPr pic:cNvPr type CNvPr struct { XMLName xml.Name `xml:"pic:cNvPr"` ID int64 `xml:"id,attr"` Name string `xml:"name,attr"` } //BlipFill 填充 type BlipFill struct { XMLName xml.Name `xml:"pic:blipFill"` Blip *Blip `xml:"a:blip"` Stretch *Stretch `xml:"a:stretch"` } //Blip a:blip type Blip struct { XMLName xml.Name `xml:"a:blip"` Embed string `xml:"r:embed,attr"` //填充图片对应rel ID } 
//Stretch 拉伸 type Stretch struct { XMLName xml.Name `xml:"a:stretch"` FillRect string `xml:"a:fillRect"` } //PicSpPr pic:spPr type PicSpPr struct { XMLName xml.Name `xml:"pic:spPr"` Xfrm *Xfrm `xml:"a:xfrm"` PrstGeom *PrstGeom `xml:"a:prstGeom"` } //Xfrm a:xfrm type Xfrm struct { XMLName xml.Name `xml:"a:xfrm"` FlipV int64 `xml:"flipV,attr"` AOff *AOff `xml:"a:off"` AExt *AExt `xml:"a:ext"` } //AOff a:off type AOff struct { XMLName xml.Name `xml:"a:off"` X int64 `xml:"x,attr"` Y int64 `xml:"y,attr"` } //AExt "a:ext type AExt struct { XMLName xml.Name `xml:"a:ext"` CX int64 `xml:"cx,attr"` //图片宽度,36000为1毫米 CY int64 `xml:"cy,attr"` //图片高度,36000为1毫米 } //PrstGeom 几何形状,rect:矩形 type PrstGeom struct { XMLName xml.Name `xml:"a:prstGeom"` Prst string `xml:"prst,attr"` AVLst string `xml:"a:avLst"` } //Anchor 形状 type Anchor struct { XMLName xml.Name `xml:"wp:anchor"` DistT int64 `xml:"distT,attr"` DistB int64 `xml:"distB,attr"` DistL int64 `xml:"distL,attr"` DistR int64 `xml:"distR,attr"` SimplePos int64 `xml:"simplePos,attr"` //默认0 RelativeHeight int64 `xml:"relativeHeight,attr"` //默认0 BehindDoc int64 `xml:"behindDoc,attr"` //默认0 Locked int64 `xml:"locked,attr"` //默认0 LayoutInCell int64 `xml:"layoutInCell,attr"` //默认1 AllowOverlap int64 `xml:"allowOverlap,attr"` //默认1 AnchorID string `xml:"wp14:anchorId,attr"` //"69E31D9A" EditID string `xml:"wp14:editId,attr"` //"48F3AB62" WpSimplePos *SimplePos `xml:"wp:simplePos"` PositionH *PositionH `xml:"wp:positionH"` PositionV *PositionV `xml:"wp:positionV"` Extent *Extent `xml:"wp:extent"` EffectExtent *EffectExtent `xml:"wp:effectExtent"` WrapNone string `xml:"wp:wrapNone"` DocPr *DocPr `xml:"wp:docPr"` CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"` Graphic *Graphic `xml:"a:graphic"` } //SimplePos wp:simplePos type SimplePos struct { XMLName xml.Name `xml:"wp:simplePos"` X int64 `xml:"x,attr"` Y int64 `xml:"y,attr"` } //PositionH wp:positionH type PositionH struct { XMLName xml.Name `xml:"wp:positionH"` RelativeFrom string `xml:"relativeFrom,attr"` //column PosOffset *PosOffset `xml:"wp:posOffset"` } //PositionV wp:positionV type PositionV struct { XMLName xml.Name `xml:"wp:positionV"` RelativeFrom string `xml:"relativeFrom,attr"` //paragraph PosOffset *PosOffset `xml:"wp:posOffset"` } //PosOffset wp:posOffset type PosOffset struct { XMLName xml.Name `xml:"wp:posOffset"` Text string `xml:",chardata"` } //Wsp word形状数据,wps:wsp Word Processing Shape type Wsp struct { XMLName xml.Name `xml:"wps:wsp"` CNvCnPr string `xml:"wps:cNvCnPr"` WpsSpPr *WpsSpPr `xml:"wps:spPr"` BodyPr string `xml:"wps:bodyPr"` } //WpsSpPr wps:spPr type WpsSpPr struct { XMLName xml.Name `xml:"wps:spPr"` Xfrm *Xfrm `xml:"a:xfrm"` PrstGeom *PrstGeom `xml:"a:prstGeom"` Ln *Ln `xml:"a:ln"` } //Ln 线 type Ln struct { XMLName xml.Name `xml:"a:ln"` W int64 `xml:"w,attr"` //线宽 SolidFill *SolidFill `xml:"a:solidFill"` //填充 } //SolidFill 实心填充 type SolidFill struct { XMLName xml.Name `xml:"a:solidFill"` SrgbClr *SrgbClr `xml:"a:srgbClr"` } //SrgbClr 填充颜色 type SrgbClr struct { XMLName xml.Name `xml:"a:srgbClr"` Val string `xml:"val,attr"` //颜色 RGB } //WpsStyle 样式 type WpsStyle struct { XMLName xml.Name `xml:"wps:style"` LnRef *LnRef `xml:"a:lnRef"` FillRef *FillRef `xml:"a:fillRef"` EffectRef *EffectRef `xml:"a:effectRef"` FontRef *FontRef `xml:"a:fontRef"` } //LnRef a:lnRef type LnRef struct { XMLName xml.Name `xml:"a:lnRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //FillRef a:fillRef type FillRef struct { XMLName xml.Name `xml:"a:fillRef"` IDX int64 
`xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //EffectRef a:effectRef type EffectRef struct { XMLName xml.Name `xml:"a:effectRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //FontRef a:fontRef type FontRef struct { XMLName xml.Name `xml:"a:fontRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //SchemeClr a:schemeClr type SchemeClr struct { XMLName xml.Name `xml:"a:schemeClr"` Val string `xml:"val,attr"` } //Relationship 文档映射关系 type Relationship struct { XMLName xml.Name `xml:"Relationship"` ID string `xml:"Id,attr"` //rId9 Target string `xml:"Target,attr"` //media/image2.png Type string `xml:"Type,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/relationships/image" } //DocumentRels 文档映射关系 type DocumentRels struct { XMLName xml.Name `xml:"Relationships"` Relationships []*Relationship `xml:"Relationship"` }
xml
identifier_body
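The cx/cy comments on AExt state that picture sizes are stored in EMUs, with 36000 EMU per millimetre. A small, self-contained Go sketch makes that conversion explicit; the constant and helper names are illustrative.

package main

import "fmt"

// emuPerMM follows the comment on AExt: 36000 EMU equals one millimetre.
const emuPerMM = 36000

func mmToEMU(mm float64) int64 { return int64(mm * emuPerMM) }

func main() {
	// A 50 mm x 30 mm picture would use cx=1800000 and cy=1080000.
	fmt.Println(mmToEMU(50), mmToEMU(30))
}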
model.go
package word import "encoding/xml" //RelationshipTypeImage 图片文档映射关系类型 const RelationshipTypeImage = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/image" //PrefixRID rId前缀 const PrefixRID = "rId" //W word文档 type W struct { XMLName xml.Name `xml:"w:document"` Wpc string `xml:"xmlns:wpc,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingCanvas" MC string `xml:"xmlns:mc,attr"` //"http://schemas.openxmlformats.org/markup-compatibility/2006" O string `xml:"xmlns:o,attr"` //"urn:schemas-microsoft-com:office:office" R string `xml:"xmlns:r,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/relationships" M string `xml:"xmlns:m,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/math" V string `xml:"xmlns:v,attr"` //"urn:schemas-microsoft-com:vml" WP14 string `xml:"xmlns:wp14,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingDrawing" WP string `xml:"xmlns:wp,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing" W10 string `xml:"xmlns:w10,attr"` //"urn:schemas-microsoft-com:office:word" W string `xml:"xmlns:w,attr"` //"http://schemas.openxmlformats.org/wordprocessingml/2006/main" W14 string `xml:"xmlns:w14,attr"` //"http://schemas.microsoft.com/office/word/2010/wordml W15 string `xml:"xmlns:w15,attr"` //"http://schemas.microsoft.com/office/word/2012/wordml" WPG string `xml:"xmlns:wpg,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingGroup" WPI string `xml:"xmlns:wpi,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingInk" WNE string `xml:"xmlns:wne,attr"` //"http://schemas.microsoft.com/office/word/2006/wordml" WPS string `xml:"xmlns:wps,attr"` //"http://schemas.microsoft.com/office/word/2010/wordprocessingShape" Ignorable string `xml:"mc:Ignorable,attr"` //"w14 w15 wp14" Body *Body `xml:"w:body"` } //SetBody 添加文档主体 func (w *W) SetBody(b *Body) { w.Body = b } //Body Word文档主体 type Body struct { Sects []Sect } //AddSect 增加章节 func (b *Body) AddSect(s Sect) { b.Sects = append(b.Sects, s) } //Sect 章节 type Sect interface { Sfunc() } //P 段落 type P struct { XMLName xml.Name `xml:"w:p"` RsidR string `xml:"w:rsidR,attr,omitempty"` RsidRDefault string `xml:"w:rsidRDefault,attr,omitempty"` PPr *PPr `xml:"w:pPr,omitempty"` Rs []Run `xml:"w:r,omitempty"` } //Sfunc P sect func (p *P) Sfunc() { } //Run 段落内容 type Run interface { Rfunc() } //PPr 段落属性 type PPr struct { XMLName xml.Name `xml:"w:pPr"` SnapToGrid *SnapToGrid `xml:"w:snapToGrid,omitempty"` Spacing *Spacing `xml:"w:spacing,omitempty"` Ind *Ind `xml:"w:ind,omitempty"` Jc *Jc `xml:"w:jc,omitempty"` RPr *RPr `xml:"w:rPr,omitempty"` } //Ind 首行:缩进、行高 type Ind struct { XMLName xml.Name `xml:"w:ind"` FirstLineChars int64 `xml:"w:firstLineChars,attr"` //首行缩进字符数,100是一个字符 LeftChars int64 `xml:"w:leftChars,attr"` //左缩进字符数,100是一个字符 RightChars int64 `xml:"w:rightChars,attr"` //右缩进字符数,100是一个字符 // FirstLine int64 `xml:"w:firstLine,attr"` } // <w:ind w:firstLineChars="200" w:firstLine="420" /> //R 块 type R struct { XMLName xml.Name `xml:"w:r"` RPr *RPr `xml:"w:rPr,omitempty"` //块属性 T *T `xml:"w:t,omitempty"` //文字 Drawing *Drawing `xml:"w:drawing,omitempty"` //图片 } //Rfunc run sect func (r *R) Rfunc() { } //SnapToGrid 对齐网格 type SnapToGrid struct { XMLName xml.Name `xml:"w:snapToGrid"` Val string `xml:"w:val,attr,omitempty"` } //RPr 文字块属性 type RPr struct { XMLName xml.Name `xml:"w:rPr"` RFonts *RFonts `xml:"w:rFonts,omitempty"` B *Bold `xml:"w:b,omitempty"` // BCs string `xml:"w:bCs,omitempty"` Color *Color `xml:"w:color"` 
Sz *Sz `xml:"w:sz"` SzCs *SzCs `xml:"w:szCs"` } //Bold 加粗 type Bold struct { XMLName xml.Name `xml:"w:b"` Val string `xml:"w:val,attr,omitempty"` } //RFonts <w:rFonts w:ascii="微软雅黑" w:eastAsia="微软雅黑" w:hAnsi="微软雅黑"/> type RFonts struct { XMLName xml.Name `xml:"w:rFonts"` ASCII string `xml:"w:ascii,attr,omitempty"` EastAsia string `xml:"w:eastAsia,attr,omitempty"` HAnsi string `xml:"w:hAnsi,attr,omitempty"` } //Color <w:color w:val="3A3838"/> type Color struct { XMLName xml.Name `xml:"w:color"` Val string `xml:"w:val,attr,omitempty"` } //Sz 字号大小,如:14号字为28,Word上的字号乘以2 <w:sz w:val="56"/> type Sz struct { XMLName xml.Name `xml:"w:sz"` Val int64 `xml:"w:val,attr,omitempty"` } //SzCs 未知 <w:szCs w:val="56"/> type SzCs struct { XMLName xml.Name `xml:"w:szCs"` Val int64 `xml:"w:val,attr,omitempty"` } // Spacing 行间距 // <w:spacing w:before="360" w:after="120" w:line="480" w:lineRule="auto" w:beforeAutospacing="0" w:afterAutospacing="0"/> // http://officeopenxml.com/WPspacing.php // Values are in twentieths of a point. A normal single-spaced paragaph has a w:line value of 240, or 12 points. // To specify units in hundreths of a line, use attributes 'afterLines'/'beforeLines'. // The space between adjacent paragraphs will be the greater of the 'line' spacing of each paragraph, the spacing // after the first paragraph, and the spacing before the second paragraph. So if the first paragraph specifies 240 // after and the second 80 before, and they are both single-spaced ('line' value of 240), then the space between // the paragraphs will be 240. // Specifies how the spacing between lines as specified in the line attribute is calculated. // Note: If the value of the lineRule attribute is atLeast or exactly, then the value of the line attribute is interpreted as 240th of a point. If the value of lineRule is auto, then the value of line is interpreted as 240th of a line. type Spacing struct { XMLName xml.Name `xml:"w:spacing"` Before int64 `xml:"w:before,attr,omitempty"` After int64 `xml:"w:after,attr,omitempty"` Line int64 `xml:"w:line,attr,omitempty"` LineRule LineRule `xml:"w:lineRule,attr,omitempty"` BeforeAutospacing int64 `xml:"w:beforeAutospacing"` AfterAutospacing int64 `xml:"w:afterAutospacing"` }
XMLName xml.Name `xml:"w:jc"` Val string `xml:"w:val,attr,omitempty"` } //T 文本 type T struct { XMLName xml.Name `xml:"w:t"` Space string `xml:"xml:space,attr,omitempty"` //"preserve" // Space string `xml:"w:space,attr,omitempty"` Text string `xml:",chardata"` } //Drawing 绘图 type Drawing struct { XMLName xml.Name `xml:"w:drawing"` Inline *Inline `xml:"wp:inline,omitempty"` //插入图片 Anchor *Anchor `xml:"wp:anchor,omitempty"` //插入形状 } //Inline 绘图边框 type Inline struct { XMLName xml.Name `xml:"wp:inline"` DistT int64 `xml:"distT,attr"` DistB int64 `xml:"distB,attr"` DistL int64 `xml:"distL,attr"` DistR int64 `xml:"distR,attr"` Extent *Extent `xml:"wp:extent"` EffectExtent *EffectExtent `xml:"wp:effectExtent"` DocPr *DocPr `xml:"wp:docPr"` CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"` Graphic *Graphic `xml:"a:graphic"` } //Extent 绘图范围 type Extent struct { XMLName xml.Name `xml:"wp:extent"` CX int64 `xml:"cx,attr"` CY int64 `xml:"cy,attr"` } //EffectExtent 绘图有效范围 type EffectExtent struct { XMLName xml.Name `xml:"wp:effectExtent"` L int64 `xml:"l,attr"` //左边距 T int64 `xml:"t,attr"` //上边距 R int64 `xml:"r,attr"` //右边距 B int64 `xml:"b,attr"` //下边距 } //WrapNone 不断行 type WrapNone struct { XMLName xml.Name `xml:"wp:wrapNone"` } //DocPr 文档属性,唯一就行,好像没鸟用 type DocPr struct { XMLName xml.Name `xml:"wp:docPr"` ID int64 `xml:"id,attr"` Name string `xml:"name,attr"` } //CNvGraphicFramePr 图形框架属性 type CNvGraphicFramePr struct { XMLName xml.Name `xml:"wp:cNvGraphicFramePr"` GraphicFrameLocks *GraphicFrameLocks `xml:"a:graphicFrameLocks"` } //GraphicFrameLocks 图形框架锁 type GraphicFrameLocks struct { XMLName xml.Name `xml:"a:graphicFrameLocks"` A string `xml:"xmlns:a,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/main" NoChangeAspect int64 `xml:"noChangeAspect,attr"` } //Graphic 图形 type Graphic struct { XMLName xml.Name `xml:"a:graphic"` A string `xml:"xmlns:a,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/main" GraphicData *GraphicData `xml:"a:graphicData"` } //GraphicData 图形数据 type GraphicData struct { XMLName xml.Name `xml:"a:graphicData"` //uri = "http://schemas.openxmlformats.org/drawingml/2006/picture" 插入图片 //uri = "http://schemas.microsoft.com/office/word/2010/wordprocessingShape" 插入形状 URI string `xml:"uri,attr"` Pic *Pic `xml:"pic:pic,omitempty"` //图片 Wsp *Wsp `xml:"wps:wsp,omitempty"` //形状 } //Pic 图形 type Pic struct { XMLName xml.Name `xml:"pic:pic"` NSPic string `xml:"xmlns:pic,attr"` //"http://schemas.openxmlformats.org/drawingml/2006/picture" NvPicPr *NvPicPr `xml:"pic:nvPicPr"` BlipFill *BlipFill `xml:"pic:blipFill"` PicSpPr *PicSpPr `xml:"pic:spPr"` } //NvPicPr pic:nvPicPr type NvPicPr struct { XMLName xml.Name `xml:"pic:nvPicPr"` CNvPr *CNvPr `xml:"pic:cNvPr"` CNvPicPr string `xml:"pic:cNvPicPr"` } //CNvPr pic:cNvPr type CNvPr struct { XMLName xml.Name `xml:"pic:cNvPr"` ID int64 `xml:"id,attr"` Name string `xml:"name,attr"` } //BlipFill 填充 type BlipFill struct { XMLName xml.Name `xml:"pic:blipFill"` Blip *Blip `xml:"a:blip"` Stretch *Stretch `xml:"a:stretch"` } //Blip a:blip type Blip struct { XMLName xml.Name `xml:"a:blip"` Embed string `xml:"r:embed,attr"` //填充图片对应rel ID } //Stretch 拉伸 type Stretch struct { XMLName xml.Name `xml:"a:stretch"` FillRect string `xml:"a:fillRect"` } //PicSpPr pic:spPr type PicSpPr struct { XMLName xml.Name `xml:"pic:spPr"` Xfrm *Xfrm `xml:"a:xfrm"` PrstGeom *PrstGeom `xml:"a:prstGeom"` } //Xfrm a:xfrm type Xfrm struct { XMLName xml.Name `xml:"a:xfrm"` FlipV int64 `xml:"flipV,attr"` AOff *AOff `xml:"a:off"` AExt *AExt `xml:"a:ext"` 
} //AOff a:off type AOff struct { XMLName xml.Name `xml:"a:off"` X int64 `xml:"x,attr"` Y int64 `xml:"y,attr"` } //AExt "a:ext type AExt struct { XMLName xml.Name `xml:"a:ext"` CX int64 `xml:"cx,attr"` //图片宽度,36000为1毫米 CY int64 `xml:"cy,attr"` //图片高度,36000为1毫米 } //PrstGeom 几何形状,rect:矩形 type PrstGeom struct { XMLName xml.Name `xml:"a:prstGeom"` Prst string `xml:"prst,attr"` AVLst string `xml:"a:avLst"` } //Anchor 形状 type Anchor struct { XMLName xml.Name `xml:"wp:anchor"` DistT int64 `xml:"distT,attr"` DistB int64 `xml:"distB,attr"` DistL int64 `xml:"distL,attr"` DistR int64 `xml:"distR,attr"` SimplePos int64 `xml:"simplePos,attr"` //默认0 RelativeHeight int64 `xml:"relativeHeight,attr"` //默认0 BehindDoc int64 `xml:"behindDoc,attr"` //默认0 Locked int64 `xml:"locked,attr"` //默认0 LayoutInCell int64 `xml:"layoutInCell,attr"` //默认1 AllowOverlap int64 `xml:"allowOverlap,attr"` //默认1 AnchorID string `xml:"wp14:anchorId,attr"` //"69E31D9A" EditID string `xml:"wp14:editId,attr"` //"48F3AB62" WpSimplePos *SimplePos `xml:"wp:simplePos"` PositionH *PositionH `xml:"wp:positionH"` PositionV *PositionV `xml:"wp:positionV"` Extent *Extent `xml:"wp:extent"` EffectExtent *EffectExtent `xml:"wp:effectExtent"` WrapNone string `xml:"wp:wrapNone"` DocPr *DocPr `xml:"wp:docPr"` CNvGraphicFramePr *CNvGraphicFramePr `xml:"wp:cNvGraphicFramePr"` Graphic *Graphic `xml:"a:graphic"` } //SimplePos wp:simplePos type SimplePos struct { XMLName xml.Name `xml:"wp:simplePos"` X int64 `xml:"x,attr"` Y int64 `xml:"y,attr"` } //PositionH wp:positionH type PositionH struct { XMLName xml.Name `xml:"wp:positionH"` RelativeFrom string `xml:"relativeFrom,attr"` //column PosOffset *PosOffset `xml:"wp:posOffset"` } //PositionV wp:positionV type PositionV struct { XMLName xml.Name `xml:"wp:positionV"` RelativeFrom string `xml:"relativeFrom,attr"` //paragraph PosOffset *PosOffset `xml:"wp:posOffset"` } //PosOffset wp:posOffset type PosOffset struct { XMLName xml.Name `xml:"wp:posOffset"` Text string `xml:",chardata"` } //Wsp word形状数据,wps:wsp Word Processing Shape type Wsp struct { XMLName xml.Name `xml:"wps:wsp"` CNvCnPr string `xml:"wps:cNvCnPr"` WpsSpPr *WpsSpPr `xml:"wps:spPr"` BodyPr string `xml:"wps:bodyPr"` } //WpsSpPr wps:spPr type WpsSpPr struct { XMLName xml.Name `xml:"wps:spPr"` Xfrm *Xfrm `xml:"a:xfrm"` PrstGeom *PrstGeom `xml:"a:prstGeom"` Ln *Ln `xml:"a:ln"` } //Ln 线 type Ln struct { XMLName xml.Name `xml:"a:ln"` W int64 `xml:"w,attr"` //线宽 SolidFill *SolidFill `xml:"a:solidFill"` //填充 } //SolidFill 实心填充 type SolidFill struct { XMLName xml.Name `xml:"a:solidFill"` SrgbClr *SrgbClr `xml:"a:srgbClr"` } //SrgbClr 填充颜色 type SrgbClr struct { XMLName xml.Name `xml:"a:srgbClr"` Val string `xml:"val,attr"` //颜色 RGB } //WpsStyle 样式 type WpsStyle struct { XMLName xml.Name `xml:"wps:style"` LnRef *LnRef `xml:"a:lnRef"` FillRef *FillRef `xml:"a:fillRef"` EffectRef *EffectRef `xml:"a:effectRef"` FontRef *FontRef `xml:"a:fontRef"` } //LnRef a:lnRef type LnRef struct { XMLName xml.Name `xml:"a:lnRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //FillRef a:fillRef type FillRef struct { XMLName xml.Name `xml:"a:fillRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //EffectRef a:effectRef type EffectRef struct { XMLName xml.Name `xml:"a:effectRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //FontRef a:fontRef type FontRef struct { XMLName xml.Name `xml:"a:fontRef"` IDX int64 `xml:"idx,attr"` SchemeClr *SchemeClr `xml:"a:schemeClr"` } //SchemeClr a:schemeClr type 
SchemeClr struct { XMLName xml.Name `xml:"a:schemeClr"` Val string `xml:"val,attr"` } //Relationship 文档映射关系 type Relationship struct { XMLName xml.Name `xml:"Relationship"` ID string `xml:"Id,attr"` //rId9 Target string `xml:"Target,attr"` //media/image2.png Type string `xml:"Type,attr"` //"http://schemas.openxmlformats.org/officeDocument/2006/relationships/image" } //DocumentRels 文档映射关系 type DocumentRels struct { XMLName xml.Name `xml:"Relationships"` Relationships []*Relationship `xml:"Relationship"` }
//Jc alignment <w:jc w:val="left"/> type Jc struct {
random_line_split
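The documentation block above Spacing says that w:before, w:after and w:line are measured in twentieths of a point, with 240 (12 pt) being a normal single-spaced line when lineRule is auto. The sketch below turns those unit rules into a tiny helper, again written as if it lived alongside model.go in the word package; the helper names are illustrative.

package word

// Twips converts a size in points to twentieths of a point, the unit used by
// the w:before, w:after and w:line attributes.
func Twips(points float64) int64 { return int64(points * 20) }

// doubleSpaced builds a Spacing value with 18 pt before, 6 pt after and double
// line spacing (2 x 240), intended for the case where lineRule is "auto".
func doubleSpaced() *Spacing {
	return &Spacing{
		Before: Twips(18), // 360
		After:  Twips(6),  // 120
		Line:   2 * 240,   // 480
	}
}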
minesweeper.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Feb 20 11:15:50 2017 @author: yaojie """ import random import sys from copy import deepcopy class Minesweeper(object): # just for readability WIN = True IS_A_BOMB = True NOT_A_BOMB = False # Unicode just to look pretty FLAG = u'\u2691' BOMB = u'\U0001F4A3' EXPLOSION = u'\U0001F4A5' letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] def __init__(self, height, width, mines): """initializes the Minesweeper instance with a width, height, and the number of mines. Sets up a default game table, generates random mine locations and updates another table for the solution.""" self.x = int(width) self.y = int(height) self.table_state = [ ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)] self.mine_locations = self.generate_mines(int(mines)) self.final_table = self.generate_answer() @staticmethod def print_table(table, exploded_at=[-1, -1]): """prints the table, regardless whether it's a game state table or the answer table.""" # color codes just to look pretty NORMAL = '\33[10m' BLUE_START = '\33[104m' RED_START = '\33[31m' PURPLE_START = '\33[35m' GREEN_START = '\33[92m' ORANGE_START = '\33[93m' END = '\033[0m' s = ' %s' % BLUE_START # print number headers along x-axis for i in range(0, width): s += " %s" % i if i < 10: s += " " * 2 else: s += " " s += "%s\n" % END # print letters for y-axis, + the relevant values in each coordinate # depending on table. for y in range(0, height): s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END) for x in range(0, width): value = table[y][x] if value == "0": s += "%s%s%s" % (NORMAL, value, END) elif value == "1": s += "%s%s%s" % (GREEN_START, value, END) elif value == "2": s += "%s%s%s" % (ORANGE_START, value, END) elif value == "3": s += "%s%s%s" % (RED_START, value, END) elif value == "4" or value == "5" or value == "6" or value == "7" or value == "8": s += "%s%s%s" % (PURPLE_START, value, END) # special elif value == "-": s += "%s%s%s" % (NORMAL, value, END) elif value == Minesweeper.BOMB: if y == exploded_at[0] and x == exploded_at[1]: # Make the bomb at the casualty site explode! s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END) else: # show normal bomb s += "%s%s%s" % (RED_START, value, END) elif value == Minesweeper.FLAG: s += "%s%s%s" % (RED_START, value, END) s += " " * 3 s += "\n" # use tabbing to space them nicely print s.expandtabs(3) def generate_mines(self, number): """generate a list of viable coordinates for mines, and randomly choose them.""" mine_locations = [] available_places = [[j, i] for i in xrange(0, self.x) for j in xrange(0, self.y)] while number > 0: # the chosen coordinate for a mine is appended into the list and is # removed from the list of choices to prevent duplicates.
return mine_locations def generate_answer(self): ft = deepcopy(self.table_state) for x in range(0, self.x): for y in range(0, self.y): # get the number or mine with neighbours ft[y][x] = self.get_neighbour(y, x) return ft def get_neighbour(self, y, x): """populate answer table with numbers and mines""" if [y, x] in self.mine_locations: return Minesweeper.BOMB count = 0 # (x-1, y-1), (x, y-1), (x+1, y-1), # (x-1, y), (x, y), (x+1, y), # (x-1, y+1), (x, y+1), (x+1, y+1) for xe in range(x - 1, x + 2): for ye in range(y - 1, y + 2): if [ye, xe] in self.mine_locations: count += 1 return str(count) def flags_nearby(self, y, x): """ gets number of flags nearby """ count = 0 l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: if xe >= self.x or ye >= self.y: continue if self.table_state[ye][xe] == Minesweeper.FLAG: count += 1 return str(count) def special_open_neighbours(self, y, x): """Open neighbours if the flag number matches the count.""" if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x): l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: if xe >= self.x or ye >= self.y: # do not open out of bounds continue # if it is a bomb but not flagged if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG: self.show_answer_board([ye, xe]) print "KABOOM!" return Minesweeper.IS_A_BOMB self.open_neighbours(y, x) self.print_table(self.table_state) return Minesweeper.NOT_A_BOMB def open_neighbours(self, y, x): """Open neighbours if the current coordinates are 0 and neighbours are untouched. Recursively opens if the neighbours are also 0.""" if [y, x] in self.mine_locations: return [y, x] # generate neighbours with positive indexes l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: # if the indexes are out of the game table, skip if xe >= self.x or ye >= self.y: continue # if the current coordinates are still untouched, update their values if self.table_state[ye][xe] == '-': self.table_state[ye][xe] = self.final_table[ye][xe] # if the coordinate has a value of 0, recursively open it's neighbours. if self.final_table[ye][xe] == '0': self.open_neighbours(ye, xe) def check_status(self): count = 0 flag_count = 0 for i in [item for sublist in self.table_state for item in sublist]: if i == '-': count += 1 if i == Minesweeper.FLAG: count += 1 flag_count += 1 print "%d tiles remaining. (%d flagged)" % (count - flag_count, flag_count) return count == len(self.mine_locations) def flag(self, y, x): """set a flag to the desired coordinates.""" if self.table_state[y][x] == '-': self.table_state[y][x] = Minesweeper.FLAG Minesweeper.print_table(self.table_state) def tease_user(self, y, x): """come here when the coordinates do not have a bomb. update the table_state with the selected coordinate.""" self.table_state[y][x] = self.final_table[y][x] # if there are no neighbouring 0s, open neighbours if self.table_state[y][x] == '0': self.open_neighbours(y, x) self.print_table(self.table_state) def show_answer_board(self, coords): """prints the answer table with print_table.""" Minesweeper.print_table(self.final_table, coords) def open_tile(self, y, x): """opens a tile at the respective coordinates on the table_state list.""" # Find the letter index and convert into a y-coordinate. 
# Checks if it is a mine if [y, x] in self.mine_locations: # explode self.show_answer_board([y, x]) print "Boomz." return Minesweeper.IS_A_BOMB else: # strip(?)tease to the user (oh damn sexy numbers) self.tease_user(y, x) return Minesweeper.NOT_A_BOMB # initialize options print "Options: " print "* = letter from A to P, # = number from 0 to 29" print "Opening a tile: o*#" print "Flag a tile: f*#" print "Left and right click a tile to open neighbouring tiles when the number matches the number of flags: d*#" print "exit: exit" default_height = 15 default_width = 15 default_mines = 20 height = raw_input("Height (1 to 26), defaults to %d: " % default_height) or default_height width = raw_input("Width (1 to 26), defaults to %d: " % default_width) or default_width mines = raw_input("Number of mines, defaults to %d: " % default_mines) or default_mines print '' ms = Minesweeper(height, width, mines) Minesweeper.print_table(ms.table_state) # listen to commands by user. while True: command = raw_input("Command: ") try: if command == "exit": break elif 'd' == command[0]: # open neighbour of selected coordinate if flag count matches # number if ms.special_open_neighbours(Minesweeper.letters.index(command[1]), int(command[2:])) == Minesweeper.IS_A_BOMB: break elif 'o' == command[0]: # open a tile # ms.open_tile checks whether it's a bomb if ms.open_tile(Minesweeper.letters.index(command[1]), int(command[2:])) == Minesweeper.IS_A_BOMB: break elif 'f' == command[0]: ms.flag(Minesweeper.letters.index(command[1]), int(command[2:])) if ms.check_status() == Minesweeper.WIN: ms.show_answer_board([-1, -1]) print "You win!" break except: print sys.exc_info() print "Whoops, try again!"
choice = random.choice(available_places) available_places.remove(choice) mine_locations.append(choice) number -= 1
conditional_block
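Aside: the mine-placement loop in the sample above picks a coordinate from available_places, removes it, and appends it until the requested count is reached. The same uniqueness guarantee comes for free from random.sample; a minimal sketch, assuming a rectangular board given as height, width, and a mine count (function and variable names here are illustrative, not from the original file):

import random

def place_mines(height, width, mines):
    # every cell is a (row, col) pair; random.sample never repeats a cell
    cells = [(y, x) for y in range(height) for x in range(width)]
    return random.sample(cells, mines)

# e.g. place_mines(15, 15, 20) returns 20 distinct coordinates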
minesweeper.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Feb 20 11:15:50 2017 @author: yaojie """ import random import sys from copy import deepcopy class Minesweeper(object): # just for readability WIN = True IS_A_BOMB = True NOT_A_BOMB = False # Unicode just to look pretty FLAG = u'\u2691' BOMB = u'\U0001F4A3' EXPLOSION = u'\U0001F4A5' letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] def __init__(self, height, width, mines): """initializes the Minesweeper instance with a width, height, and the number of mines. Sets up a default game table, generates random mine locations and updates another table for the solution.""" self.x = int(width) self.y = int(height) self.table_state = [ ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)] self.mine_locations = self.generate_mines(int(mines)) self.final_table = self.generate_answer() @staticmethod def print_table(table, exploded_at=[-1, -1]): """prints the table, regardless whether it's a game state table or the answer table.""" # color codes just to look pretty NORMAL = '\33[10m' BLUE_START = '\33[104m' RED_START = '\33[31m' PURPLE_START = '\33[35m' GREEN_START = '\33[92m' ORANGE_START = '\33[93m' END = '\033[0m' s = ' %s' % BLUE_START # print number headers along x-axis for i in range(0, width): s += " %s" % i if i < 10: s += " " * 2 else: s += " " s += "%s\n" % END # print letters for y-axis, + the relevant values in each coordinate # depending on table. for y in range(0, height): s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END) for x in range(0, width): value = table[y][x] if value == "0": s += "%s%s%s" % (NORMAL, value, END) elif value == "1": s += "%s%s%s" % (GREEN_START, value, END) elif value == "2": s += "%s%s%s" % (ORANGE_START, value, END) elif value == "3": s += "%s%s%s" % (RED_START, value, END) elif value == "4" or value == "5" or value == "6" or value == "7" or value == "8": s += "%s%s%s" % (PURPLE_START, value, END) # special elif value == "-": s += "%s%s%s" % (NORMAL, value, END) elif value == Minesweeper.BOMB: if y == exploded_at[0] and x == exploded_at[1]: # Make the bomb at the casualty site explode! s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END) else: # show normal bomb s += "%s%s%s" % (RED_START, value, END) elif value == Minesweeper.FLAG: s += "%s%s%s" % (RED_START, value, END) s += " " * 3 s += "\n" # use tabbing to space them nicely print s.expandtabs(3) def generate_mines(self, number): """generate a list of viable coordinates for mines, and randomly choose them.""" mine_locations = [] available_places = [[j, i] for i in xrange(0, self.x) for j in xrange(0, self.y)] while number > 0: # the chosen coordinate for a mine is appended into the list and is # removed from the list of choices to prevent duplicates. 
choice = random.choice(available_places) available_places.remove(choice) mine_locations.append(choice) number -= 1 return mine_locations def generate_answer(self): ft = deepcopy(self.table_state) for x in range(0, self.x): for y in range(0, self.y): # get the number or mine with neighbours ft[y][x] = self.get_neighbour(y, x) return ft def get_neighbour(self, y, x): """populate answer table with numbers and mines""" if [y, x] in self.mine_locations: return Minesweeper.BOMB count = 0 # (x-1, y-1), (x, y-1), (x+1, y-1), # (x-1, y), (x, y), (x+1, y), # (x-1, y+1), (x, y+1), (x+1, y+1) for xe in range(x - 1, x + 2): for ye in range(y - 1, y + 2): if [ye, xe] in self.mine_locations: count += 1 return str(count) def flags_nearby(self, y, x): """ gets number of flags nearby """ count = 0 l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: if xe >= self.x or ye >= self.y: continue if self.table_state[ye][xe] == Minesweeper.FLAG: count += 1 return str(count) def special_open_neighbours(self, y, x): """Open neighbours if the flag number matches the count.""" if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x): l = [[ye, xe] for xe in range(
if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG: self.show_answer_board([ye, xe]) print "KABOOM!" return Minesweeper.IS_A_BOMB self.open_neighbours(y, x) self.print_table(self.table_state) return Minesweeper.NOT_A_BOMB def open_neighbours(self, y, x): """Open neighbours if the current coordinates are 0 and neighbours are untouched. Recursively opens if the neighbours are also 0.""" if [y, x] in self.mine_locations: return [y, x] # generate neighbours with positive indexes l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: # if the indexes are out of the game table, skip if xe >= self.x or ye >= self.y: continue # if the current coordinates are still untouched, update their values if self.table_state[ye][xe] == '-': self.table_state[ye][xe] = self.final_table[ye][xe] # if the coordinate has a value of 0, recursively open it's neighbours. if self.final_table[ye][xe] == '0': self.open_neighbours(ye, xe) def check_status(self): count = 0 flag_count = 0 for i in [item for sublist in self.table_state for item in sublist]: if i == '-': count += 1 if i == Minesweeper.FLAG: count += 1 flag_count += 1 print "%d tiles remaining. (%d flagged)" % (count - flag_count, flag_count) return count == len(self.mine_locations) def flag(self, y, x): """set a flag to the desired coordinates.""" if self.table_state[y][x] == '-': self.table_state[y][x] = Minesweeper.FLAG Minesweeper.print_table(self.table_state) def tease_user(self, y, x): """come here when the coordinates do not have a bomb. update the table_state with the selected coordinate.""" self.table_state[y][x] = self.final_table[y][x] # if there are no neighbouring 0s, open neighbours if self.table_state[y][x] == '0': self.open_neighbours(y, x) self.print_table(self.table_state) def show_answer_board(self, coords): """prints the answer table with print_table.""" Minesweeper.print_table(self.final_table, coords) def open_tile(self, y, x): """opens a tile at the respective coordinates on the table_state list.""" # Find the letter index and convert into a y-coordinate. # Checks if it is a mine if [y, x] in self.mine_locations: # explode self.show_answer_board([y, x]) print "Boomz." return Minesweeper.IS_A_BOMB else: # strip(?)tease to the user (oh damn sexy numbers) self.tease_user(y, x) return Minesweeper.NOT_A_BOMB # initialize options print "Options: " print "* = letter from A to P, # = number from 0 to 29" print "Opening a tile: o*#" print "Flag a tile: f*#" print "Left and right click a tile to open neighbouring tiles when the number matches the number of flags: d*#" print "exit: exit" default_height = 15 default_width = 15 default_mines = 20 height = raw_input("Height (1 to 26), defaults to %d: " % default_height) or default_height width = raw_input("Width (1 to 26), defaults to %d: " % default_width) or default_width mines = raw_input("Number of mines, defaults to %d: " % default_mines) or default_mines print '' ms = Minesweeper(height, width, mines) Minesweeper.print_table(ms.table_state) # listen to commands by user. 
while True: command = raw_input("Command: ") try: if command == "exit": break elif 'd' == command[0]: # open neighbour of selected coordinate if flag count matches # number if ms.special_open_neighbours(Minesweeper.letters.index(command[1]), int(command[2:])) == Minesweeper.IS_A_BOMB: break elif 'o' == command[0]: # open a tile # ms.open_tile checks whether it's a bomb if ms.open_tile(Minesweeper.letters.index(command[1]), int(command[2:])) == Minesweeper.IS_A_BOMB: break elif 'f' == command[0]: ms.flag(Minesweeper.letters.index(command[1]), int(command[2:])) if ms.check_status() == Minesweeper.WIN: ms.show_answer_board([-1, -1]) print "You win!" break except: print sys.exc_info() print "Whoops, try again!"
x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: if xe >= self.x or ye >= self.y: # do not open out of bounds continue # if it is a bomb but not flagged
random_line_split
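Aside: flags_nearby, special_open_neighbours, and open_neighbours in the sample above each rebuild the same neighbour list comprehension, clipping negative indexes in the comprehension and re-checking the upper bound inside the loop. The same idea can live in one helper; a sketch under that assumption (the helper name is illustrative):

def neighbours(y, x, height, width):
    # yield the up-to-8 in-bounds cells around (y, x), excluding (y, x) itself
    for ye in range(y - 1, y + 2):
        for xe in range(x - 1, x + 2):
            if (ye, xe) != (y, x) and 0 <= ye < height and 0 <= xe < width:
                yield ye, xe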
minesweeper.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Feb 20 11:15:50 2017 @author: yaojie """ import random import sys from copy import deepcopy class Minesweeper(object): # just for readability WIN = True IS_A_BOMB = True NOT_A_BOMB = False # Unicode just to look pretty FLAG = u'\u2691' BOMB = u'\U0001F4A3' EXPLOSION = u'\U0001F4A5' letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] def
(self, height, width, mines): """initializes the Minesweeper instance with a width, height, and the number of mines. Sets up a default game table, generates random mine locations and updates another table for the solution.""" self.x = int(width) self.y = int(height) self.table_state = [ ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)] self.mine_locations = self.generate_mines(int(mines)) self.final_table = self.generate_answer() @staticmethod def print_table(table, exploded_at=[-1, -1]): """prints the table, regardless whether it's a game state table or the answer table.""" # color codes just to look pretty NORMAL = '\33[10m' BLUE_START = '\33[104m' RED_START = '\33[31m' PURPLE_START = '\33[35m' GREEN_START = '\33[92m' ORANGE_START = '\33[93m' END = '\033[0m' s = ' %s' % BLUE_START # print number headers along x-axis for i in range(0, width): s += " %s" % i if i < 10: s += " " * 2 else: s += " " s += "%s\n" % END # print letters for y-axis, + the relevant values in each coordinate # depending on table. for y in range(0, height): s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END) for x in range(0, width): value = table[y][x] if value == "0": s += "%s%s%s" % (NORMAL, value, END) elif value == "1": s += "%s%s%s" % (GREEN_START, value, END) elif value == "2": s += "%s%s%s" % (ORANGE_START, value, END) elif value == "3": s += "%s%s%s" % (RED_START, value, END) elif value == "4" or value == "5" or value == "6" or value == "7" or value == "8": s += "%s%s%s" % (PURPLE_START, value, END) # special elif value == "-": s += "%s%s%s" % (NORMAL, value, END) elif value == Minesweeper.BOMB: if y == exploded_at[0] and x == exploded_at[1]: # Make the bomb at the casualty site explode! s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END) else: # show normal bomb s += "%s%s%s" % (RED_START, value, END) elif value == Minesweeper.FLAG: s += "%s%s%s" % (RED_START, value, END) s += " " * 3 s += "\n" # use tabbing to space them nicely print s.expandtabs(3) def generate_mines(self, number): """generate a list of viable coordinates for mines, and randomly choose them.""" mine_locations = [] available_places = [[j, i] for i in xrange(0, self.x) for j in xrange(0, self.y)] while number > 0: # the chosen coordinate for a mine is appended into the list and is # removed from the list of choices to prevent duplicates. 
choice = random.choice(available_places) available_places.remove(choice) mine_locations.append(choice) number -= 1 return mine_locations def generate_answer(self): ft = deepcopy(self.table_state) for x in range(0, self.x): for y in range(0, self.y): # get the number or mine with neighbours ft[y][x] = self.get_neighbour(y, x) return ft def get_neighbour(self, y, x): """populate answer table with numbers and mines""" if [y, x] in self.mine_locations: return Minesweeper.BOMB count = 0 # (x-1, y-1), (x, y-1), (x+1, y-1), # (x-1, y), (x, y), (x+1, y), # (x-1, y+1), (x, y+1), (x+1, y+1) for xe in range(x - 1, x + 2): for ye in range(y - 1, y + 2): if [ye, xe] in self.mine_locations: count += 1 return str(count) def flags_nearby(self, y, x): """ gets number of flags nearby """ count = 0 l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: if xe >= self.x or ye >= self.y: continue if self.table_state[ye][xe] == Minesweeper.FLAG: count += 1 return str(count) def special_open_neighbours(self, y, x): """Open neighbours if the flag number matches the count.""" if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x): l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: if xe >= self.x or ye >= self.y: # do not open out of bounds continue # if it is a bomb but not flagged if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG: self.show_answer_board([ye, xe]) print "KABOOM!" return Minesweeper.IS_A_BOMB self.open_neighbours(y, x) self.print_table(self.table_state) return Minesweeper.NOT_A_BOMB def open_neighbours(self, y, x): """Open neighbours if the current coordinates are 0 and neighbours are untouched. Recursively opens if the neighbours are also 0.""" if [y, x] in self.mine_locations: return [y, x] # generate neighbours with positive indexes l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: # if the indexes are out of the game table, skip if xe >= self.x or ye >= self.y: continue # if the current coordinates are still untouched, update their values if self.table_state[ye][xe] == '-': self.table_state[ye][xe] = self.final_table[ye][xe] # if the coordinate has a value of 0, recursively open it's neighbours. if self.final_table[ye][xe] == '0': self.open_neighbours(ye, xe) def check_status(self): count = 0 flag_count = 0 for i in [item for sublist in self.table_state for item in sublist]: if i == '-': count += 1 if i == Minesweeper.FLAG: count += 1 flag_count += 1 print "%d tiles remaining. (%d flagged)" % (count - flag_count, flag_count) return count == len(self.mine_locations) def flag(self, y, x): """set a flag to the desired coordinates.""" if self.table_state[y][x] == '-': self.table_state[y][x] = Minesweeper.FLAG Minesweeper.print_table(self.table_state) def tease_user(self, y, x): """come here when the coordinates do not have a bomb. 
update the table_state with the selected coordinate.""" self.table_state[y][x] = self.final_table[y][x] # if there are no neighbouring 0s, open neighbours if self.table_state[y][x] == '0': self.open_neighbours(y, x) self.print_table(self.table_state) def show_answer_board(self, coords): """prints the answer table with print_table.""" Minesweeper.print_table(self.final_table, coords) def open_tile(self, y, x): """opens a tile at the respective coordinates on the table_state list.""" # Find the letter index and convert into a y-coordinate. # Checks if it is a mine if [y, x] in self.mine_locations: # explode self.show_answer_board([y, x]) print "Boomz." return Minesweeper.IS_A_BOMB else: # strip(?)tease to the user (oh damn sexy numbers) self.tease_user(y, x) return Minesweeper.NOT_A_BOMB # initialize options print "Options: " print "* = letter from A to P, # = number from 0 to 29" print "Opening a tile: o*#" print "Flag a tile: f*#" print "Left and right click a tile to open neighbouring tiles when the number matches the number of flags: d*#" print "exit: exit" default_height = 15 default_width = 15 default_mines = 20 height = raw_input("Height (1 to 26), defaults to %d: " % default_height) or default_height width = raw_input("Width (1 to 26), defaults to %d: " % default_width) or default_width mines = raw_input("Number of mines, defaults to %d: " % default_mines) or default_mines print '' ms = Minesweeper(height, width, mines) Minesweeper.print_table(ms.table_state) # listen to commands by user. while True: command = raw_input("Command: ") try: if command == "exit": break elif 'd' == command[0]: # open neighbour of selected coordinate if flag count matches # number if ms.special_open_neighbours(Minesweeper.letters.index(command[1]), int(command[2:])) == Minesweeper.IS_A_BOMB: break elif 'o' == command[0]: # open a tile # ms.open_tile checks whether it's a bomb if ms.open_tile(Minesweeper.letters.index(command[1]), int(command[2:])) == Minesweeper.IS_A_BOMB: break elif 'f' == command[0]: ms.flag(Minesweeper.letters.index(command[1]), int(command[2:])) if ms.check_status() == Minesweeper.WIN: ms.show_answer_board([-1, -1]) print "You win!" break except: print sys.exc_info() print "Whoops, try again!"
__init__
identifier_name
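Aside: the command loop in these samples decodes inputs such as "oA5" in three separate branches, each indexing command[1] against Minesweeper.letters and converting command[2:] to an int. A single parsing helper would express the format once; a hedged sketch (the function name is illustrative and not part of the original script):

def parse_command(command, letters):
    # 'oA5' -> ('o', row index of 'A', column 5)
    action = command[0]
    y = letters.index(command[1])
    x = int(command[2:])
    return action, y, x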
minesweeper.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Feb 20 11:15:50 2017 @author: yaojie """ import random import sys from copy import deepcopy class Minesweeper(object): # just for readability WIN = True IS_A_BOMB = True NOT_A_BOMB = False # Unicode just to look pretty FLAG = u'\u2691' BOMB = u'\U0001F4A3' EXPLOSION = u'\U0001F4A5' letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] def __init__(self, height, width, mines): """initializes the Minesweeper instance with a width, height, and the number of mines. Sets up a default game table, generates random mine locations and updates another table for the solution.""" self.x = int(width) self.y = int(height) self.table_state = [ ['-' for i in xrange(0, self.x)] for j in xrange(0, self.y)] self.mine_locations = self.generate_mines(int(mines)) self.final_table = self.generate_answer() @staticmethod def print_table(table, exploded_at=[-1, -1]): """prints the table, regardless whether it's a game state table or the answer table.""" # color codes just to look pretty NORMAL = '\33[10m' BLUE_START = '\33[104m' RED_START = '\33[31m' PURPLE_START = '\33[35m' GREEN_START = '\33[92m' ORANGE_START = '\33[93m' END = '\033[0m' s = ' %s' % BLUE_START # print number headers along x-axis for i in range(0, width): s += " %s" % i if i < 10: s += " " * 2 else: s += " " s += "%s\n" % END # print letters for y-axis, + the relevant values in each coordinate # depending on table. for y in range(0, height): s += "%s %s %s \t" % (BLUE_START, Minesweeper.letters[y], END) for x in range(0, width): value = table[y][x] if value == "0": s += "%s%s%s" % (NORMAL, value, END) elif value == "1": s += "%s%s%s" % (GREEN_START, value, END) elif value == "2": s += "%s%s%s" % (ORANGE_START, value, END) elif value == "3": s += "%s%s%s" % (RED_START, value, END) elif value == "4" or value == "5" or value == "6" or value == "7" or value == "8": s += "%s%s%s" % (PURPLE_START, value, END) # special elif value == "-": s += "%s%s%s" % (NORMAL, value, END) elif value == Minesweeper.BOMB: if y == exploded_at[0] and x == exploded_at[1]: # Make the bomb at the casualty site explode! s += "%s%s%s" % (RED_START, Minesweeper.EXPLOSION, END) else: # show normal bomb s += "%s%s%s" % (RED_START, value, END) elif value == Minesweeper.FLAG: s += "%s%s%s" % (RED_START, value, END) s += " " * 3 s += "\n" # use tabbing to space them nicely print s.expandtabs(3) def generate_mines(self, number): """generate a list of viable coordinates for mines, and randomly choose them.""" mine_locations = [] available_places = [[j, i] for i in xrange(0, self.x) for j in xrange(0, self.y)] while number > 0: # the chosen coordinate for a mine is appended into the list and is # removed from the list of choices to prevent duplicates. choice = random.choice(available_places) available_places.remove(choice) mine_locations.append(choice) number -= 1 return mine_locations def generate_answer(self): ft = deepcopy(self.table_state) for x in range(0, self.x): for y in range(0, self.y): # get the number or mine with neighbours ft[y][x] = self.get_neighbour(y, x) return ft def get_neighbour(self, y, x):
def flags_nearby(self, y, x): """ gets number of flags nearby """ count = 0 l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: if xe >= self.x or ye >= self.y: continue if self.table_state[ye][xe] == Minesweeper.FLAG: count += 1 return str(count) def special_open_neighbours(self, y, x): """Open neighbours if the flag number matches the count.""" if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x): l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: if xe >= self.x or ye >= self.y: # do not open out of bounds continue # if it is a bomb but not flagged if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG: self.show_answer_board([ye, xe]) print "KABOOM!" return Minesweeper.IS_A_BOMB self.open_neighbours(y, x) self.print_table(self.table_state) return Minesweeper.NOT_A_BOMB def open_neighbours(self, y, x): """Open neighbours if the current coordinates are 0 and neighbours are untouched. Recursively opens if the neighbours are also 0.""" if [y, x] in self.mine_locations: return [y, x] # generate neighbours with positive indexes l = [[ye, xe] for xe in range( x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0] for ye, xe in l: # if the indexes are out of the game table, skip if xe >= self.x or ye >= self.y: continue # if the current coordinates are still untouched, update their values if self.table_state[ye][xe] == '-': self.table_state[ye][xe] = self.final_table[ye][xe] # if the coordinate has a value of 0, recursively open it's neighbours. if self.final_table[ye][xe] == '0': self.open_neighbours(ye, xe) def check_status(self): count = 0 flag_count = 0 for i in [item for sublist in self.table_state for item in sublist]: if i == '-': count += 1 if i == Minesweeper.FLAG: count += 1 flag_count += 1 print "%d tiles remaining. (%d flagged)" % (count - flag_count, flag_count) return count == len(self.mine_locations) def flag(self, y, x): """set a flag to the desired coordinates.""" if self.table_state[y][x] == '-': self.table_state[y][x] = Minesweeper.FLAG Minesweeper.print_table(self.table_state) def tease_user(self, y, x): """come here when the coordinates do not have a bomb. update the table_state with the selected coordinate.""" self.table_state[y][x] = self.final_table[y][x] # if there are no neighbouring 0s, open neighbours if self.table_state[y][x] == '0': self.open_neighbours(y, x) self.print_table(self.table_state) def show_answer_board(self, coords): """prints the answer table with print_table.""" Minesweeper.print_table(self.final_table, coords) def open_tile(self, y, x): """opens a tile at the respective coordinates on the table_state list.""" # Find the letter index and convert into a y-coordinate. # Checks if it is a mine if [y, x] in self.mine_locations: # explode self.show_answer_board([y, x]) print "Boomz." 
return Minesweeper.IS_A_BOMB else: # strip(?)tease to the user (oh damn sexy numbers) self.tease_user(y, x) return Minesweeper.NOT_A_BOMB # initialize options print "Options: " print "* = letter from A to P, # = number from 0 to 29" print "Opening a tile: o*#" print "Flag a tile: f*#" print "Left and right click a tile to open neighbouring tiles when the number matches the number of flags: d*#" print "exit: exit" default_height = 15 default_width = 15 default_mines = 20 height = raw_input("Height (1 to 26), defaults to %d: " % default_height) or default_height width = raw_input("Width (1 to 26), defaults to %d: " % default_width) or default_width mines = raw_input("Number of mines, defaults to %d: " % default_mines) or default_mines print '' ms = Minesweeper(height, width, mines) Minesweeper.print_table(ms.table_state) # listen to commands by user. while True: command = raw_input("Command: ") try: if command == "exit": break elif 'd' == command[0]: # open neighbour of selected coordinate if flag count matches # number if ms.special_open_neighbours(Minesweeper.letters.index(command[1]), int(command[2:])) == Minesweeper.IS_A_BOMB: break elif 'o' == command[0]: # open a tile # ms.open_tile checks whether it's a bomb if ms.open_tile(Minesweeper.letters.index(command[1]), int(command[2:])) == Minesweeper.IS_A_BOMB: break elif 'f' == command[0]: ms.flag(Minesweeper.letters.index(command[1]), int(command[2:])) if ms.check_status() == Minesweeper.WIN: ms.show_answer_board([-1, -1]) print "You win!" break except: print sys.exc_info() print "Whoops, try again!"
"""populate answer table with numbers and mines""" if [y, x] in self.mine_locations: return Minesweeper.BOMB count = 0 # (x-1, y-1), (x, y-1), (x+1, y-1), # (x-1, y), (x, y), (x+1, y), # (x-1, y+1), (x, y+1), (x+1, y+1) for xe in range(x - 1, x + 2): for ye in range(y - 1, y + 2): if [ye, xe] in self.mine_locations: count += 1 return str(count)
identifier_body
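Aside: get_neighbour above tests [ye, xe] in self.mine_locations for each of the nine surrounding cells, which is a linear scan of the mine list every time. If the mines were stored as a set of tuples instead, each lookup would be O(1); a sketch of the counting step under that assumption:

def count_adjacent_mines(y, x, mine_set):
    # mine_set is a set of (row, col) tuples
    return sum((ye, xe) in mine_set
               for ye in range(y - 1, y + 2)
               for xe in range(x - 1, x + 2)
               if (ye, xe) != (y, x))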
IBRAHIM_OLADOKUN.py
#!/usr/bin/env python # coding: utf-8 # # Probability of default: credit scoring model # Building a model that borrowers can use to help make the best financial decisions. # The following variables are contained in the csv Dataset given: # # VARIABLE NAMES : DESCRIPTIONS # SeriousDlqin2yrs : Person experienced 90 days past due delinquency or worse (Target variable / label) # # RevolvingUtilizationOfUnsecuredLines: Total balance on credit cards and personal lines of credit except real estate and no installment debt like car loans divided by the sum of credit limits # # age Age of borrower in years # # NumberOfTime30-59DaysPastDueNotWorse: Number of times borrower has been 30-59 days past due but no worse in the last 2 years. # # DebtRatio: Monthly debt payments, alimony,living costs divided by monthy gross income # # MonthlyIncome: Monthly income # # NumberOfOpenCreditLinesAndLoans: Number of Open loans (installment like car loan or mortgage) and Lines of credit (e.g. credit cards) # # NumberOfTimes90DaysLate: Number of times borrower has been 90 days or more past due. # # NumberRealEstateLoansOrLines: Number of mortgage and real estate loans including home equity lines of credit # # NumberOfTime60-89DaysPastDueNotWorse: Number of times borrower has been 60-89 days past due but no worse in the last 2 years. # # NumberOfDependents: Number of dependents in family excluding themselves (spouse, children etc.) # # I will be using a random forest classifier for two reasons: firstly, because it would allow me to quickly and easily change the output to a simple binary classification problem. Secondly, because the predict_proba functionality allows me to output a probability score (probability of 1), this score is what i will use for predicting the probability of 90 days past due delinquency or worse in 2 years time. # # Furthermore, I will predominantly be adopting a quantiles based approach in order to streamline the process as much as possible so that hypothetical credit checks can be returned as easily and as quickly as possible. # In[1]: # Load in our libraries import pandas as pd import numpy as np import re import sklearn import seaborn as sns import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import warnings warnings.filterwarnings('ignore') from collections import Counter from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve from sklearn.feature_selection import SelectFromModel, SelectKBest from sklearn.pipeline import make_pipeline from sklearn.cross_validation import StratifiedKFold from sklearn.grid_search import GridSearchCV from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier from sklearn.cross_validation import cross_val_score sns.set(style='white', context='notebook', palette='deep') pd.options.display.max_columns = 100 # # Exploratory Data Analysis # In[3]: train = pd.read_csv("cs-training.csv") test = pd.read_csv("cs-test.csv") # In[6]: test.head() # In[7]: train.shape # In[8]: train.describe() # In[9]: train.info() # In[10]: train.isnull().sum() # SeriousDlqin2yrs is the target variable (label), it is binary. # # The training set contains 150,000 observations of 11 numerical features and 1 label. 
# # # # NumberOfDependents column and MonthlyIncome column contains NaN values, It is suspected that other variables contains errors (Age) # In[11]: test.isnull().sum() # The test data also contains several NaN values # # Target distribution # In[13]: ax = sns.countplot(x = train.SeriousDlqin2yrs ,palette="Set3") sns.set(font_scale=1.5) ax.set_ylim(top = 150000) ax.set_xlabel('Financial difficulty in 2 years') ax.set_ylabel('Frequency') fig = plt.gcf() fig.set_size_inches(10,5) ax.set_ylim(top=160000) plt.show() # Our target variable distribution of the above plot is very skewed i.e the right and left disribution are shaped differently from each other # # Detecting outliers # In[14]: def detect_outliers(df,n,features): outlier_indices = [] # iterate over features(columns) for col in features: # 1st quartile (25%) Q1 = np.percentile(df[col], 25) # 3rd quartile (75%) Q3 = np.percentile(df[col],75) # Interquartile range (IQR) IQR = Q3 - Q1 # outlier step outlier_step = 1.5 * IQR # Determine a list of indices of outliers for feature col outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step )].index # append the found outlier indices for col to the list of outlier indices outlier_indices.extend(outlier_list_col) # select observations containing more than 2 outliers outlier_indices = Counter(outlier_indices) multiple_outliers = list( k for k, v in outlier_indices.items() if v > n ) return multiple_outliers # detect outliers from Age, SibSp , Parch and Fare # These are the numerical features present in the dataset Outliers_to_drop = detect_outliers(train,2,["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "Unnamed: 0", "NumberOfDependents"]) # In[17]: train.loc[Outliers_to_drop] # 3527 outliers were detected in the training set, which represents 2.53% of our training data.I will drop these outliers # In[18]: train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True) # # Merging datasets # In[21]: train_len = len(train) dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True) # In[22]: dataset.shape # In[24]: dataset = dataset.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) train = train.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) test = test.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 
'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) # # Exploring variables # In[25]: # Correlation matrix g = sns.heatmap(train.corr(),annot=False, fmt = ".2f", cmap = "coolwarm") # This shows that the Target has the highest correlation with age, previous late payments, and the number of dependants # # Exploring UnsecLines # In[27]: dataset.UnsecLines.describe() # In[28]: dataset.UnsecLines = pd.qcut(dataset.UnsecLines.values, 5).codes # In[29]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="UnsecLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # This shows that there is an almost exponential relationship between this variable and our target # # Exploring Age # In[31]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "age") # In[32]: dataset.age = pd.qcut(dataset.age.values, 5).codes # In[33]: # Exploring age feature vs Target g = sns.factorplot(x="age",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # The age has an inverse relationship to default risk # # Exploring Late3059 # In[35]: # Explore UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[36]: for i in range(len(dataset)): if dataset.Late3059[i] >= 6: dataset.Late3059[i] = 6 # In[38]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # Due to very high standard deviations i decided to group customers who have 6 or more late payments together. Which shows that this has boosted the predictive capacity and reduced the variance of Late3059 # # Exploring DebtRatio # In[40]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "DebtRatio") # In[41]: dataset.DebtRatio = pd.qcut(dataset.DebtRatio.values, 5).codes # In[42]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="DebtRatio",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[43]: dataset.MonthlyIncome.isnull().sum() # In[44]: g = sns.heatmap(dataset[["MonthlyIncome","Unknown","UnsecLines","OpenCredit","PropLines"]].corr(),cmap="BrBG",annot=True) # In[45]: g = sns.heatmap(dataset[["MonthlyIncome","age","DebtRatio","Deps","Target"]].corr(),cmap="BrBG",annot=True) # In[46]: g = sns.heatmap(dataset[["MonthlyIncome","Late3059","Late6089","Late90"]].corr(),cmap="BrBG",annot=True) # MonthlyIncome has no strong correlation with any other variable so the NaN values cannot be accurately estimated. 
Thus, i will fill the NaN with the median value # In[47]: dataset.MonthlyIncome.median() # In[48]: #Fill Embarked nan values of dataset set with 'S' most frequent value dataset.MonthlyIncome = dataset.MonthlyIncome.fillna(dataset.MonthlyIncome.median()) # In[49]: dataset.MonthlyIncome = pd.qcut(dataset.MonthlyIncome.values, 5).codes # In[50]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="MonthlyIncome",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring OpenCredit # In[51]: dataset.OpenCredit.describe() # In[52]: dataset.OpenCredit = pd.qcut(dataset.OpenCredit.values, 5).codes # In[53]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="OpenCredit",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Late90 # In[54]: dataset.Late90.describe() # In[55]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="Late90",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[56]: for i in range(len(dataset)): if dataset.Late90[i] >= 5: dataset.Late90[i] = 5 # In[57]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="Late90",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring PropLines # In[58]: dataset.PropLines.describe() # In[59]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="PropLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[60]: for i in range(len(dataset)): if dataset.PropLines[i] >= 6: dataset.PropLines[i] = 6 # In[61]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="PropLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Late6089 # In[62]: # Exploring Late6089 feature quantiles vs Target g = sns.factorplot(x="Late6089",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[63]: for i in range(len(dataset)):
# In[64]: # Exploring Late6089 feature quantiles vs Target g = sns.factorplot(x="Late6089",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Deps # In[65]: dataset.Deps.describe() # In[66]: dataset.Deps = dataset.Deps.fillna(dataset.Deps.median()) # In[67]: dataset.Deps.isnull().sum() # In[68]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="Deps",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[69]: for i in range(len(dataset)): if dataset.Deps[i] >= 4: dataset.Deps[i] = 4 # In[71]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="Deps",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Final NaN check # In[72]: dataset.info() # In[74]: dataset.head() # Building binary/dummy variables # In[76]: dataset = pd.get_dummies(dataset, columns = ["UnsecLines"], prefix="UnsecLines") dataset = pd.get_dummies(dataset, columns = ["age"], prefix="age") dataset = pd.get_dummies(dataset, columns = ["Late3059"], prefix="Late3059") dataset = pd.get_dummies(dataset, columns = ["DebtRatio"], prefix="DebtRatio") dataset = pd.get_dummies(dataset, columns = ["MonthlyIncome"], prefix="MonthlyIncome") dataset = pd.get_dummies(dataset, columns = ["OpenCredit"], prefix="OpenCredit") dataset = pd.get_dummies(dataset, columns = ["Late90"], prefix="Late90") dataset = pd.get_dummies(dataset, columns = ["PropLines"], prefix="PropLines") dataset = pd.get_dummies(dataset, columns = ["Late6089"], prefix="Late6089") dataset = pd.get_dummies(dataset, columns = ["Deps"], prefix="Deps") # In[77]: dataset.head() # In[78]: dataset.head() # In[79]: dataset.shape # # Building our credit scoring model # In[82]: train = dataset[:train_len] test = dataset[train_len:] test.drop(labels=["Target"],axis = 1,inplace=True) # In[83]: test.shape # In[84]: ## Separate train features and label train["Target"] = train["Target"].astype(int) Y_train = train["Target"] X_train = train.drop(labels = ["Target", "Unknown"],axis = 1) # In[85]: clf = RandomForestClassifier(n_estimators=50, max_features='sqrt') clf = clf.fit(X_train, Y_train) # In[86]: features = pd.DataFrame() features['feature'] = X_train.columns features['importance'] = clf.feature_importances_ features.sort_values(by=['importance'], ascending=True, inplace=True) features.set_index('feature', inplace=True) # In[87]: features.plot(kind='barh', figsize=(20, 20)) # In[92]: parameters = {'n_estimators': 1000, 'random_state' : 20} model = RandomForestClassifier(**parameters) model.fit(X_train, Y_train) # In[93]: test.head() # In[94]: results_df = pd.read_csv("cs-test.csv") # In[95]: results_df = results_df.drop(["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "NumberOfDependents"], axis=1) # In[97]: DefaultProba = model.predict_proba(test.drop(["Unknown"], axis=1)) DefaultProba = DefaultProba[:,1] results_df.SeriousDlqin2yrs = DefaultProba results_df = results_df.rename(columns={'Unnamed: 0': 'Id', 'SeriousDlqin2yrs': 'Probability'}) # In[99]: results_df.head() # In[100]: results_df.to_csv("TEST_CREDIT_SCORE.csv", index=False) # This model lead to an accuracy rate of 0.800498 on Kaggle's unseen 
test data. # # I deem this accuracy rate to be acceptable given that i used a relatively simple quantile based approach and in light of the fact that no parameter optimization was undertaken. # # In[109]: results_df.to_csv(r"C:\Users\Lenovo Core i7\Desktop\REnmoneycsv\ttttt.csv",index =False) # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]:
if dataset.Late6089[i] >= 3: dataset.Late6089[i] = 3
conditional_block
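Aside: detect_outliers in the notebook above applies the standard 1.5×IQR rule column by column before counting how many columns flag each row. The per-column bounds computation can be read in isolation; a minimal sketch (the function name is illustrative):

import numpy as np

def iqr_bounds(values):
    # values outside [q1 - 1.5*IQR, q3 + 1.5*IQR] are treated as outliers
    q1, q3 = np.percentile(values, [25, 75])
    step = 1.5 * (q3 - q1)
    return q1 - step, q3 + step

# e.g. iqr_bounds([1, 2, 2, 3, 3, 4, 50]) gives bounds that exclude 50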
IBRAHIM_OLADOKUN.py
#!/usr/bin/env python # coding: utf-8 # # Probability of default: credit scoring model # Building a model that borrowers can use to help make the best financial decisions. # The following variables are contained in the csv Dataset given: # # VARIABLE NAMES : DESCRIPTIONS # SeriousDlqin2yrs : Person experienced 90 days past due delinquency or worse (Target variable / label) # # RevolvingUtilizationOfUnsecuredLines: Total balance on credit cards and personal lines of credit except real estate and no installment debt like car loans divided by the sum of credit limits # # age Age of borrower in years # # NumberOfTime30-59DaysPastDueNotWorse: Number of times borrower has been 30-59 days past due but no worse in the last 2 years. # # DebtRatio: Monthly debt payments, alimony,living costs divided by monthy gross income # # MonthlyIncome: Monthly income # # NumberOfOpenCreditLinesAndLoans: Number of Open loans (installment like car loan or mortgage) and Lines of credit (e.g. credit cards) # # NumberOfTimes90DaysLate: Number of times borrower has been 90 days or more past due. # # NumberRealEstateLoansOrLines: Number of mortgage and real estate loans including home equity lines of credit # # NumberOfTime60-89DaysPastDueNotWorse: Number of times borrower has been 60-89 days past due but no worse in the last 2 years. # # NumberOfDependents: Number of dependents in family excluding themselves (spouse, children etc.) # # I will be using a random forest classifier for two reasons: firstly, because it would allow me to quickly and easily change the output to a simple binary classification problem. Secondly, because the predict_proba functionality allows me to output a probability score (probability of 1), this score is what i will use for predicting the probability of 90 days past due delinquency or worse in 2 years time. # # Furthermore, I will predominantly be adopting a quantiles based approach in order to streamline the process as much as possible so that hypothetical credit checks can be returned as easily and as quickly as possible. # In[1]: # Load in our libraries import pandas as pd import numpy as np import re import sklearn import seaborn as sns import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import warnings warnings.filterwarnings('ignore') from collections import Counter from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve from sklearn.feature_selection import SelectFromModel, SelectKBest from sklearn.pipeline import make_pipeline from sklearn.cross_validation import StratifiedKFold from sklearn.grid_search import GridSearchCV from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier from sklearn.cross_validation import cross_val_score sns.set(style='white', context='notebook', palette='deep') pd.options.display.max_columns = 100 # # Exploratory Data Analysis # In[3]: train = pd.read_csv("cs-training.csv") test = pd.read_csv("cs-test.csv") # In[6]: test.head() # In[7]: train.shape # In[8]: train.describe() # In[9]: train.info() # In[10]: train.isnull().sum() # SeriousDlqin2yrs is the target variable (label), it is binary. # # The training set contains 150,000 observations of 11 numerical features and 1 label. 
# # # # NumberOfDependents column and MonthlyIncome column contains NaN values, It is suspected that other variables contains errors (Age) # In[11]: test.isnull().sum() # The test data also contains several NaN values # # Target distribution # In[13]: ax = sns.countplot(x = train.SeriousDlqin2yrs ,palette="Set3") sns.set(font_scale=1.5) ax.set_ylim(top = 150000) ax.set_xlabel('Financial difficulty in 2 years') ax.set_ylabel('Frequency') fig = plt.gcf() fig.set_size_inches(10,5) ax.set_ylim(top=160000) plt.show() # Our target variable distribution of the above plot is very skewed i.e the right and left disribution are shaped differently from each other # # Detecting outliers # In[14]: def detect_outliers(df,n,features): outlier_indices = [] # iterate over features(columns) for col in features: # 1st quartile (25%)
# Interquartile range (IQR) IQR = Q3 - Q1 # outlier step outlier_step = 1.5 * IQR # Determine a list of indices of outliers for feature col outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step )].index # append the found outlier indices for col to the list of outlier indices outlier_indices.extend(outlier_list_col) # select observations containing more than 2 outliers outlier_indices = Counter(outlier_indices) multiple_outliers = list( k for k, v in outlier_indices.items() if v > n ) return multiple_outliers # detect outliers from Age, SibSp , Parch and Fare # These are the numerical features present in the dataset Outliers_to_drop = detect_outliers(train,2,["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "Unnamed: 0", "NumberOfDependents"]) # In[17]: train.loc[Outliers_to_drop] # 3527 outliers were detected in the training set, which represents 2.53% of our training data.I will drop these outliers # In[18]: train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True) # # Merging datasets # In[21]: train_len = len(train) dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True) # In[22]: dataset.shape # In[24]: dataset = dataset.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) train = train.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) test = test.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) # # Exploring variables # In[25]: # Correlation matrix g = sns.heatmap(train.corr(),annot=False, fmt = ".2f", cmap = "coolwarm") # This shows that the Target has the highest correlation with age, previous late payments, and the number of dependants # # Exploring UnsecLines # In[27]: dataset.UnsecLines.describe() # In[28]: dataset.UnsecLines = pd.qcut(dataset.UnsecLines.values, 5).codes # In[29]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="UnsecLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # This shows that there is an almost exponential relationship between this variable and our target # # Exploring Age # In[31]: # Exploring Age vs Survived g = 
sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "age") # In[32]: dataset.age = pd.qcut(dataset.age.values, 5).codes # In[33]: # Exploring age feature vs Target g = sns.factorplot(x="age",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # The age has an inverse relationship to default risk # # Exploring Late3059 # In[35]: # Explore UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[36]: for i in range(len(dataset)): if dataset.Late3059[i] >= 6: dataset.Late3059[i] = 6 # In[38]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # Due to very high standard deviations i decided to group customers who have 6 or more late payments together. Which shows that this has boosted the predictive capacity and reduced the variance of Late3059 # # Exploring DebtRatio # In[40]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "DebtRatio") # In[41]: dataset.DebtRatio = pd.qcut(dataset.DebtRatio.values, 5).codes # In[42]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="DebtRatio",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[43]: dataset.MonthlyIncome.isnull().sum() # In[44]: g = sns.heatmap(dataset[["MonthlyIncome","Unknown","UnsecLines","OpenCredit","PropLines"]].corr(),cmap="BrBG",annot=True) # In[45]: g = sns.heatmap(dataset[["MonthlyIncome","age","DebtRatio","Deps","Target"]].corr(),cmap="BrBG",annot=True) # In[46]: g = sns.heatmap(dataset[["MonthlyIncome","Late3059","Late6089","Late90"]].corr(),cmap="BrBG",annot=True) # MonthlyIncome has no strong correlation with any other variable so the NaN values cannot be accurately estimated. 
Thus, i will fill the NaN with the median value # In[47]: dataset.MonthlyIncome.median() # In[48]: #Fill Embarked nan values of dataset set with 'S' most frequent value dataset.MonthlyIncome = dataset.MonthlyIncome.fillna(dataset.MonthlyIncome.median()) # In[49]: dataset.MonthlyIncome = pd.qcut(dataset.MonthlyIncome.values, 5).codes # In[50]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="MonthlyIncome",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring OpenCredit # In[51]: dataset.OpenCredit.describe() # In[52]: dataset.OpenCredit = pd.qcut(dataset.OpenCredit.values, 5).codes # In[53]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="OpenCredit",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Late90 # In[54]: dataset.Late90.describe() # In[55]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="Late90",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[56]: for i in range(len(dataset)): if dataset.Late90[i] >= 5: dataset.Late90[i] = 5 # In[57]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="Late90",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring PropLines # In[58]: dataset.PropLines.describe() # In[59]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="PropLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[60]: for i in range(len(dataset)): if dataset.PropLines[i] >= 6: dataset.PropLines[i] = 6 # In[61]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="PropLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Late6089 # In[62]: # Exploring Late6089 feature quantiles vs Target g = sns.factorplot(x="Late6089",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[63]: for i in range(len(dataset)): if dataset.Late6089[i] >= 3: dataset.Late6089[i] = 3 # In[64]: # Exploring Late6089 feature quantiles vs Target g = sns.factorplot(x="Late6089",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Deps # In[65]: dataset.Deps.describe() # In[66]: dataset.Deps = dataset.Deps.fillna(dataset.Deps.median()) # In[67]: dataset.Deps.isnull().sum() # In[68]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="Deps",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[69]: for i in range(len(dataset)): if dataset.Deps[i] >= 4: dataset.Deps[i] = 4 # In[71]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="Deps",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Final NaN check # In[72]: dataset.info() # In[74]: dataset.head() # Building binary/dummy variables # In[76]: dataset = pd.get_dummies(dataset, columns = ["UnsecLines"], prefix="UnsecLines") dataset = pd.get_dummies(dataset, columns = 
["age"], prefix="age") dataset = pd.get_dummies(dataset, columns = ["Late3059"], prefix="Late3059") dataset = pd.get_dummies(dataset, columns = ["DebtRatio"], prefix="DebtRatio") dataset = pd.get_dummies(dataset, columns = ["MonthlyIncome"], prefix="MonthlyIncome") dataset = pd.get_dummies(dataset, columns = ["OpenCredit"], prefix="OpenCredit") dataset = pd.get_dummies(dataset, columns = ["Late90"], prefix="Late90") dataset = pd.get_dummies(dataset, columns = ["PropLines"], prefix="PropLines") dataset = pd.get_dummies(dataset, columns = ["Late6089"], prefix="Late6089") dataset = pd.get_dummies(dataset, columns = ["Deps"], prefix="Deps") # In[77]: dataset.head() # In[78]: dataset.head() # In[79]: dataset.shape # # Building our credit scoring model # In[82]: train = dataset[:train_len] test = dataset[train_len:] test.drop(labels=["Target"],axis = 1,inplace=True) # In[83]: test.shape # In[84]: ## Separate train features and label train["Target"] = train["Target"].astype(int) Y_train = train["Target"] X_train = train.drop(labels = ["Target", "Unknown"],axis = 1) # In[85]: clf = RandomForestClassifier(n_estimators=50, max_features='sqrt') clf = clf.fit(X_train, Y_train) # In[86]: features = pd.DataFrame() features['feature'] = X_train.columns features['importance'] = clf.feature_importances_ features.sort_values(by=['importance'], ascending=True, inplace=True) features.set_index('feature', inplace=True) # In[87]: features.plot(kind='barh', figsize=(20, 20)) # In[92]: parameters = {'n_estimators': 1000, 'random_state' : 20} model = RandomForestClassifier(**parameters) model.fit(X_train, Y_train) # In[93]: test.head() # In[94]: results_df = pd.read_csv("cs-test.csv") # In[95]: results_df = results_df.drop(["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "NumberOfDependents"], axis=1) # In[97]: DefaultProba = model.predict_proba(test.drop(["Unknown"], axis=1)) DefaultProba = DefaultProba[:,1] results_df.SeriousDlqin2yrs = DefaultProba results_df = results_df.rename(columns={'Unnamed: 0': 'Id', 'SeriousDlqin2yrs': 'Probability'}) # In[99]: results_df.head() # In[100]: results_df.to_csv("TEST_CREDIT_SCORE.csv", index=False) # This model lead to an accuracy rate of 0.800498 on Kaggle's unseen test data. # # I deem this accuracy rate to be acceptable given that i used a relatively simple quantile based approach and in light of the fact that no parameter optimization was undertaken. # # In[109]: results_df.to_csv(r"C:\Users\Lenovo Core i7\Desktop\REnmoneycsv\ttttt.csv",index =False) # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]:
Q1 = np.percentile(df[col], 25) # 3rd quartile (75%) Q3 = np.percentile(df[col],75)
random_line_split
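The record above walks through the notebook's treatment of MonthlyIncome: fill missing values with the median, bin the column into five quantiles with pd.qcut, then one-hot encode every binned feature before training. A minimal, self-contained sketch of those three steps follows; the column name and sample values are illustrative assumptions, not the competition data.

import pandas as pd
import numpy as np

df = pd.DataFrame({
    "MonthlyIncome": [2500, np.nan, 5400, 12000, np.nan, 800, 3100, 9000, 150, 4700],
})

# Fill missing incomes with the median of the observed values
df["MonthlyIncome"] = df["MonthlyIncome"].fillna(df["MonthlyIncome"].median())

# Bin into 5 quantiles and keep only the integer bin codes (0-4)
df["MonthlyIncome"] = pd.qcut(df["MonthlyIncome"].values, 5).codes

# One-hot encode the quantile codes, as the notebook does before model training
df = pd.get_dummies(df, columns=["MonthlyIncome"], prefix="MonthlyIncome")
print(df.head())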
IBRAHIM_OLADOKUN.py
#!/usr/bin/env python # coding: utf-8 # # Probability of default: credit scoring model # Building a model that borrowers can use to help make the best financial decisions. # The following variables are contained in the csv Dataset given: # # VARIABLE NAMES : DESCRIPTIONS # SeriousDlqin2yrs : Person experienced 90 days past due delinquency or worse (Target variable / label) # # RevolvingUtilizationOfUnsecuredLines: Total balance on credit cards and personal lines of credit except real estate and no installment debt like car loans divided by the sum of credit limits # # age Age of borrower in years # # NumberOfTime30-59DaysPastDueNotWorse: Number of times borrower has been 30-59 days past due but no worse in the last 2 years. # # DebtRatio: Monthly debt payments, alimony,living costs divided by monthy gross income # # MonthlyIncome: Monthly income # # NumberOfOpenCreditLinesAndLoans: Number of Open loans (installment like car loan or mortgage) and Lines of credit (e.g. credit cards) # # NumberOfTimes90DaysLate: Number of times borrower has been 90 days or more past due. # # NumberRealEstateLoansOrLines: Number of mortgage and real estate loans including home equity lines of credit # # NumberOfTime60-89DaysPastDueNotWorse: Number of times borrower has been 60-89 days past due but no worse in the last 2 years. # # NumberOfDependents: Number of dependents in family excluding themselves (spouse, children etc.) # # I will be using a random forest classifier for two reasons: firstly, because it would allow me to quickly and easily change the output to a simple binary classification problem. Secondly, because the predict_proba functionality allows me to output a probability score (probability of 1), this score is what i will use for predicting the probability of 90 days past due delinquency or worse in 2 years time. # # Furthermore, I will predominantly be adopting a quantiles based approach in order to streamline the process as much as possible so that hypothetical credit checks can be returned as easily and as quickly as possible. # In[1]: # Load in our libraries import pandas as pd import numpy as np import re import sklearn import seaborn as sns import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import warnings warnings.filterwarnings('ignore') from collections import Counter from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve from sklearn.feature_selection import SelectFromModel, SelectKBest from sklearn.pipeline import make_pipeline from sklearn.cross_validation import StratifiedKFold from sklearn.grid_search import GridSearchCV from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier from sklearn.cross_validation import cross_val_score sns.set(style='white', context='notebook', palette='deep') pd.options.display.max_columns = 100 # # Exploratory Data Analysis # In[3]: train = pd.read_csv("cs-training.csv") test = pd.read_csv("cs-test.csv") # In[6]: test.head() # In[7]: train.shape # In[8]: train.describe() # In[9]: train.info() # In[10]: train.isnull().sum() # SeriousDlqin2yrs is the target variable (label), it is binary. # # The training set contains 150,000 observations of 11 numerical features and 1 label. 
# # # # NumberOfDependents column and MonthlyIncome column contains NaN values, It is suspected that other variables contains errors (Age) # In[11]: test.isnull().sum() # The test data also contains several NaN values # # Target distribution # In[13]: ax = sns.countplot(x = train.SeriousDlqin2yrs ,palette="Set3") sns.set(font_scale=1.5) ax.set_ylim(top = 150000) ax.set_xlabel('Financial difficulty in 2 years') ax.set_ylabel('Frequency') fig = plt.gcf() fig.set_size_inches(10,5) ax.set_ylim(top=160000) plt.show() # Our target variable distribution of the above plot is very skewed i.e the right and left disribution are shaped differently from each other # # Detecting outliers # In[14]: def detect_outliers(df,n,features):
# detect outliers from Age, SibSp , Parch and Fare # These are the numerical features present in the dataset Outliers_to_drop = detect_outliers(train,2,["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "Unnamed: 0", "NumberOfDependents"]) # In[17]: train.loc[Outliers_to_drop] # 3527 outliers were detected in the training set, which represents 2.53% of our training data.I will drop these outliers # In[18]: train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True) # # Merging datasets # In[21]: train_len = len(train) dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True) # In[22]: dataset.shape # In[24]: dataset = dataset.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) train = train.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) test = test.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) # # Exploring variables # In[25]: # Correlation matrix g = sns.heatmap(train.corr(),annot=False, fmt = ".2f", cmap = "coolwarm") # This shows that the Target has the highest correlation with age, previous late payments, and the number of dependants # # Exploring UnsecLines # In[27]: dataset.UnsecLines.describe() # In[28]: dataset.UnsecLines = pd.qcut(dataset.UnsecLines.values, 5).codes # In[29]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="UnsecLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # This shows that there is an almost exponential relationship between this variable and our target # # Exploring Age # In[31]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "age") # In[32]: dataset.age = pd.qcut(dataset.age.values, 5).codes # In[33]: # Exploring age feature vs Target g = sns.factorplot(x="age",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # The age has an inverse relationship to default risk # # Exploring Late3059 # In[35]: # Explore UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") 
g.despine(left=True) g = g.set_ylabels("Target probability") # In[36]: for i in range(len(dataset)): if dataset.Late3059[i] >= 6: dataset.Late3059[i] = 6 # In[38]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # Due to very high standard deviations i decided to group customers who have 6 or more late payments together. Which shows that this has boosted the predictive capacity and reduced the variance of Late3059 # # Exploring DebtRatio # In[40]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "DebtRatio") # In[41]: dataset.DebtRatio = pd.qcut(dataset.DebtRatio.values, 5).codes # In[42]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="DebtRatio",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[43]: dataset.MonthlyIncome.isnull().sum() # In[44]: g = sns.heatmap(dataset[["MonthlyIncome","Unknown","UnsecLines","OpenCredit","PropLines"]].corr(),cmap="BrBG",annot=True) # In[45]: g = sns.heatmap(dataset[["MonthlyIncome","age","DebtRatio","Deps","Target"]].corr(),cmap="BrBG",annot=True) # In[46]: g = sns.heatmap(dataset[["MonthlyIncome","Late3059","Late6089","Late90"]].corr(),cmap="BrBG",annot=True) # MonthlyIncome has no strong correlation with any other variable so the NaN values cannot be accurately estimated. Thus, i will fill the NaN with the median value # In[47]: dataset.MonthlyIncome.median() # In[48]: #Fill Embarked nan values of dataset set with 'S' most frequent value dataset.MonthlyIncome = dataset.MonthlyIncome.fillna(dataset.MonthlyIncome.median()) # In[49]: dataset.MonthlyIncome = pd.qcut(dataset.MonthlyIncome.values, 5).codes # In[50]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="MonthlyIncome",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring OpenCredit # In[51]: dataset.OpenCredit.describe() # In[52]: dataset.OpenCredit = pd.qcut(dataset.OpenCredit.values, 5).codes # In[53]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="OpenCredit",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Late90 # In[54]: dataset.Late90.describe() # In[55]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="Late90",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[56]: for i in range(len(dataset)): if dataset.Late90[i] >= 5: dataset.Late90[i] = 5 # In[57]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="Late90",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring PropLines # In[58]: dataset.PropLines.describe() # In[59]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="PropLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[60]: for i in range(len(dataset)): if dataset.PropLines[i] >= 6: dataset.PropLines[i] = 6 # In[61]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="PropLines",y="Target",data=dataset,kind="bar", size = 6 , palette = 
"muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Late6089 # In[62]: # Exploring Late6089 feature quantiles vs Target g = sns.factorplot(x="Late6089",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[63]: for i in range(len(dataset)): if dataset.Late6089[i] >= 3: dataset.Late6089[i] = 3 # In[64]: # Exploring Late6089 feature quantiles vs Target g = sns.factorplot(x="Late6089",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Deps # In[65]: dataset.Deps.describe() # In[66]: dataset.Deps = dataset.Deps.fillna(dataset.Deps.median()) # In[67]: dataset.Deps.isnull().sum() # In[68]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="Deps",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[69]: for i in range(len(dataset)): if dataset.Deps[i] >= 4: dataset.Deps[i] = 4 # In[71]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="Deps",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Final NaN check # In[72]: dataset.info() # In[74]: dataset.head() # Building binary/dummy variables # In[76]: dataset = pd.get_dummies(dataset, columns = ["UnsecLines"], prefix="UnsecLines") dataset = pd.get_dummies(dataset, columns = ["age"], prefix="age") dataset = pd.get_dummies(dataset, columns = ["Late3059"], prefix="Late3059") dataset = pd.get_dummies(dataset, columns = ["DebtRatio"], prefix="DebtRatio") dataset = pd.get_dummies(dataset, columns = ["MonthlyIncome"], prefix="MonthlyIncome") dataset = pd.get_dummies(dataset, columns = ["OpenCredit"], prefix="OpenCredit") dataset = pd.get_dummies(dataset, columns = ["Late90"], prefix="Late90") dataset = pd.get_dummies(dataset, columns = ["PropLines"], prefix="PropLines") dataset = pd.get_dummies(dataset, columns = ["Late6089"], prefix="Late6089") dataset = pd.get_dummies(dataset, columns = ["Deps"], prefix="Deps") # In[77]: dataset.head() # In[78]: dataset.head() # In[79]: dataset.shape # # Building our credit scoring model # In[82]: train = dataset[:train_len] test = dataset[train_len:] test.drop(labels=["Target"],axis = 1,inplace=True) # In[83]: test.shape # In[84]: ## Separate train features and label train["Target"] = train["Target"].astype(int) Y_train = train["Target"] X_train = train.drop(labels = ["Target", "Unknown"],axis = 1) # In[85]: clf = RandomForestClassifier(n_estimators=50, max_features='sqrt') clf = clf.fit(X_train, Y_train) # In[86]: features = pd.DataFrame() features['feature'] = X_train.columns features['importance'] = clf.feature_importances_ features.sort_values(by=['importance'], ascending=True, inplace=True) features.set_index('feature', inplace=True) # In[87]: features.plot(kind='barh', figsize=(20, 20)) # In[92]: parameters = {'n_estimators': 1000, 'random_state' : 20} model = RandomForestClassifier(**parameters) model.fit(X_train, Y_train) # In[93]: test.head() # In[94]: results_df = pd.read_csv("cs-test.csv") # In[95]: results_df = results_df.drop(["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "NumberOfDependents"], axis=1) # In[97]: 
DefaultProba = model.predict_proba(test.drop(["Unknown"], axis=1)) DefaultProba = DefaultProba[:,1] results_df.SeriousDlqin2yrs = DefaultProba results_df = results_df.rename(columns={'Unnamed: 0': 'Id', 'SeriousDlqin2yrs': 'Probability'}) # In[99]: results_df.head() # In[100]: results_df.to_csv("TEST_CREDIT_SCORE.csv", index=False) # This model lead to an accuracy rate of 0.800498 on Kaggle's unseen test data. # # I deem this accuracy rate to be acceptable given that i used a relatively simple quantile based approach and in light of the fact that no parameter optimization was undertaken. # # In[109]: results_df.to_csv(r"C:\Users\Lenovo Core i7\Desktop\REnmoneycsv\ttttt.csv",index =False) # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]:
outlier_indices = [] # iterate over features(columns) for col in features: # 1st quartile (25%) Q1 = np.percentile(df[col], 25) # 3rd quartile (75%) Q3 = np.percentile(df[col],75) # Interquartile range (IQR) IQR = Q3 - Q1 # outlier step outlier_step = 1.5 * IQR # Determine a list of indices of outliers for feature col outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step )].index # append the found outlier indices for col to the list of outlier indices outlier_indices.extend(outlier_list_col) # select observations containing more than 2 outliers outlier_indices = Counter(outlier_indices) multiple_outliers = list( k for k, v in outlier_indices.items() if v > n ) return multiple_outliers
identifier_body
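The middle field above is the body of detect_outliers: a Tukey-fence (1.5 * IQR) rule that flags rows falling outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] in more than n of the listed features. A compact restatement of the same logic, sketched on a tiny illustrative DataFrame:

import numpy as np
import pandas as pd
from collections import Counter

def iqr_outliers(df, n, features):
    # Collect row indices that fall outside the Tukey fences for each column
    indices = []
    for col in features:
        q1, q3 = np.percentile(df[col], [25, 75])
        step = 1.5 * (q3 - q1)
        mask = (df[col] < q1 - step) | (df[col] > q3 + step)
        indices.extend(df[mask].index)
    # Keep only rows flagged as outliers in more than n features
    counts = Counter(indices)
    return [idx for idx, c in counts.items() if c > n]

demo = pd.DataFrame({"a": [1, 2, 3, 4, 100], "b": [10, 11, 12, 13, 500]})
print(iqr_outliers(demo, 1, ["a", "b"]))  # row 4 is extreme in both columns -> [4]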
IBRAHIM_OLADOKUN.py
#!/usr/bin/env python # coding: utf-8 # # Probability of default: credit scoring model # Building a model that borrowers can use to help make the best financial decisions. # The following variables are contained in the csv Dataset given: # # VARIABLE NAMES : DESCRIPTIONS # SeriousDlqin2yrs : Person experienced 90 days past due delinquency or worse (Target variable / label) # # RevolvingUtilizationOfUnsecuredLines: Total balance on credit cards and personal lines of credit except real estate and no installment debt like car loans divided by the sum of credit limits # # age Age of borrower in years # # NumberOfTime30-59DaysPastDueNotWorse: Number of times borrower has been 30-59 days past due but no worse in the last 2 years. # # DebtRatio: Monthly debt payments, alimony,living costs divided by monthy gross income # # MonthlyIncome: Monthly income # # NumberOfOpenCreditLinesAndLoans: Number of Open loans (installment like car loan or mortgage) and Lines of credit (e.g. credit cards) # # NumberOfTimes90DaysLate: Number of times borrower has been 90 days or more past due. # # NumberRealEstateLoansOrLines: Number of mortgage and real estate loans including home equity lines of credit # # NumberOfTime60-89DaysPastDueNotWorse: Number of times borrower has been 60-89 days past due but no worse in the last 2 years. # # NumberOfDependents: Number of dependents in family excluding themselves (spouse, children etc.) # # I will be using a random forest classifier for two reasons: firstly, because it would allow me to quickly and easily change the output to a simple binary classification problem. Secondly, because the predict_proba functionality allows me to output a probability score (probability of 1), this score is what i will use for predicting the probability of 90 days past due delinquency or worse in 2 years time. # # Furthermore, I will predominantly be adopting a quantiles based approach in order to streamline the process as much as possible so that hypothetical credit checks can be returned as easily and as quickly as possible. # In[1]: # Load in our libraries import pandas as pd import numpy as np import re import sklearn import seaborn as sns import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') import warnings warnings.filterwarnings('ignore') from collections import Counter from sklearn.ensemble import RandomForestClassifier, VotingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve from sklearn.feature_selection import SelectFromModel, SelectKBest from sklearn.pipeline import make_pipeline from sklearn.cross_validation import StratifiedKFold from sklearn.grid_search import GridSearchCV from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier from sklearn.cross_validation import cross_val_score sns.set(style='white', context='notebook', palette='deep') pd.options.display.max_columns = 100 # # Exploratory Data Analysis # In[3]: train = pd.read_csv("cs-training.csv") test = pd.read_csv("cs-test.csv") # In[6]: test.head() # In[7]: train.shape # In[8]: train.describe() # In[9]: train.info() # In[10]: train.isnull().sum() # SeriousDlqin2yrs is the target variable (label), it is binary. # # The training set contains 150,000 observations of 11 numerical features and 1 label. 
# # # # NumberOfDependents column and MonthlyIncome column contains NaN values, It is suspected that other variables contains errors (Age) # In[11]: test.isnull().sum() # The test data also contains several NaN values # # Target distribution # In[13]: ax = sns.countplot(x = train.SeriousDlqin2yrs ,palette="Set3") sns.set(font_scale=1.5) ax.set_ylim(top = 150000) ax.set_xlabel('Financial difficulty in 2 years') ax.set_ylabel('Frequency') fig = plt.gcf() fig.set_size_inches(10,5) ax.set_ylim(top=160000) plt.show() # Our target variable distribution of the above plot is very skewed i.e the right and left disribution are shaped differently from each other # # Detecting outliers # In[14]: def
(df,n,features): outlier_indices = [] # iterate over features(columns) for col in features: # 1st quartile (25%) Q1 = np.percentile(df[col], 25) # 3rd quartile (75%) Q3 = np.percentile(df[col],75) # Interquartile range (IQR) IQR = Q3 - Q1 # outlier step outlier_step = 1.5 * IQR # Determine a list of indices of outliers for feature col outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step )].index # append the found outlier indices for col to the list of outlier indices outlier_indices.extend(outlier_list_col) # select observations containing more than 2 outliers outlier_indices = Counter(outlier_indices) multiple_outliers = list( k for k, v in outlier_indices.items() if v > n ) return multiple_outliers # detect outliers from Age, SibSp , Parch and Fare # These are the numerical features present in the dataset Outliers_to_drop = detect_outliers(train,2,["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "Unnamed: 0", "NumberOfDependents"]) # In[17]: train.loc[Outliers_to_drop] # 3527 outliers were detected in the training set, which represents 2.53% of our training data.I will drop these outliers # In[18]: train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True) # # Merging datasets # In[21]: train_len = len(train) dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True) # In[22]: dataset.shape # In[24]: dataset = dataset.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) train = train.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) test = test.rename(columns={'Unnamed: 0': 'Unknown', 'SeriousDlqin2yrs': 'Target', 'RevolvingUtilizationOfUnsecuredLines': 'UnsecLines', 'NumberOfTime30-59DaysPastDueNotWorse': 'Late3059', 'DebtRatio': 'DebtRatio', 'MonthlyIncome': 'MonthlyIncome', 'NumberOfOpenCreditLinesAndLoans': 'OpenCredit', 'NumberOfTimes90DaysLate': 'Late90', 'NumberRealEstateLoansOrLines': 'PropLines', 'NumberOfTime60-89DaysPastDueNotWorse': 'Late6089', 'NumberOfDependents': 'Deps'}) # # Exploring variables # In[25]: # Correlation matrix g = sns.heatmap(train.corr(),annot=False, fmt = ".2f", cmap = "coolwarm") # This shows that the Target has the highest correlation with age, previous late payments, and the number of dependants # # Exploring UnsecLines # In[27]: dataset.UnsecLines.describe() # In[28]: dataset.UnsecLines = pd.qcut(dataset.UnsecLines.values, 5).codes # In[29]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="UnsecLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = 
g.set_ylabels("Target probability") # This shows that there is an almost exponential relationship between this variable and our target # # Exploring Age # In[31]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "age") # In[32]: dataset.age = pd.qcut(dataset.age.values, 5).codes # In[33]: # Exploring age feature vs Target g = sns.factorplot(x="age",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # The age has an inverse relationship to default risk # # Exploring Late3059 # In[35]: # Explore UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[36]: for i in range(len(dataset)): if dataset.Late3059[i] >= 6: dataset.Late3059[i] = 6 # In[38]: # Exploring UnsecLines feature vs Target g = sns.factorplot(x="Late3059",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # Due to very high standard deviations i decided to group customers who have 6 or more late payments together. Which shows that this has boosted the predictive capacity and reduced the variance of Late3059 # # Exploring DebtRatio # In[40]: # Exploring Age vs Survived g = sns.FacetGrid(dataset, col='Target') g = g.map(sns.distplot, "DebtRatio") # In[41]: dataset.DebtRatio = pd.qcut(dataset.DebtRatio.values, 5).codes # In[42]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="DebtRatio",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[43]: dataset.MonthlyIncome.isnull().sum() # In[44]: g = sns.heatmap(dataset[["MonthlyIncome","Unknown","UnsecLines","OpenCredit","PropLines"]].corr(),cmap="BrBG",annot=True) # In[45]: g = sns.heatmap(dataset[["MonthlyIncome","age","DebtRatio","Deps","Target"]].corr(),cmap="BrBG",annot=True) # In[46]: g = sns.heatmap(dataset[["MonthlyIncome","Late3059","Late6089","Late90"]].corr(),cmap="BrBG",annot=True) # MonthlyIncome has no strong correlation with any other variable so the NaN values cannot be accurately estimated. 
Thus, i will fill the NaN with the median value # In[47]: dataset.MonthlyIncome.median() # In[48]: #Fill Embarked nan values of dataset set with 'S' most frequent value dataset.MonthlyIncome = dataset.MonthlyIncome.fillna(dataset.MonthlyIncome.median()) # In[49]: dataset.MonthlyIncome = pd.qcut(dataset.MonthlyIncome.values, 5).codes # In[50]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="MonthlyIncome",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring OpenCredit # In[51]: dataset.OpenCredit.describe() # In[52]: dataset.OpenCredit = pd.qcut(dataset.OpenCredit.values, 5).codes # In[53]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="OpenCredit",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Late90 # In[54]: dataset.Late90.describe() # In[55]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="Late90",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[56]: for i in range(len(dataset)): if dataset.Late90[i] >= 5: dataset.Late90[i] = 5 # In[57]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="Late90",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring PropLines # In[58]: dataset.PropLines.describe() # In[59]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="PropLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[60]: for i in range(len(dataset)): if dataset.PropLines[i] >= 6: dataset.PropLines[i] = 6 # In[61]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="PropLines",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Late6089 # In[62]: # Exploring Late6089 feature quantiles vs Target g = sns.factorplot(x="Late6089",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[63]: for i in range(len(dataset)): if dataset.Late6089[i] >= 3: dataset.Late6089[i] = 3 # In[64]: # Exploring Late6089 feature quantiles vs Target g = sns.factorplot(x="Late6089",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Exploring Deps # In[65]: dataset.Deps.describe() # In[66]: dataset.Deps = dataset.Deps.fillna(dataset.Deps.median()) # In[67]: dataset.Deps.isnull().sum() # In[68]: # Explore DebtRatio feature quantiles vs Target g = sns.factorplot(x="Deps",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # In[69]: for i in range(len(dataset)): if dataset.Deps[i] >= 4: dataset.Deps[i] = 4 # In[71]: # Exploring DebtRatio feature quantiles vs Target g = sns.factorplot(x="Deps",y="Target",data=dataset,kind="bar", size = 6 , palette = "muted") g.despine(left=True) g = g.set_ylabels("Target probability") # # Final NaN check # In[72]: dataset.info() # In[74]: dataset.head() # Building binary/dummy variables # In[76]: dataset = pd.get_dummies(dataset, columns = ["UnsecLines"], prefix="UnsecLines") dataset = pd.get_dummies(dataset, columns = 
["age"], prefix="age") dataset = pd.get_dummies(dataset, columns = ["Late3059"], prefix="Late3059") dataset = pd.get_dummies(dataset, columns = ["DebtRatio"], prefix="DebtRatio") dataset = pd.get_dummies(dataset, columns = ["MonthlyIncome"], prefix="MonthlyIncome") dataset = pd.get_dummies(dataset, columns = ["OpenCredit"], prefix="OpenCredit") dataset = pd.get_dummies(dataset, columns = ["Late90"], prefix="Late90") dataset = pd.get_dummies(dataset, columns = ["PropLines"], prefix="PropLines") dataset = pd.get_dummies(dataset, columns = ["Late6089"], prefix="Late6089") dataset = pd.get_dummies(dataset, columns = ["Deps"], prefix="Deps") # In[77]: dataset.head() # In[78]: dataset.head() # In[79]: dataset.shape # # Building our credit scoring model # In[82]: train = dataset[:train_len] test = dataset[train_len:] test.drop(labels=["Target"],axis = 1,inplace=True) # In[83]: test.shape # In[84]: ## Separate train features and label train["Target"] = train["Target"].astype(int) Y_train = train["Target"] X_train = train.drop(labels = ["Target", "Unknown"],axis = 1) # In[85]: clf = RandomForestClassifier(n_estimators=50, max_features='sqrt') clf = clf.fit(X_train, Y_train) # In[86]: features = pd.DataFrame() features['feature'] = X_train.columns features['importance'] = clf.feature_importances_ features.sort_values(by=['importance'], ascending=True, inplace=True) features.set_index('feature', inplace=True) # In[87]: features.plot(kind='barh', figsize=(20, 20)) # In[92]: parameters = {'n_estimators': 1000, 'random_state' : 20} model = RandomForestClassifier(**parameters) model.fit(X_train, Y_train) # In[93]: test.head() # In[94]: results_df = pd.read_csv("cs-test.csv") # In[95]: results_df = results_df.drop(["RevolvingUtilizationOfUnsecuredLines", "age", "NumberOfTime30-59DaysPastDueNotWorse", "DebtRatio", "MonthlyIncome", "NumberOfOpenCreditLinesAndLoans", "NumberOfTimes90DaysLate", "NumberRealEstateLoansOrLines", "NumberOfTime60-89DaysPastDueNotWorse", "NumberOfDependents"], axis=1) # In[97]: DefaultProba = model.predict_proba(test.drop(["Unknown"], axis=1)) DefaultProba = DefaultProba[:,1] results_df.SeriousDlqin2yrs = DefaultProba results_df = results_df.rename(columns={'Unnamed: 0': 'Id', 'SeriousDlqin2yrs': 'Probability'}) # In[99]: results_df.head() # In[100]: results_df.to_csv("TEST_CREDIT_SCORE.csv", index=False) # This model lead to an accuracy rate of 0.800498 on Kaggle's unseen test data. # # I deem this accuracy rate to be acceptable given that i used a relatively simple quantile based approach and in light of the fact that no parameter optimization was undertaken. # # In[109]: results_df.to_csv(r"C:\Users\Lenovo Core i7\Desktop\REnmoneycsv\ttttt.csv",index =False) # In[ ]: # In[ ]: # In[ ]: # In[ ]: # In[ ]:
detect_outliers
identifier_name
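Elsewhere in these records the notebook caps heavy-tailed count features (Late3059, Late90, PropLines, Late6089, Deps) by looping over every row and overwriting values above a threshold. A vectorized equivalent, sketched here on illustrative values, is pandas' clip():

import pandas as pd

late3059 = pd.Series([0, 1, 2, 7, 10, 3])
capped = late3059.clip(upper=6)  # anything above 6 becomes 6, matching the ">= 6" capping loop
print(capped.tolist())           # [0, 1, 2, 6, 6, 6]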
putio-ftp-connector.py
#!/usr/bin/env python # $Id: basic_ftpd.py 569 2009-04-04 00:17:43Z billiejoex $ """A basic FTP server which uses a DummyAuthorizer for managing 'virtual users', setting a limit for incoming connections. """ import os #from pyftpdlib import ftpserver import urllib2 import base64 import putio from pathtoid import PathToId import pathtoid import config import time class HttpFD(object): def __init__(self, apifile, bucket, obj, mode): self.apifile = apifile self.download_url = apifile.get_stream_url() self.bucket = bucket self.name = obj self.mode = mode self.closed = False self.total_size = None self.seekpos = None self.read_size = 0 # speed... self.read_bytes = 128 * 1024 # 128kb per iteration self.buffer = '' self.req = None self.fd = None # gets total size req = urllib2.Request(self.download_url) f = urllib2.urlopen(req) self.total_size = f.headers.get('Content-Length') def write(self, data): raise OSError(1, 'Operation not permitted') # self.temp_file.write(data) def close(self): return def __read(self, size=65536): c = self.buffer self.buffer = '' return c def read(self, size=65536): if self.req == None: self.req = urllib2.Request(self.download_url) if self.seekpos: self.req.headers['Range'] = 'bytes=%s-' % (self.seekpos) if self.read_size > self.total_size: return self.read_size = self.read_size + self.read_bytes + 1 if not self.fd: self.fd = urllib2.urlopen(self.req) return self.fd.read(1024) def seek(self, frombytes, **kwargs): self.seekpos = frombytes return # .... idfinder = PathToId() idfinder.load_items() api = None class HttpFS(ftpserver.AbstractedFS): def __init__(self): self.root = None self.cwd = '/' self.rnfr = None self.dirlistcache = {} self.idfinder = idfinder def open(self, filename, mode): print "filename: ", filename if filename in self.dirlistcache: apifile = self.dirlistcache[filename] else: if filename == os.path.sep: # items = operations.api.get_items() # this is not a file its a directory # raise OSError(1, 'This is a directory') raise IOError(1, 'This is a directory') else: id = idfinder.find_item_by_path(filename) print "file id:", filename, id apifile = operations.api.get_items(id=id)[0] if apifile.is_dir: raise IOError(1, 'This is a directory') # return HttpFD(apifile, None, filename, mode) def chdir(self, path): self.cwd = path.decode('utf-8').encode('utf-8') def mkdir(self, path): dirs = os.path.split(path) apifile = self._getitem(dirs[0]) if not apifile: #this is root operations.api.create_folder(name = dirs[1], parent_id = 0) else: apifile.create_folder(name=dirs[1]) self.remove_from_cache(path) self.remove_from_cache(dirs[0]) def listdir(self, path): ret = [] try: item = self._getitem(path) except: return [] if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return [] for i in items: ret.append(i.name) return ret def remove_from_cache(self, path): if path in self.dirlistcache: del self.dirlistcache[path] idfinder.invalidate_items_cache_by_path(path) def rmdir(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def remove(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def rename(self, src, dst): apifile = self._getitem(src) if not apifile: raise OSError(2, 'No such file or directory') srcs = os.path.split(src) dsts = os.path.split(dst) if srcs[0] != dsts[0]: # this is a move 
operation.. if dsts[0] == os.path.sep: apifile.move_item(target=0) return destination = self._getitem(dsts[0]); if not destination: raise OSError(2, 'No such file or directory') apifile.move_item(target=destination.id) return apifile.rename_item(dsts[1]) self.remove_from_cache(src) self.remove_from_cache(srcs[0]) self.remove_from_cache(dst) self.remove_from_cache(dsts[0]) def isfile(self, path): return not self.isdir(path) def islink(self, path): return False def isdir(self, path): if path == os.path.sep: return True apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') if apifile.is_dir: return True else: return False def getsize(self, path): apifile = self._getitem(path) if not apifile: raise OSError(1, 'No such file or directory') print "filesize :", apifile.size return long(apifile.size) #return self.stat(path).st_size def getmtime(self, path): return self.stat(path).st_mtime def realpath(self, path): return path def lexists(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') return apifile def _getitem(self, filename): if filename in self.dirlistcache: apifile = self.dirlistcache[filename] print 'found........', apifile.id, apifile.name else: if filename == os.path.sep: # items = operations.api.get_items() return False else: id = idfinder.find_item_by_path(filename) print "file id:", id apifile = operations.api.get_items(id=id)[0] self.dirlistcache[filename] = apifile return apifile #.get_download_url() def stat(self, path): apifile = self._getitem(path) return os.stat_result((666, 0L, 0L, 0, 0, 0, apifile.size, 0, 0, 0)) exists = lexists lstat = stat def validpath(self, path): return True def format_list_items(self, items): for item in items: if item.is_dir: s = 'drwxrwxrwx 1 %s group %8s Jan 01 00:00 %s\r\n' % ('aaa', 0, item.name) else: s = '-rw-rw-rw- 1 %s group %8s %s %s\r\n' % ('aaa', item.size, time.strftime("%b %d %H:%M"), item.name) yield s.encode('utf-8') def get_list_dir(self, path): try: item = self._getitem(path) except: return self.format_list_items([]) if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return self.format_list_items([]) return self.format_list_items(items) def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): # find item in cache... if basedir in self.dirlistcache: fnd = self.dirlistcache[basedir] try: items = operations.api.get_items(parent_id = fnd.id) except: items = [] else: if basedir == os.path.sep: items = operations.api.get_items() else: parent_id = self.idfinder.find_item_by_path(pathtoid._utf8(basedir)) print "parent_id:", parent_id items = operations.api.get_items(parent_id=parent_id) c = 0 s = '' for i in items: c = c + 1 type = 'type=file;' if 'type' in facts: if i.type == 'folder': type = 'type=dir;' if 'size' in facts: size = 'size=%s;' % i.size # file size ln = "%s%sperm=r;modify=20071029155301;unique=11150051; %s\r\n" % (type, size, i.name) if basedir== os.path.sep:
else: key = '%s/%s' % (pathtoid._utf8(basedir), pathtoid._utf8(i.name)) self.dirlistcache[key] = i print 'key:', key yield ln.encode('utf-8') class HttpOperations(object): '''Storing connection object''' def __init__(self): self.connection = None self.username = None def authenticate(self, username, password): self.username = username self.password = password config.apisecret = password config.apikey = username self.api = putio.Api(config.apikey,config.apisecret) print "checking user & passwd" username = self.api.get_user_name() if not username: return False print "> welcome ", username return True def __repr__(self): return self.connection operations = HttpOperations() class HttpAuthorizer(ftpserver.DummyAuthorizer): '''FTP server authorizer. Logs the users into Putio Cloud Files and keeps track of them. ''' users = {} def validate_authentication(self, username, password): try: return operations.authenticate(username, password) except: return False def has_user(self, username): return username != 'anonymous' def has_perm(self, username, perm, path=None): return True def get_perms(self, username): return 'lrdw' def get_home_dir(self, username): return os.sep def get_msg_login(self, username): return 'Welcome %s' % username def get_msg_quit(self, username): return 'Goodbye %s' % username def main(): ftp_handler = ftpserver.FTPHandler ftp_handler.authorizer = HttpAuthorizer() ftp_handler.abstracted_fs = HttpFS # ftp_handler.passive_ports = range(60000, 65535) # try: # ftp_handler.masquerade_address = gethostbyname(options.bind_address) # except gaierror, (_, errmsg): # sys.exit('Address error: %s' % errmsg) # # ftpd = ftpserver.FTPServer((options.bind_address, # options.port), # ftp_handler) address = (config.ip_address, 2121 ) ftpd = ftpserver.FTPServer(address, ftp_handler) ftpd.serve_forever() # # Instantiate a dummy authorizer for managing 'virtual' users # authorizer = ftpserver.DummyAuthorizer() # # # Define a new user having full r/w permissions and a read-only # # anonymous user # authorizer.add_user('user', '12345', '/home/aybars/completed', perm='elradfmw') # authorizer.add_anonymous('/home/aybars/completed') # # # Instantiate FTP handler class # ftp_handler = ftpserver.FTPHandler # ftp_handler.authorizer = authorizer # # # Define a customized banner (string returned when client connects) # ftp_handler.banner = "pyftpdlib %s based ftpd ready." %ftpserver.__ver__ # # # Specify a masquerade address and the range of ports to use for # # passive connections. Decomment in case you're behind a NAT. # #ftp_handler.masquerade_address = '151.25.42.11' # #ftp_handler.passive_ports = range(60000, 65535) # # # Instantiate FTP server class and listen to 0.0.0.0:21 # address = ('', 2121 ) # ftpd = ftpserver.FTPServer(address, ftp_handler) # # # set a limit for connections # ftpd.max_cons = 256 # ftpd.max_cons_per_ip = 5 # # # start ftp server # ftpd.serve_forever() if __name__ == '__main__': # api = putio.Api(config.apikey, config.apisecret) # # # getting your items # items = api.get_items(parent_id=11110932) # #yield items # # for it in items: # print "%s %s" % (it.id, it.name) main()
key = '/%s' % (pathtoid._utf8(i.name))
conditional_block
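The conditional_block above decides how the put.io FTP connector builds keys for its directory-listing cache: entries directly under the root become '/<name>', everything else becomes '<basedir>/<name>'. A minimal sketch of that branching, with a plain dict standing in for the connector's dirlistcache and illustrative names:

import os

def cache_key(basedir, name):
    # Root-level entries are keyed as "/<name>"; nested ones as "<basedir>/<name>"
    if basedir == os.path.sep:
        return '/%s' % name
    return '%s/%s' % (basedir, name)

dirlistcache = {}
dirlistcache[cache_key('/', 'movies')] = 'item-1'          # key: '/movies'
dirlistcache[cache_key('/movies', 'clip.avi')] = 'item-2'  # key: '/movies/clip.avi'
print(dirlistcache)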
putio-ftp-connector.py
#!/usr/bin/env python # $Id: basic_ftpd.py 569 2009-04-04 00:17:43Z billiejoex $ """A basic FTP server which uses a DummyAuthorizer for managing 'virtual users', setting a limit for incoming connections. """ import os #from pyftpdlib import ftpserver import urllib2 import base64 import putio from pathtoid import PathToId import pathtoid import config import time class HttpFD(object): def __init__(self, apifile, bucket, obj, mode): self.apifile = apifile self.download_url = apifile.get_stream_url() self.bucket = bucket self.name = obj self.mode = mode self.closed = False self.total_size = None self.seekpos = None self.read_size = 0 # speed... self.read_bytes = 128 * 1024 # 128kb per iteration self.buffer = '' self.req = None self.fd = None # gets total size req = urllib2.Request(self.download_url) f = urllib2.urlopen(req) self.total_size = f.headers.get('Content-Length') def write(self, data): raise OSError(1, 'Operation not permitted') # self.temp_file.write(data) def close(self): return def __read(self, size=65536): c = self.buffer self.buffer = '' return c def read(self, size=65536): if self.req == None: self.req = urllib2.Request(self.download_url) if self.seekpos: self.req.headers['Range'] = 'bytes=%s-' % (self.seekpos) if self.read_size > self.total_size: return self.read_size = self.read_size + self.read_bytes + 1 if not self.fd: self.fd = urllib2.urlopen(self.req) return self.fd.read(1024) def seek(self, frombytes, **kwargs): self.seekpos = frombytes return # .... idfinder = PathToId() idfinder.load_items() api = None class HttpFS(ftpserver.AbstractedFS): def __init__(self): self.root = None self.cwd = '/' self.rnfr = None self.dirlistcache = {} self.idfinder = idfinder def open(self, filename, mode): print "filename: ", filename if filename in self.dirlistcache: apifile = self.dirlistcache[filename] else: if filename == os.path.sep: # items = operations.api.get_items() # this is not a file its a directory # raise OSError(1, 'This is a directory') raise IOError(1, 'This is a directory') else: id = idfinder.find_item_by_path(filename) print "file id:", filename, id apifile = operations.api.get_items(id=id)[0] if apifile.is_dir: raise IOError(1, 'This is a directory') # return HttpFD(apifile, None, filename, mode) def chdir(self, path): self.cwd = path.decode('utf-8').encode('utf-8') def mkdir(self, path): dirs = os.path.split(path) apifile = self._getitem(dirs[0]) if not apifile: #this is root operations.api.create_folder(name = dirs[1], parent_id = 0) else: apifile.create_folder(name=dirs[1]) self.remove_from_cache(path) self.remove_from_cache(dirs[0]) def listdir(self, path): ret = [] try: item = self._getitem(path) except: return [] if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return [] for i in items: ret.append(i.name) return ret def remove_from_cache(self, path): if path in self.dirlistcache: del self.dirlistcache[path] idfinder.invalidate_items_cache_by_path(path) def rmdir(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def remove(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def rename(self, src, dst): apifile = self._getitem(src) if not apifile: raise OSError(2, 'No such file or directory') srcs = os.path.split(src) dsts = os.path.split(dst) if srcs[0] != dsts[0]: # this is a move 
operation.. if dsts[0] == os.path.sep: apifile.move_item(target=0) return destination = self._getitem(dsts[0]); if not destination: raise OSError(2, 'No such file or directory') apifile.move_item(target=destination.id) return apifile.rename_item(dsts[1]) self.remove_from_cache(src) self.remove_from_cache(srcs[0]) self.remove_from_cache(dst) self.remove_from_cache(dsts[0]) def isfile(self, path): return not self.isdir(path) def islink(self, path): return False def isdir(self, path): if path == os.path.sep: return True apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') if apifile.is_dir: return True else: return False def getsize(self, path): apifile = self._getitem(path) if not apifile: raise OSError(1, 'No such file or directory') print "filesize :", apifile.size return long(apifile.size) #return self.stat(path).st_size def getmtime(self, path): return self.stat(path).st_mtime def realpath(self, path): return path def lexists(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') return apifile def _getitem(self, filename): if filename in self.dirlistcache: apifile = self.dirlistcache[filename] print 'found........', apifile.id, apifile.name else: if filename == os.path.sep: # items = operations.api.get_items() return False else: id = idfinder.find_item_by_path(filename) print "file id:", id apifile = operations.api.get_items(id=id)[0] self.dirlistcache[filename] = apifile return apifile #.get_download_url() def stat(self, path): apifile = self._getitem(path) return os.stat_result((666, 0L, 0L, 0, 0, 0, apifile.size, 0, 0, 0)) exists = lexists lstat = stat def validpath(self, path): return True def format_list_items(self, items): for item in items: if item.is_dir: s = 'drwxrwxrwx 1 %s group %8s Jan 01 00:00 %s\r\n' % ('aaa', 0, item.name) else: s = '-rw-rw-rw- 1 %s group %8s %s %s\r\n' % ('aaa', item.size, time.strftime("%b %d %H:%M"), item.name) yield s.encode('utf-8') def get_list_dir(self, path): try: item = self._getitem(path) except: return self.format_list_items([]) if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return self.format_list_items([]) return self.format_list_items(items) def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): # find item in cache... if basedir in self.dirlistcache: fnd = self.dirlistcache[basedir] try: items = operations.api.get_items(parent_id = fnd.id) except: items = [] else: if basedir == os.path.sep: items = operations.api.get_items() else: parent_id = self.idfinder.find_item_by_path(pathtoid._utf8(basedir)) print "parent_id:", parent_id items = operations.api.get_items(parent_id=parent_id) c = 0 s = '' for i in items: c = c + 1 type = 'type=file;' if 'type' in facts: if i.type == 'folder': type = 'type=dir;' if 'size' in facts: size = 'size=%s;' % i.size # file size ln = "%s%sperm=r;modify=20071029155301;unique=11150051; %s\r\n" % (type, size, i.name) if basedir== os.path.sep: key = '/%s' % (pathtoid._utf8(i.name)) else: key = '%s/%s' % (pathtoid._utf8(basedir), pathtoid._utf8(i.name)) self.dirlistcache[key] = i print 'key:', key yield ln.encode('utf-8') class HttpOperations(object): '''Storing connection object''' def __init__(self): self.connection = None self.username = None def authenticate(self, username, password): self.username = username self.password = password config.apisecret = password config.apikey = username self.api = putio.Api(config.apikey,config.apisecret)
if not username: return False print "> welcome ", username return True def __repr__(self): return self.connection operations = HttpOperations() class HttpAuthorizer(ftpserver.DummyAuthorizer): '''FTP server authorizer. Logs the users into Putio Cloud Files and keeps track of them. ''' users = {} def validate_authentication(self, username, password): try: return operations.authenticate(username, password) except: return False def has_user(self, username): return username != 'anonymous' def has_perm(self, username, perm, path=None): return True def get_perms(self, username): return 'lrdw' def get_home_dir(self, username): return os.sep def get_msg_login(self, username): return 'Welcome %s' % username def get_msg_quit(self, username): return 'Goodbye %s' % username def main(): ftp_handler = ftpserver.FTPHandler ftp_handler.authorizer = HttpAuthorizer() ftp_handler.abstracted_fs = HttpFS # ftp_handler.passive_ports = range(60000, 65535) # try: # ftp_handler.masquerade_address = gethostbyname(options.bind_address) # except gaierror, (_, errmsg): # sys.exit('Address error: %s' % errmsg) # # ftpd = ftpserver.FTPServer((options.bind_address, # options.port), # ftp_handler) address = (config.ip_address, 2121 ) ftpd = ftpserver.FTPServer(address, ftp_handler) ftpd.serve_forever() # # Instantiate a dummy authorizer for managing 'virtual' users # authorizer = ftpserver.DummyAuthorizer() # # # Define a new user having full r/w permissions and a read-only # # anonymous user # authorizer.add_user('user', '12345', '/home/aybars/completed', perm='elradfmw') # authorizer.add_anonymous('/home/aybars/completed') # # # Instantiate FTP handler class # ftp_handler = ftpserver.FTPHandler # ftp_handler.authorizer = authorizer # # # Define a customized banner (string returned when client connects) # ftp_handler.banner = "pyftpdlib %s based ftpd ready." %ftpserver.__ver__ # # # Specify a masquerade address and the range of ports to use for # # passive connections. Decomment in case you're behind a NAT. # #ftp_handler.masquerade_address = '151.25.42.11' # #ftp_handler.passive_ports = range(60000, 65535) # # # Instantiate FTP server class and listen to 0.0.0.0:21 # address = ('', 2121 ) # ftpd = ftpserver.FTPServer(address, ftp_handler) # # # set a limit for connections # ftpd.max_cons = 256 # ftpd.max_cons_per_ip = 5 # # # start ftp server # ftpd.serve_forever() if __name__ == '__main__': # api = putio.Api(config.apikey, config.apisecret) # # # getting your items # items = api.get_items(parent_id=11110932) # #yield items # # for it in items: # print "%s %s" % (it.id, it.name) main()
print "checking user & passwd" username = self.api.get_user_name()
random_line_split
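The surrounding putio-ftp-connector.py records wire a custom authorizer and abstracted filesystem into pyftpdlib's legacy ftpserver module (its import is commented out in the prefix). On current pyftpdlib releases the equivalent server wiring looks roughly like the sketch below; this assumes the modern pyftpdlib API rather than the connector's own code, and the host, port, and credentials are placeholders:

from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer

def main():
    # Plain user/password authorizer; the connector instead swaps in its own
    # HttpAuthorizer that validates credentials against the put.io API.
    authorizer = DummyAuthorizer()
    authorizer.add_user('user', '12345', '.', perm='elradfmw')

    handler = FTPHandler
    handler.authorizer = authorizer

    server = FTPServer(('0.0.0.0', 2121), handler)
    server.serve_forever()

if __name__ == '__main__':
    main()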
putio-ftp-connector.py
#!/usr/bin/env python # $Id: basic_ftpd.py 569 2009-04-04 00:17:43Z billiejoex $ """A basic FTP server which uses a DummyAuthorizer for managing 'virtual users', setting a limit for incoming connections. """ import os #from pyftpdlib import ftpserver import urllib2 import base64 import putio from pathtoid import PathToId import pathtoid import config import time class HttpFD(object): def __init__(self, apifile, bucket, obj, mode): self.apifile = apifile self.download_url = apifile.get_stream_url() self.bucket = bucket self.name = obj self.mode = mode self.closed = False self.total_size = None self.seekpos = None self.read_size = 0 # speed... self.read_bytes = 128 * 1024 # 128kb per iteration self.buffer = '' self.req = None self.fd = None # gets total size req = urllib2.Request(self.download_url) f = urllib2.urlopen(req) self.total_size = f.headers.get('Content-Length') def write(self, data): raise OSError(1, 'Operation not permitted') # self.temp_file.write(data) def close(self): return def __read(self, size=65536): c = self.buffer self.buffer = '' return c def read(self, size=65536): if self.req == None: self.req = urllib2.Request(self.download_url) if self.seekpos: self.req.headers['Range'] = 'bytes=%s-' % (self.seekpos) if self.read_size > self.total_size: return self.read_size = self.read_size + self.read_bytes + 1 if not self.fd: self.fd = urllib2.urlopen(self.req) return self.fd.read(1024) def seek(self, frombytes, **kwargs): self.seekpos = frombytes return # .... idfinder = PathToId() idfinder.load_items() api = None class HttpFS(ftpserver.AbstractedFS): def __init__(self): self.root = None self.cwd = '/' self.rnfr = None self.dirlistcache = {} self.idfinder = idfinder def open(self, filename, mode): print "filename: ", filename if filename in self.dirlistcache: apifile = self.dirlistcache[filename] else: if filename == os.path.sep: # items = operations.api.get_items() # this is not a file its a directory # raise OSError(1, 'This is a directory') raise IOError(1, 'This is a directory') else: id = idfinder.find_item_by_path(filename) print "file id:", filename, id apifile = operations.api.get_items(id=id)[0] if apifile.is_dir: raise IOError(1, 'This is a directory') # return HttpFD(apifile, None, filename, mode) def chdir(self, path): self.cwd = path.decode('utf-8').encode('utf-8') def mkdir(self, path): dirs = os.path.split(path) apifile = self._getitem(dirs[0]) if not apifile: #this is root operations.api.create_folder(name = dirs[1], parent_id = 0) else: apifile.create_folder(name=dirs[1]) self.remove_from_cache(path) self.remove_from_cache(dirs[0]) def listdir(self, path): ret = [] try: item = self._getitem(path) except: return [] if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return [] for i in items: ret.append(i.name) return ret def remove_from_cache(self, path): if path in self.dirlistcache: del self.dirlistcache[path] idfinder.invalidate_items_cache_by_path(path) def rmdir(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def remove(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def rename(self, src, dst): apifile = self._getitem(src) if not apifile: raise OSError(2, 'No such file or directory') srcs = os.path.split(src) dsts = os.path.split(dst) if srcs[0] != dsts[0]: # this is a move 
operation.. if dsts[0] == os.path.sep: apifile.move_item(target=0) return destination = self._getitem(dsts[0]); if not destination: raise OSError(2, 'No such file or directory') apifile.move_item(target=destination.id) return apifile.rename_item(dsts[1]) self.remove_from_cache(src) self.remove_from_cache(srcs[0]) self.remove_from_cache(dst) self.remove_from_cache(dsts[0]) def isfile(self, path): return not self.isdir(path) def islink(self, path): return False def isdir(self, path): if path == os.path.sep: return True apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') if apifile.is_dir: return True else: return False def getsize(self, path): apifile = self._getitem(path) if not apifile: raise OSError(1, 'No such file or directory') print "filesize :", apifile.size return long(apifile.size) #return self.stat(path).st_size def getmtime(self, path): return self.stat(path).st_mtime def realpath(self, path): return path def lexists(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') return apifile def
(self, filename): if filename in self.dirlistcache: apifile = self.dirlistcache[filename] print 'found........', apifile.id, apifile.name else: if filename == os.path.sep: # items = operations.api.get_items() return False else: id = idfinder.find_item_by_path(filename) print "file id:", id apifile = operations.api.get_items(id=id)[0] self.dirlistcache[filename] = apifile return apifile #.get_download_url() def stat(self, path): apifile = self._getitem(path) return os.stat_result((666, 0L, 0L, 0, 0, 0, apifile.size, 0, 0, 0)) exists = lexists lstat = stat def validpath(self, path): return True def format_list_items(self, items): for item in items: if item.is_dir: s = 'drwxrwxrwx 1 %s group %8s Jan 01 00:00 %s\r\n' % ('aaa', 0, item.name) else: s = '-rw-rw-rw- 1 %s group %8s %s %s\r\n' % ('aaa', item.size, time.strftime("%b %d %H:%M"), item.name) yield s.encode('utf-8') def get_list_dir(self, path): try: item = self._getitem(path) except: return self.format_list_items([]) if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return self.format_list_items([]) return self.format_list_items(items) def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): # find item in cache... if basedir in self.dirlistcache: fnd = self.dirlistcache[basedir] try: items = operations.api.get_items(parent_id = fnd.id) except: items = [] else: if basedir == os.path.sep: items = operations.api.get_items() else: parent_id = self.idfinder.find_item_by_path(pathtoid._utf8(basedir)) print "parent_id:", parent_id items = operations.api.get_items(parent_id=parent_id) c = 0 s = '' for i in items: c = c + 1 type = 'type=file;' if 'type' in facts: if i.type == 'folder': type = 'type=dir;' if 'size' in facts: size = 'size=%s;' % i.size # file size ln = "%s%sperm=r;modify=20071029155301;unique=11150051; %s\r\n" % (type, size, i.name) if basedir== os.path.sep: key = '/%s' % (pathtoid._utf8(i.name)) else: key = '%s/%s' % (pathtoid._utf8(basedir), pathtoid._utf8(i.name)) self.dirlistcache[key] = i print 'key:', key yield ln.encode('utf-8') class HttpOperations(object): '''Storing connection object''' def __init__(self): self.connection = None self.username = None def authenticate(self, username, password): self.username = username self.password = password config.apisecret = password config.apikey = username self.api = putio.Api(config.apikey,config.apisecret) print "checking user & passwd" username = self.api.get_user_name() if not username: return False print "> welcome ", username return True def __repr__(self): return self.connection operations = HttpOperations() class HttpAuthorizer(ftpserver.DummyAuthorizer): '''FTP server authorizer. Logs the users into Putio Cloud Files and keeps track of them. 
''' users = {} def validate_authentication(self, username, password): try: return operations.authenticate(username, password) except: return False def has_user(self, username): return username != 'anonymous' def has_perm(self, username, perm, path=None): return True def get_perms(self, username): return 'lrdw' def get_home_dir(self, username): return os.sep def get_msg_login(self, username): return 'Welcome %s' % username def get_msg_quit(self, username): return 'Goodbye %s' % username def main(): ftp_handler = ftpserver.FTPHandler ftp_handler.authorizer = HttpAuthorizer() ftp_handler.abstracted_fs = HttpFS # ftp_handler.passive_ports = range(60000, 65535) # try: # ftp_handler.masquerade_address = gethostbyname(options.bind_address) # except gaierror, (_, errmsg): # sys.exit('Address error: %s' % errmsg) # # ftpd = ftpserver.FTPServer((options.bind_address, # options.port), # ftp_handler) address = (config.ip_address, 2121 ) ftpd = ftpserver.FTPServer(address, ftp_handler) ftpd.serve_forever() # # Instantiate a dummy authorizer for managing 'virtual' users # authorizer = ftpserver.DummyAuthorizer() # # # Define a new user having full r/w permissions and a read-only # # anonymous user # authorizer.add_user('user', '12345', '/home/aybars/completed', perm='elradfmw') # authorizer.add_anonymous('/home/aybars/completed') # # # Instantiate FTP handler class # ftp_handler = ftpserver.FTPHandler # ftp_handler.authorizer = authorizer # # # Define a customized banner (string returned when client connects) # ftp_handler.banner = "pyftpdlib %s based ftpd ready." %ftpserver.__ver__ # # # Specify a masquerade address and the range of ports to use for # # passive connections. Decomment in case you're behind a NAT. # #ftp_handler.masquerade_address = '151.25.42.11' # #ftp_handler.passive_ports = range(60000, 65535) # # # Instantiate FTP server class and listen to 0.0.0.0:21 # address = ('', 2121 ) # ftpd = ftpserver.FTPServer(address, ftp_handler) # # # set a limit for connections # ftpd.max_cons = 256 # ftpd.max_cons_per_ip = 5 # # # start ftp server # ftpd.serve_forever() if __name__ == '__main__': # api = putio.Api(config.apikey, config.apisecret) # # # getting your items # items = api.get_items(parent_id=11110932) # #yield items # # for it in items: # print "%s %s" % (it.id, it.name) main()
_getitem
identifier_name
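The HttpFD object in the record above streams a remote file by sending an HTTP Range header from the current seek offset instead of downloading the whole file up front. Below is a minimal Go sketch of that ranged-read idea, assuming a plain HTTP endpoint; the URL is a placeholder, not the put.io stream URL used in the record.

// rangeread.go: a sketch of ranged HTTP reads; the URL below is a placeholder.
package main

import (
    "fmt"
    "io"
    "net/http"
)

// readFrom fetches up to size bytes starting at offset via an HTTP Range request.
func readFrom(url string, offset, size int64) ([]byte, error) {
    req, err := http.NewRequest(http.MethodGet, url, nil)
    if err != nil {
        return nil, err
    }
    // Ask the server for bytes [offset, offset+size-1].
    req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+size-1))
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    // 206 Partial Content means the server honored the Range header.
    if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("unexpected status: %s", resp.Status)
    }
    return io.ReadAll(resp.Body)
}

func main() {
    data, err := readFrom("https://example.com/file.bin", 1024, 64*1024)
    if err != nil {
        fmt.Println("read failed:", err)
        return
    }
    fmt.Printf("got %d bytes\n", len(data))
}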
putio-ftp-connector.py
#!/usr/bin/env python # $Id: basic_ftpd.py 569 2009-04-04 00:17:43Z billiejoex $ """A basic FTP server which uses a DummyAuthorizer for managing 'virtual users', setting a limit for incoming connections. """ import os #from pyftpdlib import ftpserver import urllib2 import base64 import putio from pathtoid import PathToId import pathtoid import config import time class HttpFD(object): def __init__(self, apifile, bucket, obj, mode): self.apifile = apifile self.download_url = apifile.get_stream_url() self.bucket = bucket self.name = obj self.mode = mode self.closed = False self.total_size = None self.seekpos = None self.read_size = 0 # speed... self.read_bytes = 128 * 1024 # 128kb per iteration self.buffer = '' self.req = None self.fd = None # gets total size req = urllib2.Request(self.download_url) f = urllib2.urlopen(req) self.total_size = f.headers.get('Content-Length') def write(self, data): raise OSError(1, 'Operation not permitted') # self.temp_file.write(data) def close(self): return def __read(self, size=65536): c = self.buffer self.buffer = '' return c def read(self, size=65536): if self.req == None: self.req = urllib2.Request(self.download_url) if self.seekpos: self.req.headers['Range'] = 'bytes=%s-' % (self.seekpos) if self.read_size > self.total_size: return self.read_size = self.read_size + self.read_bytes + 1 if not self.fd: self.fd = urllib2.urlopen(self.req) return self.fd.read(1024) def seek(self, frombytes, **kwargs): self.seekpos = frombytes return # .... idfinder = PathToId() idfinder.load_items() api = None class HttpFS(ftpserver.AbstractedFS): def __init__(self): self.root = None self.cwd = '/' self.rnfr = None self.dirlistcache = {} self.idfinder = idfinder def open(self, filename, mode): print "filename: ", filename if filename in self.dirlistcache: apifile = self.dirlistcache[filename] else: if filename == os.path.sep: # items = operations.api.get_items() # this is not a file its a directory # raise OSError(1, 'This is a directory') raise IOError(1, 'This is a directory') else: id = idfinder.find_item_by_path(filename) print "file id:", filename, id apifile = operations.api.get_items(id=id)[0] if apifile.is_dir: raise IOError(1, 'This is a directory') # return HttpFD(apifile, None, filename, mode) def chdir(self, path): self.cwd = path.decode('utf-8').encode('utf-8') def mkdir(self, path):
def listdir(self, path): ret = [] try: item = self._getitem(path) except: return [] if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return [] for i in items: ret.append(i.name) return ret def remove_from_cache(self, path): if path in self.dirlistcache: del self.dirlistcache[path] idfinder.invalidate_items_cache_by_path(path) def rmdir(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def remove(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') apifile.delete_item() self.remove_from_cache(path) def rename(self, src, dst): apifile = self._getitem(src) if not apifile: raise OSError(2, 'No such file or directory') srcs = os.path.split(src) dsts = os.path.split(dst) if srcs[0] != dsts[0]: # this is a move operation.. if dsts[0] == os.path.sep: apifile.move_item(target=0) return destination = self._getitem(dsts[0]); if not destination: raise OSError(2, 'No such file or directory') apifile.move_item(target=destination.id) return apifile.rename_item(dsts[1]) self.remove_from_cache(src) self.remove_from_cache(srcs[0]) self.remove_from_cache(dst) self.remove_from_cache(dsts[0]) def isfile(self, path): return not self.isdir(path) def islink(self, path): return False def isdir(self, path): if path == os.path.sep: return True apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') if apifile.is_dir: return True else: return False def getsize(self, path): apifile = self._getitem(path) if not apifile: raise OSError(1, 'No such file or directory') print "filesize :", apifile.size return long(apifile.size) #return self.stat(path).st_size def getmtime(self, path): return self.stat(path).st_mtime def realpath(self, path): return path def lexists(self, path): apifile = self._getitem(path) if not apifile: raise OSError(2, 'No such file or directory') return apifile def _getitem(self, filename): if filename in self.dirlistcache: apifile = self.dirlistcache[filename] print 'found........', apifile.id, apifile.name else: if filename == os.path.sep: # items = operations.api.get_items() return False else: id = idfinder.find_item_by_path(filename) print "file id:", id apifile = operations.api.get_items(id=id)[0] self.dirlistcache[filename] = apifile return apifile #.get_download_url() def stat(self, path): apifile = self._getitem(path) return os.stat_result((666, 0L, 0L, 0, 0, 0, apifile.size, 0, 0, 0)) exists = lexists lstat = stat def validpath(self, path): return True def format_list_items(self, items): for item in items: if item.is_dir: s = 'drwxrwxrwx 1 %s group %8s Jan 01 00:00 %s\r\n' % ('aaa', 0, item.name) else: s = '-rw-rw-rw- 1 %s group %8s %s %s\r\n' % ('aaa', item.size, time.strftime("%b %d %H:%M"), item.name) yield s.encode('utf-8') def get_list_dir(self, path): try: item = self._getitem(path) except: return self.format_list_items([]) if not item: items = operations.api.get_items() else: try: items = operations.api.get_items(parent_id=item.id) except: return self.format_list_items([]) return self.format_list_items(items) def format_mlsx(self, basedir, listing, perms, facts, ignore_err=True): # find item in cache... 
if basedir in self.dirlistcache: fnd = self.dirlistcache[basedir] try: items = operations.api.get_items(parent_id = fnd.id) except: items = [] else: if basedir == os.path.sep: items = operations.api.get_items() else: parent_id = self.idfinder.find_item_by_path(pathtoid._utf8(basedir)) print "parent_id:", parent_id items = operations.api.get_items(parent_id=parent_id) c = 0 s = '' for i in items: c = c + 1 type = 'type=file;' if 'type' in facts: if i.type == 'folder': type = 'type=dir;' if 'size' in facts: size = 'size=%s;' % i.size # file size ln = "%s%sperm=r;modify=20071029155301;unique=11150051; %s\r\n" % (type, size, i.name) if basedir== os.path.sep: key = '/%s' % (pathtoid._utf8(i.name)) else: key = '%s/%s' % (pathtoid._utf8(basedir), pathtoid._utf8(i.name)) self.dirlistcache[key] = i print 'key:', key yield ln.encode('utf-8') class HttpOperations(object): '''Storing connection object''' def __init__(self): self.connection = None self.username = None def authenticate(self, username, password): self.username = username self.password = password config.apisecret = password config.apikey = username self.api = putio.Api(config.apikey,config.apisecret) print "checking user & passwd" username = self.api.get_user_name() if not username: return False print "> welcome ", username return True def __repr__(self): return self.connection operations = HttpOperations() class HttpAuthorizer(ftpserver.DummyAuthorizer): '''FTP server authorizer. Logs the users into Putio Cloud Files and keeps track of them. ''' users = {} def validate_authentication(self, username, password): try: return operations.authenticate(username, password) except: return False def has_user(self, username): return username != 'anonymous' def has_perm(self, username, perm, path=None): return True def get_perms(self, username): return 'lrdw' def get_home_dir(self, username): return os.sep def get_msg_login(self, username): return 'Welcome %s' % username def get_msg_quit(self, username): return 'Goodbye %s' % username def main(): ftp_handler = ftpserver.FTPHandler ftp_handler.authorizer = HttpAuthorizer() ftp_handler.abstracted_fs = HttpFS # ftp_handler.passive_ports = range(60000, 65535) # try: # ftp_handler.masquerade_address = gethostbyname(options.bind_address) # except gaierror, (_, errmsg): # sys.exit('Address error: %s' % errmsg) # # ftpd = ftpserver.FTPServer((options.bind_address, # options.port), # ftp_handler) address = (config.ip_address, 2121 ) ftpd = ftpserver.FTPServer(address, ftp_handler) ftpd.serve_forever() # # Instantiate a dummy authorizer for managing 'virtual' users # authorizer = ftpserver.DummyAuthorizer() # # # Define a new user having full r/w permissions and a read-only # # anonymous user # authorizer.add_user('user', '12345', '/home/aybars/completed', perm='elradfmw') # authorizer.add_anonymous('/home/aybars/completed') # # # Instantiate FTP handler class # ftp_handler = ftpserver.FTPHandler # ftp_handler.authorizer = authorizer # # # Define a customized banner (string returned when client connects) # ftp_handler.banner = "pyftpdlib %s based ftpd ready." %ftpserver.__ver__ # # # Specify a masquerade address and the range of ports to use for # # passive connections. Decomment in case you're behind a NAT. 
# #ftp_handler.masquerade_address = '151.25.42.11' # #ftp_handler.passive_ports = range(60000, 65535) # # # Instantiate FTP server class and listen to 0.0.0.0:21 # address = ('', 2121 ) # ftpd = ftpserver.FTPServer(address, ftp_handler) # # # set a limit for connections # ftpd.max_cons = 256 # ftpd.max_cons_per_ip = 5 # # # start ftp server # ftpd.serve_forever() if __name__ == '__main__': # api = putio.Api(config.apikey, config.apisecret) # # # getting your items # items = api.get_items(parent_id=11110932) # #yield items # # for it in items: # print "%s %s" % (it.id, it.name) main()
dirs = os.path.split(path) apifile = self._getitem(dirs[0]) if not apifile: #this is root operations.api.create_folder(name = dirs[1], parent_id = 0) else: apifile.create_folder(name=dirs[1]) self.remove_from_cache(path) self.remove_from_cache(dirs[0])
identifier_body
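The identifier_body completion above fills in mkdir: split the path, resolve the parent folder (the root when there is none), create the new folder under it, then drop the cached entries. A small Go sketch of the same split-then-create flow follows, with an in-memory map standing in for the put.io API; all names are hypothetical.

// mkdirsketch.go: parent resolution and folder creation against an in-memory map.
package main

import (
    "fmt"
    "path"
)

// folders maps an absolute path to a folder ID; "/" is the root (ID 0).
var folders = map[string]int{"/": 0}
var nextID = 1

// mkdir splits the path, resolves the parent, and creates the folder under it.
func mkdir(dir string) error {
    dir = path.Clean(dir)
    parent, name := path.Split(dir)
    parent = path.Clean(parent)
    parentID, ok := folders[parent]
    if !ok {
        return fmt.Errorf("no such file or directory: %s", parent)
    }
    folders[dir] = nextID
    fmt.Printf("created %q (id=%d) under parent id=%d\n", name, nextID, parentID)
    nextID++
    return nil
}

func main() {
    _ = mkdir("/movies")
    _ = mkdir("/movies/2009")
}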
glfw.go
// Copyright 2016 The G3N Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !wasm // +build !wasm package window import ( "bytes" "fmt" "image" _ "image/png" "os" "runtime" "github.com/g3n/engine/core" "github.com/g3n/engine/gls" "github.com/g3n/engine/gui/assets" "github.com/go-gl/glfw/v3.3/glfw" ) // Keycodes const ( KeyUnknown = Key(glfw.KeyUnknown) KeySpace = Key(glfw.KeySpace) KeyApostrophe = Key(glfw.KeyApostrophe) KeyComma = Key(glfw.KeyComma) KeyMinus = Key(glfw.KeyMinus) KeyPeriod = Key(glfw.KeyPeriod) KeySlash = Key(glfw.KeySlash) Key0 = Key(glfw.Key0) Key1 = Key(glfw.Key1) Key2 = Key(glfw.Key2) Key3 = Key(glfw.Key3) Key4 = Key(glfw.Key4) Key5 = Key(glfw.Key5) Key6 = Key(glfw.Key6) Key7 = Key(glfw.Key7) Key8 = Key(glfw.Key8) Key9 = Key(glfw.Key9) KeySemicolon = Key(glfw.KeySemicolon) KeyEqual = Key(glfw.KeyEqual) KeyA = Key(glfw.KeyA) KeyB = Key(glfw.KeyB) KeyC = Key(glfw.KeyC) KeyD = Key(glfw.KeyD) KeyE = Key(glfw.KeyE) KeyF = Key(glfw.KeyF) KeyG = Key(glfw.KeyG) KeyH = Key(glfw.KeyH) KeyI = Key(glfw.KeyI) KeyJ = Key(glfw.KeyJ) KeyK = Key(glfw.KeyK) KeyL = Key(glfw.KeyL) KeyM = Key(glfw.KeyM) KeyN = Key(glfw.KeyN) KeyO = Key(glfw.KeyO) KeyP = Key(glfw.KeyP) KeyQ = Key(glfw.KeyQ) KeyR = Key(glfw.KeyR) KeyS = Key(glfw.KeyS) KeyT = Key(glfw.KeyT) KeyU = Key(glfw.KeyU) KeyV = Key(glfw.KeyV) KeyW = Key(glfw.KeyW) KeyX = Key(glfw.KeyX) KeyY = Key(glfw.KeyY) KeyZ = Key(glfw.KeyZ) KeyLeftBracket = Key(glfw.KeyLeftBracket) KeyBackslash = Key(glfw.KeyBackslash) KeyRightBracket = Key(glfw.KeyRightBracket) KeyGraveAccent = Key(glfw.KeyGraveAccent) KeyWorld1 = Key(glfw.KeyWorld1) KeyWorld2 = Key(glfw.KeyWorld2) KeyEscape = Key(glfw.KeyEscape) KeyEnter = Key(glfw.KeyEnter) KeyTab = Key(glfw.KeyTab) KeyBackspace = Key(glfw.KeyBackspace) KeyInsert = Key(glfw.KeyInsert) KeyDelete = Key(glfw.KeyDelete) KeyRight = Key(glfw.KeyRight) KeyLeft = Key(glfw.KeyLeft) KeyDown = Key(glfw.KeyDown) KeyUp = Key(glfw.KeyUp) KeyPageUp = Key(glfw.KeyPageUp) KeyPageDown = Key(glfw.KeyPageDown) KeyHome = Key(glfw.KeyHome) KeyEnd = Key(glfw.KeyEnd) KeyCapsLock = Key(glfw.KeyCapsLock) KeyScrollLock = Key(glfw.KeyScrollLock) KeyNumLock = Key(glfw.KeyNumLock) KeyPrintScreen = Key(glfw.KeyPrintScreen) KeyPause = Key(glfw.KeyPause) KeyF1 = Key(glfw.KeyF1) KeyF2 = Key(glfw.KeyF2) KeyF3 = Key(glfw.KeyF3) KeyF4 = Key(glfw.KeyF4) KeyF5 = Key(glfw.KeyF5) KeyF6 = Key(glfw.KeyF6) KeyF7 = Key(glfw.KeyF7) KeyF8 = Key(glfw.KeyF8) KeyF9 = Key(glfw.KeyF9) KeyF10 = Key(glfw.KeyF10) KeyF11 = Key(glfw.KeyF11) KeyF12 = Key(glfw.KeyF12) KeyF13 = Key(glfw.KeyF13) KeyF14 = Key(glfw.KeyF14) KeyF15 = Key(glfw.KeyF15) KeyF16 = Key(glfw.KeyF16) KeyF17 = Key(glfw.KeyF17) KeyF18 = Key(glfw.KeyF18) KeyF19 = Key(glfw.KeyF19) KeyF20 = Key(glfw.KeyF20) KeyF21 = Key(glfw.KeyF21) KeyF22 = Key(glfw.KeyF22) KeyF23 = Key(glfw.KeyF23) KeyF24 = Key(glfw.KeyF24) KeyF25 = Key(glfw.KeyF25) KeyKP0 = Key(glfw.KeyKP0) KeyKP1 = Key(glfw.KeyKP1) KeyKP2 = Key(glfw.KeyKP2) KeyKP3 = Key(glfw.KeyKP3) KeyKP4 = Key(glfw.KeyKP4) KeyKP5 = Key(glfw.KeyKP5) KeyKP6 = Key(glfw.KeyKP6) KeyKP7 = Key(glfw.KeyKP7) KeyKP8 = Key(glfw.KeyKP8) KeyKP9 = Key(glfw.KeyKP9) KeyKPDecimal = Key(glfw.KeyKPDecimal) KeyKPDivide = Key(glfw.KeyKPDivide) KeyKPMultiply = Key(glfw.KeyKPMultiply) KeyKPSubtract = Key(glfw.KeyKPSubtract) KeyKPAdd = Key(glfw.KeyKPAdd) KeyKPEnter = Key(glfw.KeyKPEnter) KeyKPEqual = Key(glfw.KeyKPEqual) KeyLeftShift = Key(glfw.KeyLeftShift) KeyLeftControl = 
Key(glfw.KeyLeftControl) KeyLeftAlt = Key(glfw.KeyLeftAlt) KeyLeftSuper = Key(glfw.KeyLeftSuper) KeyRightShift = Key(glfw.KeyRightShift) KeyRightControl = Key(glfw.KeyRightControl) KeyRightAlt = Key(glfw.KeyRightAlt) KeyRightSuper = Key(glfw.KeyRightSuper) KeyMenu = Key(glfw.KeyMenu) KeyLast = Key(glfw.KeyLast) ) // Modifier keys const ( ModShift = ModifierKey(glfw.ModShift) ModControl = ModifierKey(glfw.ModControl) ModAlt = ModifierKey(glfw.ModAlt) ModSuper = ModifierKey(glfw.ModSuper) ) // Mouse buttons const ( MouseButton1 = MouseButton(glfw.MouseButton1) MouseButton2 = MouseButton(glfw.MouseButton2) MouseButton3 = MouseButton(glfw.MouseButton3) MouseButton4 = MouseButton(glfw.MouseButton4) MouseButton5 = MouseButton(glfw.MouseButton5) MouseButton6 = MouseButton(glfw.MouseButton6) MouseButton7 = MouseButton(glfw.MouseButton7) MouseButton8 = MouseButton(glfw.MouseButton8) MouseButtonLast = MouseButton(glfw.MouseButtonLast) MouseButtonLeft = MouseButton(glfw.MouseButtonLeft) MouseButtonRight = MouseButton(glfw.MouseButtonRight) MouseButtonMiddle = MouseButton(glfw.MouseButtonMiddle) ) // Input modes const ( CursorInputMode = InputMode(glfw.CursorMode) // See Cursor mode values StickyKeysInputMode = InputMode(glfw.StickyKeysMode) // Value can be either 1 or 0 StickyMouseButtonsInputMode = InputMode(glfw.StickyMouseButtonsMode) // Value can be either 1 or 0 ) // Cursor mode values const ( CursorNormal = CursorMode(glfw.CursorNormal) CursorHidden = CursorMode(glfw.CursorHidden) CursorDisabled = CursorMode(glfw.CursorDisabled) ) // GlfwWindow describes one glfw window type GlfwWindow struct { *glfw.Window // Embedded GLFW window core.Dispatcher // Embedded event dispatcher gls *gls.GLS // Associated OpenGL State fullscreen bool lastX int lastY int lastWidth int lastHeight int scaleX float64 scaleY float64 // Events keyEv KeyEvent charEv CharEvent mouseEv MouseEvent posEv PosEvent sizeEv SizeEvent cursorEv CursorEvent scrollEv ScrollEvent focusEv FocusEvent mods ModifierKey // Current modifier keys // Cursors cursors map[Cursor]*glfw.Cursor lastCursorKey Cursor } // Init initializes the GlfwWindow singleton with the specified width, height, and title. func Init(width, height int, title string) error { // Panic if already created if win != nil { panic(fmt.Errorf("can only call window.Init() once")) } // OpenGL functions must be executed in the same thread where // the context was created (by wmgr.CreateWindow()) runtime.LockOSThread() // Create wrapper window with dispatcher w := new(GlfwWindow) w.Dispatcher.Initialize() var err error // Initialize GLFW err = glfw.Init() if err != nil { return err } // Set window hints glfw.WindowHint(glfw.ContextVersionMajor, 3) glfw.WindowHint(glfw.ContextVersionMinor, 3) glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile) glfw.WindowHint(glfw.Samples, 8) // Set OpenGL forward compatible context only for OSX because it is required for OSX. // When this is set, glLineWidth(width) only accepts width=1.0 and generates an error // for any other values although the spec says it should ignore unsupported widths // and generate an error only when width <= 0. if runtime.GOOS == "darwin" { glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True) } // Create window and set it as the current context. // The window is created always as not full screen because if it is // created as full screen it not possible to revert it to windowed mode. // At the end of this function, the window will be set to full screen if requested. 
w.Window, err = glfw.CreateWindow(width, height, title, nil, nil) if err != nil { return err } w.MakeContextCurrent() // Create OpenGL state w.gls, err = gls.New() if err != nil { return err } // Compute and store scale fbw, fbh := w.GetFramebufferSize() w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) // Create map for cursors w.cursors = make(map[Cursor]*glfw.Cursor) w.lastCursorKey = CursorLast // Preallocate GLFW standard cursors w.cursors[ArrowCursor] = glfw.CreateStandardCursor(glfw.ArrowCursor) w.cursors[IBeamCursor] = glfw.CreateStandardCursor(glfw.IBeamCursor) w.cursors[CrosshairCursor] = glfw.CreateStandardCursor(glfw.CrosshairCursor) w.cursors[HandCursor] = glfw.CreateStandardCursor(glfw.HandCursor) w.cursors[HResizeCursor] = glfw.CreateStandardCursor(glfw.HResizeCursor) w.cursors[VResizeCursor] = glfw.CreateStandardCursor(glfw.VResizeCursor) // Preallocate extra G3N standard cursors (diagonal resize cursors) cursorDiag1Png := assets.MustAsset("cursors/diag1.png") // [/] cursorDiag2Png := assets.MustAsset("cursors/diag2.png") // [\] diag1Img, _, err := image.Decode(bytes.NewReader(cursorDiag1Png)) diag2Img, _, err := image.Decode(bytes.NewReader(cursorDiag2Png)) if err != nil { return err } w.cursors[DiagResize1Cursor] = glfw.CreateCursor(diag1Img, 8, 8) // [/] w.cursors[DiagResize2Cursor] = glfw.CreateCursor(diag2Img, 8, 8) // [\] // Set up key callback to dispatch event w.SetKeyCallback(func(x *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { w.keyEv.Key = Key(key) w.keyEv.Mods = ModifierKey(mods) w.mods = w.keyEv.Mods if action == glfw.Press { w.Dispatch(OnKeyDown, &w.keyEv) } else if action == glfw.Release { w.Dispatch(OnKeyUp, &w.keyEv) } else if action == glfw.Repeat { w.Dispatch(OnKeyRepeat, &w.keyEv) } }) // Set up char callback to dispatch event w.SetCharModsCallback(func(x *glfw.Window, char rune, mods glfw.ModifierKey) { w.charEv.Char = char w.charEv.Mods = ModifierKey(mods) w.Dispatch(OnChar, &w.charEv) }) // Set up mouse button callback to dispatch event w.SetMouseButtonCallback(func(x *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { xpos, ypos := x.GetCursorPos() w.mouseEv.Button = MouseButton(button) w.mouseEv.Mods = ModifierKey(mods) w.mouseEv.Xpos = float32(xpos) //* float32(w.scaleX) TODO w.mouseEv.Ypos = float32(ypos) //* float32(w.scaleY) if action == glfw.Press { w.Dispatch(OnMouseDown, &w.mouseEv) } else if action == glfw.Release { w.Dispatch(OnMouseUp, &w.mouseEv) } }) // Set up window size callback to dispatch event w.SetSizeCallback(func(x *glfw.Window, width int, height int) { fbw, fbh := x.GetFramebufferSize() w.sizeEv.Width = width w.sizeEv.Height = height w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) w.Dispatch(OnWindowSize, &w.sizeEv) }) // Set up window position callback to dispatch event w.SetPosCallback(func(x *glfw.Window, xpos int, ypos int) { w.posEv.Xpos = xpos w.posEv.Ypos = ypos w.Dispatch(OnWindowPos, &w.posEv) }) // Set up window focus callback to dispatch event w.SetFocusCallback(func(x *glfw.Window, focused bool) { w.focusEv.Focused = focused w.Dispatch(OnWindowFocus, &w.focusEv) }) // Set up window cursor position callback to dispatch event w.SetCursorPosCallback(func(x *glfw.Window, xpos float64, ypos float64) { w.cursorEv.Xpos = float32(xpos) w.cursorEv.Ypos = float32(ypos) w.cursorEv.Mods = w.mods w.Dispatch(OnCursor, &w.cursorEv) }) // Set up mouse wheel scroll callback to dispatch event 
w.SetScrollCallback(func(x *glfw.Window, xoff float64, yoff float64) { w.scrollEv.Xoffset = float32(xoff) w.scrollEv.Yoffset = float32(yoff) w.scrollEv.Mods = w.mods w.Dispatch(OnScroll, &w.scrollEv) }) win = w // Set singleton return nil } // Gls returns the associated OpenGL state. func (w *GlfwWindow) Gls() *gls.GLS { return w.gls } // FullScreen returns whether this windows is currently fullscreen. func (w *GlfwWindow) FullScreen() bool { return w.fullscreen } // SetFullScreen sets this window as fullscreen on the primary monitor // TODO allow for fullscreen with resolutions different than the monitor's func (w *GlfwWindow) SetFullScreen(full bool)
// Destroy destroys this window and its context func (w *GlfwWindow) Destroy() { w.Window.Destroy() glfw.Terminate() runtime.UnlockOSThread() // Important when using the execution tracer } // Scale returns this window's DPI scale factor (FramebufferSize / Size) func (w *GlfwWindow) GetScale() (x float64, y float64) { return w.scaleX, w.scaleY } // ScreenResolution returns the screen resolution func (w *GlfwWindow) ScreenResolution(p interface{}) (width, height int) { mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() return vmode.Width, vmode.Height } // PollEvents process events in the event queue func (w *GlfwWindow) PollEvents() { glfw.PollEvents() } // SetSwapInterval sets the number of screen updates to wait from the time SwapBuffer() // is called before swapping the buffers and returning. func (w *GlfwWindow) SetSwapInterval(interval int) { glfw.SwapInterval(interval) } // SetCursor sets the window's cursor. func (w *GlfwWindow) SetCursor(cursor Cursor) { cur, ok := w.cursors[cursor] if !ok { panic("Invalid cursor") } w.Window.SetCursor(cur) } // CreateCursor creates a new custom cursor and returns an int handle. func (w *GlfwWindow) CreateCursor(imgFile string, xhot, yhot int) (Cursor, error) { // Open image file file, err := os.Open(imgFile) if err != nil { return 0, err } defer file.Close() // Decode image img, _, err := image.Decode(file) if err != nil { return 0, err } // Create and store cursor w.lastCursorKey += 1 w.cursors[Cursor(w.lastCursorKey)] = glfw.CreateCursor(img, xhot, yhot) return w.lastCursorKey, nil } // DisposeCursor deletes the existing custom cursor with the provided int handle. func (w *GlfwWindow) DisposeCursor(cursor Cursor) { if cursor <= CursorLast { panic("Can't dispose standard cursor") } w.cursors[cursor].Destroy() delete(w.cursors, cursor) } // DisposeAllCursors deletes all existing custom cursors. func (w *GlfwWindow) DisposeAllCustomCursors() { // Destroy and delete all custom cursors for key := range w.cursors { if key > CursorLast { w.cursors[key].Destroy() delete(w.cursors, key) } } // Set the next cursor key as the last standard cursor key + 1 w.lastCursorKey = CursorLast } // Center centers the window on the screen. //func (w *GlfwWindow) Center() { // // // TODO //}
{ // If already in the desired state, nothing to do if w.fullscreen == full { return } // Set window fullscreen on the primary monitor if full { // Save current position and size of the window w.lastX, w.lastY = w.GetPos() w.lastWidth, w.lastHeight = w.GetSize() // Get size of primary monitor mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() width := vmode.Width height := vmode.Height // Set as fullscreen on the primary monitor w.SetMonitor(mon, 0, 0, width, height, vmode.RefreshRate) w.fullscreen = true } else { // Restore window to previous position and size w.SetMonitor(nil, w.lastX, w.lastY, w.lastWidth, w.lastHeight, glfw.DontCare) w.fullscreen = false } }
identifier_body
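The completion above is the body of SetFullScreen: return early if the state already matches, otherwise save the windowed position and size before switching to the monitor's resolution, and restore them when leaving fullscreen. Below is a minimal, GLFW-free Go sketch of that save/restore toggle pattern; the type and its fields are illustrative, not the engine's API.

// toggle.go: remember windowed geometry before going fullscreen, restore it after.
package main

import "fmt"

type window struct {
    fullscreen            bool
    x, y, width, height   int // current geometry
    lastX, lastY          int // saved windowed position
    lastWidth, lastHeight int // saved windowed size
}

// setFullScreen mirrors the record's save/restore toggle, without any GLFW calls.
func (w *window) setFullScreen(full bool, monW, monH int) {
    if w.fullscreen == full {
        return // already in the desired state
    }
    if full {
        // Save windowed geometry, then cover the whole monitor.
        w.lastX, w.lastY, w.lastWidth, w.lastHeight = w.x, w.y, w.width, w.height
        w.x, w.y, w.width, w.height = 0, 0, monW, monH
    } else {
        // Restore the saved windowed geometry.
        w.x, w.y, w.width, w.height = w.lastX, w.lastY, w.lastWidth, w.lastHeight
    }
    w.fullscreen = full
}

func main() {
    w := &window{x: 100, y: 80, width: 800, height: 600}
    w.setFullScreen(true, 1920, 1080)
    fmt.Printf("fullscreen: %+v\n", *w)
    w.setFullScreen(false, 1920, 1080)
    fmt.Printf("windowed:   %+v\n", *w)
}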
glfw.go
// Copyright 2016 The G3N Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !wasm // +build !wasm package window import ( "bytes" "fmt" "image" _ "image/png" "os" "runtime" "github.com/g3n/engine/core" "github.com/g3n/engine/gls" "github.com/g3n/engine/gui/assets" "github.com/go-gl/glfw/v3.3/glfw" ) // Keycodes const ( KeyUnknown = Key(glfw.KeyUnknown) KeySpace = Key(glfw.KeySpace) KeyApostrophe = Key(glfw.KeyApostrophe) KeyComma = Key(glfw.KeyComma) KeyMinus = Key(glfw.KeyMinus) KeyPeriod = Key(glfw.KeyPeriod) KeySlash = Key(glfw.KeySlash) Key0 = Key(glfw.Key0) Key1 = Key(glfw.Key1) Key2 = Key(glfw.Key2) Key3 = Key(glfw.Key3) Key4 = Key(glfw.Key4) Key5 = Key(glfw.Key5) Key6 = Key(glfw.Key6) Key7 = Key(glfw.Key7) Key8 = Key(glfw.Key8) Key9 = Key(glfw.Key9) KeySemicolon = Key(glfw.KeySemicolon) KeyEqual = Key(glfw.KeyEqual) KeyA = Key(glfw.KeyA) KeyB = Key(glfw.KeyB) KeyC = Key(glfw.KeyC) KeyD = Key(glfw.KeyD) KeyE = Key(glfw.KeyE) KeyF = Key(glfw.KeyF) KeyG = Key(glfw.KeyG) KeyH = Key(glfw.KeyH) KeyI = Key(glfw.KeyI) KeyJ = Key(glfw.KeyJ) KeyK = Key(glfw.KeyK) KeyL = Key(glfw.KeyL) KeyM = Key(glfw.KeyM) KeyN = Key(glfw.KeyN) KeyO = Key(glfw.KeyO) KeyP = Key(glfw.KeyP) KeyQ = Key(glfw.KeyQ) KeyR = Key(glfw.KeyR) KeyS = Key(glfw.KeyS) KeyT = Key(glfw.KeyT) KeyU = Key(glfw.KeyU) KeyV = Key(glfw.KeyV) KeyW = Key(glfw.KeyW) KeyX = Key(glfw.KeyX) KeyY = Key(glfw.KeyY) KeyZ = Key(glfw.KeyZ) KeyLeftBracket = Key(glfw.KeyLeftBracket) KeyBackslash = Key(glfw.KeyBackslash) KeyRightBracket = Key(glfw.KeyRightBracket) KeyGraveAccent = Key(glfw.KeyGraveAccent) KeyWorld1 = Key(glfw.KeyWorld1) KeyWorld2 = Key(glfw.KeyWorld2) KeyEscape = Key(glfw.KeyEscape) KeyEnter = Key(glfw.KeyEnter) KeyTab = Key(glfw.KeyTab) KeyBackspace = Key(glfw.KeyBackspace) KeyInsert = Key(glfw.KeyInsert) KeyDelete = Key(glfw.KeyDelete) KeyRight = Key(glfw.KeyRight) KeyLeft = Key(glfw.KeyLeft) KeyDown = Key(glfw.KeyDown) KeyUp = Key(glfw.KeyUp) KeyPageUp = Key(glfw.KeyPageUp) KeyPageDown = Key(glfw.KeyPageDown) KeyHome = Key(glfw.KeyHome) KeyEnd = Key(glfw.KeyEnd) KeyCapsLock = Key(glfw.KeyCapsLock) KeyScrollLock = Key(glfw.KeyScrollLock) KeyNumLock = Key(glfw.KeyNumLock) KeyPrintScreen = Key(glfw.KeyPrintScreen) KeyPause = Key(glfw.KeyPause) KeyF1 = Key(glfw.KeyF1) KeyF2 = Key(glfw.KeyF2) KeyF3 = Key(glfw.KeyF3) KeyF4 = Key(glfw.KeyF4) KeyF5 = Key(glfw.KeyF5) KeyF6 = Key(glfw.KeyF6) KeyF7 = Key(glfw.KeyF7) KeyF8 = Key(glfw.KeyF8) KeyF9 = Key(glfw.KeyF9) KeyF10 = Key(glfw.KeyF10) KeyF11 = Key(glfw.KeyF11) KeyF12 = Key(glfw.KeyF12) KeyF13 = Key(glfw.KeyF13) KeyF14 = Key(glfw.KeyF14) KeyF15 = Key(glfw.KeyF15) KeyF16 = Key(glfw.KeyF16) KeyF17 = Key(glfw.KeyF17) KeyF18 = Key(glfw.KeyF18) KeyF19 = Key(glfw.KeyF19) KeyF20 = Key(glfw.KeyF20) KeyF21 = Key(glfw.KeyF21) KeyF22 = Key(glfw.KeyF22) KeyF23 = Key(glfw.KeyF23) KeyF24 = Key(glfw.KeyF24) KeyF25 = Key(glfw.KeyF25) KeyKP0 = Key(glfw.KeyKP0) KeyKP1 = Key(glfw.KeyKP1) KeyKP2 = Key(glfw.KeyKP2) KeyKP3 = Key(glfw.KeyKP3) KeyKP4 = Key(glfw.KeyKP4) KeyKP5 = Key(glfw.KeyKP5) KeyKP6 = Key(glfw.KeyKP6) KeyKP7 = Key(glfw.KeyKP7) KeyKP8 = Key(glfw.KeyKP8) KeyKP9 = Key(glfw.KeyKP9) KeyKPDecimal = Key(glfw.KeyKPDecimal) KeyKPDivide = Key(glfw.KeyKPDivide) KeyKPMultiply = Key(glfw.KeyKPMultiply) KeyKPSubtract = Key(glfw.KeyKPSubtract) KeyKPAdd = Key(glfw.KeyKPAdd) KeyKPEnter = Key(glfw.KeyKPEnter) KeyKPEqual = Key(glfw.KeyKPEqual) KeyLeftShift = Key(glfw.KeyLeftShift) KeyLeftControl = 
Key(glfw.KeyLeftControl) KeyLeftAlt = Key(glfw.KeyLeftAlt) KeyLeftSuper = Key(glfw.KeyLeftSuper) KeyRightShift = Key(glfw.KeyRightShift) KeyRightControl = Key(glfw.KeyRightControl) KeyRightAlt = Key(glfw.KeyRightAlt) KeyRightSuper = Key(glfw.KeyRightSuper) KeyMenu = Key(glfw.KeyMenu) KeyLast = Key(glfw.KeyLast) ) // Modifier keys const ( ModShift = ModifierKey(glfw.ModShift) ModControl = ModifierKey(glfw.ModControl) ModAlt = ModifierKey(glfw.ModAlt) ModSuper = ModifierKey(glfw.ModSuper) ) // Mouse buttons const ( MouseButton1 = MouseButton(glfw.MouseButton1) MouseButton2 = MouseButton(glfw.MouseButton2) MouseButton3 = MouseButton(glfw.MouseButton3) MouseButton4 = MouseButton(glfw.MouseButton4) MouseButton5 = MouseButton(glfw.MouseButton5) MouseButton6 = MouseButton(glfw.MouseButton6) MouseButton7 = MouseButton(glfw.MouseButton7) MouseButton8 = MouseButton(glfw.MouseButton8) MouseButtonLast = MouseButton(glfw.MouseButtonLast) MouseButtonLeft = MouseButton(glfw.MouseButtonLeft) MouseButtonRight = MouseButton(glfw.MouseButtonRight) MouseButtonMiddle = MouseButton(glfw.MouseButtonMiddle) ) // Input modes const ( CursorInputMode = InputMode(glfw.CursorMode) // See Cursor mode values StickyKeysInputMode = InputMode(glfw.StickyKeysMode) // Value can be either 1 or 0 StickyMouseButtonsInputMode = InputMode(glfw.StickyMouseButtonsMode) // Value can be either 1 or 0 ) // Cursor mode values const ( CursorNormal = CursorMode(glfw.CursorNormal) CursorHidden = CursorMode(glfw.CursorHidden) CursorDisabled = CursorMode(glfw.CursorDisabled) ) // GlfwWindow describes one glfw window type GlfwWindow struct { *glfw.Window // Embedded GLFW window core.Dispatcher // Embedded event dispatcher gls *gls.GLS // Associated OpenGL State fullscreen bool lastX int lastY int lastWidth int lastHeight int scaleX float64 scaleY float64 // Events keyEv KeyEvent charEv CharEvent mouseEv MouseEvent posEv PosEvent sizeEv SizeEvent cursorEv CursorEvent scrollEv ScrollEvent focusEv FocusEvent mods ModifierKey // Current modifier keys // Cursors cursors map[Cursor]*glfw.Cursor lastCursorKey Cursor } // Init initializes the GlfwWindow singleton with the specified width, height, and title. func Init(width, height int, title string) error { // Panic if already created if win != nil { panic(fmt.Errorf("can only call window.Init() once")) } // OpenGL functions must be executed in the same thread where // the context was created (by wmgr.CreateWindow()) runtime.LockOSThread() // Create wrapper window with dispatcher w := new(GlfwWindow) w.Dispatcher.Initialize() var err error // Initialize GLFW err = glfw.Init() if err != nil { return err } // Set window hints glfw.WindowHint(glfw.ContextVersionMajor, 3) glfw.WindowHint(glfw.ContextVersionMinor, 3) glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile) glfw.WindowHint(glfw.Samples, 8) // Set OpenGL forward compatible context only for OSX because it is required for OSX. // When this is set, glLineWidth(width) only accepts width=1.0 and generates an error // for any other values although the spec says it should ignore unsupported widths // and generate an error only when width <= 0. if runtime.GOOS == "darwin" { glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True) } // Create window and set it as the current context. // The window is created always as not full screen because if it is // created as full screen it not possible to revert it to windowed mode. // At the end of this function, the window will be set to full screen if requested. 
w.Window, err = glfw.CreateWindow(width, height, title, nil, nil) if err != nil { return err } w.MakeContextCurrent() // Create OpenGL state w.gls, err = gls.New() if err != nil { return err } // Compute and store scale fbw, fbh := w.GetFramebufferSize() w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) // Create map for cursors w.cursors = make(map[Cursor]*glfw.Cursor) w.lastCursorKey = CursorLast // Preallocate GLFW standard cursors w.cursors[ArrowCursor] = glfw.CreateStandardCursor(glfw.ArrowCursor) w.cursors[IBeamCursor] = glfw.CreateStandardCursor(glfw.IBeamCursor) w.cursors[CrosshairCursor] = glfw.CreateStandardCursor(glfw.CrosshairCursor) w.cursors[HandCursor] = glfw.CreateStandardCursor(glfw.HandCursor) w.cursors[HResizeCursor] = glfw.CreateStandardCursor(glfw.HResizeCursor) w.cursors[VResizeCursor] = glfw.CreateStandardCursor(glfw.VResizeCursor) // Preallocate extra G3N standard cursors (diagonal resize cursors) cursorDiag1Png := assets.MustAsset("cursors/diag1.png") // [/] cursorDiag2Png := assets.MustAsset("cursors/diag2.png") // [\] diag1Img, _, err := image.Decode(bytes.NewReader(cursorDiag1Png)) diag2Img, _, err := image.Decode(bytes.NewReader(cursorDiag2Png)) if err != nil { return err } w.cursors[DiagResize1Cursor] = glfw.CreateCursor(diag1Img, 8, 8) // [/] w.cursors[DiagResize2Cursor] = glfw.CreateCursor(diag2Img, 8, 8) // [\] // Set up key callback to dispatch event w.SetKeyCallback(func(x *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { w.keyEv.Key = Key(key) w.keyEv.Mods = ModifierKey(mods) w.mods = w.keyEv.Mods if action == glfw.Press { w.Dispatch(OnKeyDown, &w.keyEv) } else if action == glfw.Release { w.Dispatch(OnKeyUp, &w.keyEv) } else if action == glfw.Repeat { w.Dispatch(OnKeyRepeat, &w.keyEv) } }) // Set up char callback to dispatch event w.SetCharModsCallback(func(x *glfw.Window, char rune, mods glfw.ModifierKey) { w.charEv.Char = char w.charEv.Mods = ModifierKey(mods) w.Dispatch(OnChar, &w.charEv) }) // Set up mouse button callback to dispatch event w.SetMouseButtonCallback(func(x *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { xpos, ypos := x.GetCursorPos() w.mouseEv.Button = MouseButton(button) w.mouseEv.Mods = ModifierKey(mods) w.mouseEv.Xpos = float32(xpos) //* float32(w.scaleX) TODO w.mouseEv.Ypos = float32(ypos) //* float32(w.scaleY) if action == glfw.Press { w.Dispatch(OnMouseDown, &w.mouseEv) } else if action == glfw.Release { w.Dispatch(OnMouseUp, &w.mouseEv) } }) // Set up window size callback to dispatch event w.SetSizeCallback(func(x *glfw.Window, width int, height int) { fbw, fbh := x.GetFramebufferSize() w.sizeEv.Width = width w.sizeEv.Height = height w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) w.Dispatch(OnWindowSize, &w.sizeEv) }) // Set up window position callback to dispatch event w.SetPosCallback(func(x *glfw.Window, xpos int, ypos int) { w.posEv.Xpos = xpos w.posEv.Ypos = ypos w.Dispatch(OnWindowPos, &w.posEv) }) // Set up window focus callback to dispatch event w.SetFocusCallback(func(x *glfw.Window, focused bool) { w.focusEv.Focused = focused w.Dispatch(OnWindowFocus, &w.focusEv) }) // Set up window cursor position callback to dispatch event w.SetCursorPosCallback(func(x *glfw.Window, xpos float64, ypos float64) { w.cursorEv.Xpos = float32(xpos) w.cursorEv.Ypos = float32(ypos) w.cursorEv.Mods = w.mods w.Dispatch(OnCursor, &w.cursorEv) }) // Set up mouse wheel scroll callback to dispatch event 
w.SetScrollCallback(func(x *glfw.Window, xoff float64, yoff float64) { w.scrollEv.Xoffset = float32(xoff) w.scrollEv.Yoffset = float32(yoff) w.scrollEv.Mods = w.mods w.Dispatch(OnScroll, &w.scrollEv) }) win = w // Set singleton return nil } // Gls returns the associated OpenGL state. func (w *GlfwWindow) Gls() *gls.GLS { return w.gls } // FullScreen returns whether this windows is currently fullscreen. func (w *GlfwWindow) FullScreen() bool { return w.fullscreen } // SetFullScreen sets this window as fullscreen on the primary monitor // TODO allow for fullscreen with resolutions different than the monitor's func (w *GlfwWindow) SetFullScreen(full bool) { // If already in the desired state, nothing to do if w.fullscreen == full { return } // Set window fullscreen on the primary monitor if full { // Save current position and size of the window w.lastX, w.lastY = w.GetPos() w.lastWidth, w.lastHeight = w.GetSize() // Get size of primary monitor mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() width := vmode.Width height := vmode.Height // Set as fullscreen on the primary monitor w.SetMonitor(mon, 0, 0, width, height, vmode.RefreshRate) w.fullscreen = true } else { // Restore window to previous position and size w.SetMonitor(nil, w.lastX, w.lastY, w.lastWidth, w.lastHeight, glfw.DontCare) w.fullscreen = false } } // Destroy destroys this window and its context func (w *GlfwWindow) Destroy() { w.Window.Destroy() glfw.Terminate() runtime.UnlockOSThread() // Important when using the execution tracer } // Scale returns this window's DPI scale factor (FramebufferSize / Size) func (w *GlfwWindow) GetScale() (x float64, y float64) { return w.scaleX, w.scaleY } // ScreenResolution returns the screen resolution func (w *GlfwWindow) ScreenResolution(p interface{}) (width, height int) { mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() return vmode.Width, vmode.Height } // PollEvents process events in the event queue func (w *GlfwWindow) PollEvents() { glfw.PollEvents() } // SetSwapInterval sets the number of screen updates to wait from the time SwapBuffer() // is called before swapping the buffers and returning. func (w *GlfwWindow) SetSwapInterval(interval int) { glfw.SwapInterval(interval) } // SetCursor sets the window's cursor. func (w *GlfwWindow) SetCursor(cursor Cursor) { cur, ok := w.cursors[cursor] if !ok { panic("Invalid cursor") } w.Window.SetCursor(cur) } // CreateCursor creates a new custom cursor and returns an int handle. func (w *GlfwWindow) CreateCursor(imgFile string, xhot, yhot int) (Cursor, error) { // Open image file file, err := os.Open(imgFile) if err != nil { return 0, err } defer file.Close() // Decode image img, _, err := image.Decode(file) if err != nil { return 0, err } // Create and store cursor w.lastCursorKey += 1 w.cursors[Cursor(w.lastCursorKey)] = glfw.CreateCursor(img, xhot, yhot) return w.lastCursorKey, nil } // DisposeCursor deletes the existing custom cursor with the provided int handle. func (w *GlfwWindow) DisposeCursor(cursor Cursor) { if cursor <= CursorLast { panic("Can't dispose standard cursor") } w.cursors[cursor].Destroy() delete(w.cursors, cursor) } // DisposeAllCursors deletes all existing custom cursors. func (w *GlfwWindow) DisposeAllCustomCursors() { // Destroy and delete all custom cursors for key := range w.cursors { if key > CursorLast
} // Set the next cursor key as the last standard cursor key + 1 w.lastCursorKey = CursorLast } // Center centers the window on the screen. //func (w *GlfwWindow) Center() { // // // TODO //}
{ w.cursors[key].Destroy() delete(w.cursors, key) }
conditional_block
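The conditional_block completion above is the loop body of DisposeAllCustomCursors: only cursors whose key is greater than the last standard cursor key are destroyed and removed from the map, so the preallocated standard cursors survive. A tiny Go sketch of that selective map cleanup; the threshold and values are made up for illustration.

// cleanup.go: delete only the entries above a reserved threshold, keep the rest.
package main

import "fmt"

const lastStandard = 5 // keys <= lastStandard stand in for the standard cursors

// disposeCustom removes every entry above the threshold; in the real code each
// removed cursor is also destroyed before being deleted from the map.
func disposeCustom(m map[int]string) {
    for key := range m {
        if key > lastStandard {
            delete(m, key) // deleting during range is allowed in Go
        }
    }
}

func main() {
    cursors := map[int]string{1: "arrow", 2: "ibeam", 9: "custom-a", 12: "custom-b"}
    disposeCustom(cursors)
    fmt.Println(cursors) // only the standard entries remain
}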
glfw.go
// Copyright 2016 The G3N Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !wasm // +build !wasm package window import ( "bytes" "fmt" "image" _ "image/png" "os" "runtime" "github.com/g3n/engine/core" "github.com/g3n/engine/gls" "github.com/g3n/engine/gui/assets" "github.com/go-gl/glfw/v3.3/glfw" ) // Keycodes const ( KeyUnknown = Key(glfw.KeyUnknown) KeySpace = Key(glfw.KeySpace) KeyApostrophe = Key(glfw.KeyApostrophe) KeyComma = Key(glfw.KeyComma) KeyMinus = Key(glfw.KeyMinus) KeyPeriod = Key(glfw.KeyPeriod) KeySlash = Key(glfw.KeySlash) Key0 = Key(glfw.Key0) Key1 = Key(glfw.Key1) Key2 = Key(glfw.Key2) Key3 = Key(glfw.Key3) Key4 = Key(glfw.Key4) Key5 = Key(glfw.Key5) Key6 = Key(glfw.Key6) Key7 = Key(glfw.Key7) Key8 = Key(glfw.Key8) Key9 = Key(glfw.Key9) KeySemicolon = Key(glfw.KeySemicolon) KeyEqual = Key(glfw.KeyEqual) KeyA = Key(glfw.KeyA) KeyB = Key(glfw.KeyB) KeyC = Key(glfw.KeyC) KeyD = Key(glfw.KeyD) KeyE = Key(glfw.KeyE) KeyF = Key(glfw.KeyF) KeyG = Key(glfw.KeyG) KeyH = Key(glfw.KeyH) KeyI = Key(glfw.KeyI) KeyJ = Key(glfw.KeyJ) KeyK = Key(glfw.KeyK) KeyL = Key(glfw.KeyL) KeyM = Key(glfw.KeyM) KeyN = Key(glfw.KeyN) KeyO = Key(glfw.KeyO) KeyP = Key(glfw.KeyP) KeyQ = Key(glfw.KeyQ) KeyR = Key(glfw.KeyR) KeyS = Key(glfw.KeyS) KeyT = Key(glfw.KeyT) KeyU = Key(glfw.KeyU) KeyV = Key(glfw.KeyV) KeyW = Key(glfw.KeyW) KeyX = Key(glfw.KeyX) KeyY = Key(glfw.KeyY) KeyZ = Key(glfw.KeyZ) KeyLeftBracket = Key(glfw.KeyLeftBracket) KeyBackslash = Key(glfw.KeyBackslash) KeyRightBracket = Key(glfw.KeyRightBracket) KeyGraveAccent = Key(glfw.KeyGraveAccent) KeyWorld1 = Key(glfw.KeyWorld1) KeyWorld2 = Key(glfw.KeyWorld2) KeyEscape = Key(glfw.KeyEscape) KeyEnter = Key(glfw.KeyEnter) KeyTab = Key(glfw.KeyTab) KeyBackspace = Key(glfw.KeyBackspace) KeyInsert = Key(glfw.KeyInsert) KeyDelete = Key(glfw.KeyDelete) KeyRight = Key(glfw.KeyRight) KeyLeft = Key(glfw.KeyLeft) KeyDown = Key(glfw.KeyDown) KeyUp = Key(glfw.KeyUp) KeyPageUp = Key(glfw.KeyPageUp) KeyPageDown = Key(glfw.KeyPageDown) KeyHome = Key(glfw.KeyHome) KeyEnd = Key(glfw.KeyEnd) KeyCapsLock = Key(glfw.KeyCapsLock) KeyScrollLock = Key(glfw.KeyScrollLock) KeyNumLock = Key(glfw.KeyNumLock) KeyPrintScreen = Key(glfw.KeyPrintScreen) KeyPause = Key(glfw.KeyPause) KeyF1 = Key(glfw.KeyF1) KeyF2 = Key(glfw.KeyF2) KeyF3 = Key(glfw.KeyF3) KeyF4 = Key(glfw.KeyF4) KeyF5 = Key(glfw.KeyF5) KeyF6 = Key(glfw.KeyF6) KeyF7 = Key(glfw.KeyF7) KeyF8 = Key(glfw.KeyF8) KeyF9 = Key(glfw.KeyF9) KeyF10 = Key(glfw.KeyF10) KeyF11 = Key(glfw.KeyF11) KeyF12 = Key(glfw.KeyF12) KeyF13 = Key(glfw.KeyF13) KeyF14 = Key(glfw.KeyF14) KeyF15 = Key(glfw.KeyF15) KeyF16 = Key(glfw.KeyF16) KeyF17 = Key(glfw.KeyF17) KeyF18 = Key(glfw.KeyF18) KeyF19 = Key(glfw.KeyF19) KeyF20 = Key(glfw.KeyF20) KeyF21 = Key(glfw.KeyF21) KeyF22 = Key(glfw.KeyF22) KeyF23 = Key(glfw.KeyF23) KeyF24 = Key(glfw.KeyF24) KeyF25 = Key(glfw.KeyF25) KeyKP0 = Key(glfw.KeyKP0) KeyKP1 = Key(glfw.KeyKP1) KeyKP2 = Key(glfw.KeyKP2) KeyKP3 = Key(glfw.KeyKP3) KeyKP4 = Key(glfw.KeyKP4) KeyKP5 = Key(glfw.KeyKP5) KeyKP6 = Key(glfw.KeyKP6) KeyKP7 = Key(glfw.KeyKP7) KeyKP8 = Key(glfw.KeyKP8) KeyKP9 = Key(glfw.KeyKP9) KeyKPDecimal = Key(glfw.KeyKPDecimal) KeyKPDivide = Key(glfw.KeyKPDivide) KeyKPMultiply = Key(glfw.KeyKPMultiply) KeyKPSubtract = Key(glfw.KeyKPSubtract) KeyKPAdd = Key(glfw.KeyKPAdd) KeyKPEnter = Key(glfw.KeyKPEnter) KeyKPEqual = Key(glfw.KeyKPEqual) KeyLeftShift = Key(glfw.KeyLeftShift) KeyLeftControl = 
Key(glfw.KeyLeftControl) KeyLeftAlt = Key(glfw.KeyLeftAlt) KeyLeftSuper = Key(glfw.KeyLeftSuper) KeyRightShift = Key(glfw.KeyRightShift) KeyRightControl = Key(glfw.KeyRightControl) KeyRightAlt = Key(glfw.KeyRightAlt) KeyRightSuper = Key(glfw.KeyRightSuper) KeyMenu = Key(glfw.KeyMenu) KeyLast = Key(glfw.KeyLast) ) // Modifier keys const ( ModShift = ModifierKey(glfw.ModShift) ModControl = ModifierKey(glfw.ModControl) ModAlt = ModifierKey(glfw.ModAlt) ModSuper = ModifierKey(glfw.ModSuper) ) // Mouse buttons const ( MouseButton1 = MouseButton(glfw.MouseButton1) MouseButton2 = MouseButton(glfw.MouseButton2) MouseButton3 = MouseButton(glfw.MouseButton3) MouseButton4 = MouseButton(glfw.MouseButton4) MouseButton5 = MouseButton(glfw.MouseButton5) MouseButton6 = MouseButton(glfw.MouseButton6) MouseButton7 = MouseButton(glfw.MouseButton7) MouseButton8 = MouseButton(glfw.MouseButton8) MouseButtonLast = MouseButton(glfw.MouseButtonLast) MouseButtonLeft = MouseButton(glfw.MouseButtonLeft) MouseButtonRight = MouseButton(glfw.MouseButtonRight) MouseButtonMiddle = MouseButton(glfw.MouseButtonMiddle) ) // Input modes const ( CursorInputMode = InputMode(glfw.CursorMode) // See Cursor mode values StickyKeysInputMode = InputMode(glfw.StickyKeysMode) // Value can be either 1 or 0 StickyMouseButtonsInputMode = InputMode(glfw.StickyMouseButtonsMode) // Value can be either 1 or 0 ) // Cursor mode values const ( CursorNormal = CursorMode(glfw.CursorNormal) CursorHidden = CursorMode(glfw.CursorHidden) CursorDisabled = CursorMode(glfw.CursorDisabled) ) // GlfwWindow describes one glfw window type GlfwWindow struct { *glfw.Window // Embedded GLFW window core.Dispatcher // Embedded event dispatcher gls *gls.GLS // Associated OpenGL State fullscreen bool lastX int lastY int lastWidth int lastHeight int scaleX float64 scaleY float64 // Events keyEv KeyEvent charEv CharEvent mouseEv MouseEvent posEv PosEvent sizeEv SizeEvent cursorEv CursorEvent scrollEv ScrollEvent focusEv FocusEvent mods ModifierKey // Current modifier keys // Cursors cursors map[Cursor]*glfw.Cursor lastCursorKey Cursor } // Init initializes the GlfwWindow singleton with the specified width, height, and title. func Init(width, height int, title string) error { // Panic if already created if win != nil { panic(fmt.Errorf("can only call window.Init() once")) } // OpenGL functions must be executed in the same thread where // the context was created (by wmgr.CreateWindow()) runtime.LockOSThread() // Create wrapper window with dispatcher w := new(GlfwWindow) w.Dispatcher.Initialize() var err error // Initialize GLFW err = glfw.Init() if err != nil { return err } // Set window hints glfw.WindowHint(glfw.ContextVersionMajor, 3) glfw.WindowHint(glfw.ContextVersionMinor, 3) glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile) glfw.WindowHint(glfw.Samples, 8) // Set OpenGL forward compatible context only for OSX because it is required for OSX. // When this is set, glLineWidth(width) only accepts width=1.0 and generates an error // for any other values although the spec says it should ignore unsupported widths // and generate an error only when width <= 0. if runtime.GOOS == "darwin" { glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True) } // Create window and set it as the current context. // The window is created always as not full screen because if it is // created as full screen it not possible to revert it to windowed mode. // At the end of this function, the window will be set to full screen if requested. 
w.Window, err = glfw.CreateWindow(width, height, title, nil, nil) if err != nil { return err } w.MakeContextCurrent() // Create OpenGL state w.gls, err = gls.New() if err != nil { return err } // Compute and store scale fbw, fbh := w.GetFramebufferSize() w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) // Create map for cursors w.cursors = make(map[Cursor]*glfw.Cursor) w.lastCursorKey = CursorLast // Preallocate GLFW standard cursors w.cursors[ArrowCursor] = glfw.CreateStandardCursor(glfw.ArrowCursor) w.cursors[IBeamCursor] = glfw.CreateStandardCursor(glfw.IBeamCursor) w.cursors[CrosshairCursor] = glfw.CreateStandardCursor(glfw.CrosshairCursor) w.cursors[HandCursor] = glfw.CreateStandardCursor(glfw.HandCursor) w.cursors[HResizeCursor] = glfw.CreateStandardCursor(glfw.HResizeCursor) w.cursors[VResizeCursor] = glfw.CreateStandardCursor(glfw.VResizeCursor) // Preallocate extra G3N standard cursors (diagonal resize cursors) cursorDiag1Png := assets.MustAsset("cursors/diag1.png") // [/] cursorDiag2Png := assets.MustAsset("cursors/diag2.png") // [\] diag1Img, _, err := image.Decode(bytes.NewReader(cursorDiag1Png)) diag2Img, _, err := image.Decode(bytes.NewReader(cursorDiag2Png)) if err != nil { return err } w.cursors[DiagResize1Cursor] = glfw.CreateCursor(diag1Img, 8, 8) // [/] w.cursors[DiagResize2Cursor] = glfw.CreateCursor(diag2Img, 8, 8) // [\] // Set up key callback to dispatch event w.SetKeyCallback(func(x *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { w.keyEv.Key = Key(key) w.keyEv.Mods = ModifierKey(mods) w.mods = w.keyEv.Mods if action == glfw.Press { w.Dispatch(OnKeyDown, &w.keyEv) } else if action == glfw.Release { w.Dispatch(OnKeyUp, &w.keyEv) } else if action == glfw.Repeat { w.Dispatch(OnKeyRepeat, &w.keyEv) } }) // Set up char callback to dispatch event w.SetCharModsCallback(func(x *glfw.Window, char rune, mods glfw.ModifierKey) { w.charEv.Char = char w.charEv.Mods = ModifierKey(mods) w.Dispatch(OnChar, &w.charEv) }) // Set up mouse button callback to dispatch event w.SetMouseButtonCallback(func(x *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { xpos, ypos := x.GetCursorPos() w.mouseEv.Button = MouseButton(button) w.mouseEv.Mods = ModifierKey(mods) w.mouseEv.Xpos = float32(xpos) //* float32(w.scaleX) TODO w.mouseEv.Ypos = float32(ypos) //* float32(w.scaleY) if action == glfw.Press { w.Dispatch(OnMouseDown, &w.mouseEv) } else if action == glfw.Release { w.Dispatch(OnMouseUp, &w.mouseEv) } }) // Set up window size callback to dispatch event w.SetSizeCallback(func(x *glfw.Window, width int, height int) { fbw, fbh := x.GetFramebufferSize() w.sizeEv.Width = width w.sizeEv.Height = height w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) w.Dispatch(OnWindowSize, &w.sizeEv) }) // Set up window position callback to dispatch event w.SetPosCallback(func(x *glfw.Window, xpos int, ypos int) { w.posEv.Xpos = xpos w.posEv.Ypos = ypos w.Dispatch(OnWindowPos, &w.posEv) }) // Set up window focus callback to dispatch event w.SetFocusCallback(func(x *glfw.Window, focused bool) { w.focusEv.Focused = focused w.Dispatch(OnWindowFocus, &w.focusEv) }) // Set up window cursor position callback to dispatch event w.SetCursorPosCallback(func(x *glfw.Window, xpos float64, ypos float64) { w.cursorEv.Xpos = float32(xpos) w.cursorEv.Ypos = float32(ypos) w.cursorEv.Mods = w.mods w.Dispatch(OnCursor, &w.cursorEv) }) // Set up mouse wheel scroll callback to dispatch event 
w.SetScrollCallback(func(x *glfw.Window, xoff float64, yoff float64) { w.scrollEv.Xoffset = float32(xoff) w.scrollEv.Yoffset = float32(yoff) w.scrollEv.Mods = w.mods w.Dispatch(OnScroll, &w.scrollEv) }) win = w // Set singleton return nil } // Gls returns the associated OpenGL state. func (w *GlfwWindow) Gls() *gls.GLS { return w.gls } // FullScreen returns whether this windows is currently fullscreen. func (w *GlfwWindow)
() bool { return w.fullscreen } // SetFullScreen sets this window as fullscreen on the primary monitor // TODO allow for fullscreen with resolutions different than the monitor's func (w *GlfwWindow) SetFullScreen(full bool) { // If already in the desired state, nothing to do if w.fullscreen == full { return } // Set window fullscreen on the primary monitor if full { // Save current position and size of the window w.lastX, w.lastY = w.GetPos() w.lastWidth, w.lastHeight = w.GetSize() // Get size of primary monitor mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() width := vmode.Width height := vmode.Height // Set as fullscreen on the primary monitor w.SetMonitor(mon, 0, 0, width, height, vmode.RefreshRate) w.fullscreen = true } else { // Restore window to previous position and size w.SetMonitor(nil, w.lastX, w.lastY, w.lastWidth, w.lastHeight, glfw.DontCare) w.fullscreen = false } } // Destroy destroys this window and its context func (w *GlfwWindow) Destroy() { w.Window.Destroy() glfw.Terminate() runtime.UnlockOSThread() // Important when using the execution tracer } // Scale returns this window's DPI scale factor (FramebufferSize / Size) func (w *GlfwWindow) GetScale() (x float64, y float64) { return w.scaleX, w.scaleY } // ScreenResolution returns the screen resolution func (w *GlfwWindow) ScreenResolution(p interface{}) (width, height int) { mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() return vmode.Width, vmode.Height } // PollEvents process events in the event queue func (w *GlfwWindow) PollEvents() { glfw.PollEvents() } // SetSwapInterval sets the number of screen updates to wait from the time SwapBuffer() // is called before swapping the buffers and returning. func (w *GlfwWindow) SetSwapInterval(interval int) { glfw.SwapInterval(interval) } // SetCursor sets the window's cursor. func (w *GlfwWindow) SetCursor(cursor Cursor) { cur, ok := w.cursors[cursor] if !ok { panic("Invalid cursor") } w.Window.SetCursor(cur) } // CreateCursor creates a new custom cursor and returns an int handle. func (w *GlfwWindow) CreateCursor(imgFile string, xhot, yhot int) (Cursor, error) { // Open image file file, err := os.Open(imgFile) if err != nil { return 0, err } defer file.Close() // Decode image img, _, err := image.Decode(file) if err != nil { return 0, err } // Create and store cursor w.lastCursorKey += 1 w.cursors[Cursor(w.lastCursorKey)] = glfw.CreateCursor(img, xhot, yhot) return w.lastCursorKey, nil } // DisposeCursor deletes the existing custom cursor with the provided int handle. func (w *GlfwWindow) DisposeCursor(cursor Cursor) { if cursor <= CursorLast { panic("Can't dispose standard cursor") } w.cursors[cursor].Destroy() delete(w.cursors, cursor) } // DisposeAllCursors deletes all existing custom cursors. func (w *GlfwWindow) DisposeAllCustomCursors() { // Destroy and delete all custom cursors for key := range w.cursors { if key > CursorLast { w.cursors[key].Destroy() delete(w.cursors, key) } } // Set the next cursor key as the last standard cursor key + 1 w.lastCursorKey = CursorLast } // Center centers the window on the screen. //func (w *GlfwWindow) Center() { // // // TODO //}
FullScreen
identifier_name
glfw.go
// Copyright 2016 The G3N Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !wasm // +build !wasm package window import ( "bytes" "fmt" "image" _ "image/png" "os" "runtime" "github.com/g3n/engine/core" "github.com/g3n/engine/gls" "github.com/g3n/engine/gui/assets" "github.com/go-gl/glfw/v3.3/glfw" ) // Keycodes const ( KeyUnknown = Key(glfw.KeyUnknown) KeySpace = Key(glfw.KeySpace) KeyApostrophe = Key(glfw.KeyApostrophe) KeyComma = Key(glfw.KeyComma) KeyMinus = Key(glfw.KeyMinus) KeyPeriod = Key(glfw.KeyPeriod) KeySlash = Key(glfw.KeySlash) Key0 = Key(glfw.Key0) Key1 = Key(glfw.Key1) Key2 = Key(glfw.Key2) Key3 = Key(glfw.Key3) Key4 = Key(glfw.Key4) Key5 = Key(glfw.Key5) Key6 = Key(glfw.Key6) Key7 = Key(glfw.Key7) Key8 = Key(glfw.Key8) Key9 = Key(glfw.Key9) KeySemicolon = Key(glfw.KeySemicolon) KeyEqual = Key(glfw.KeyEqual) KeyA = Key(glfw.KeyA) KeyB = Key(glfw.KeyB) KeyC = Key(glfw.KeyC) KeyD = Key(glfw.KeyD) KeyE = Key(glfw.KeyE) KeyF = Key(glfw.KeyF) KeyG = Key(glfw.KeyG) KeyH = Key(glfw.KeyH) KeyI = Key(glfw.KeyI) KeyJ = Key(glfw.KeyJ) KeyK = Key(glfw.KeyK) KeyL = Key(glfw.KeyL) KeyM = Key(glfw.KeyM) KeyN = Key(glfw.KeyN) KeyO = Key(glfw.KeyO) KeyP = Key(glfw.KeyP) KeyQ = Key(glfw.KeyQ) KeyR = Key(glfw.KeyR) KeyS = Key(glfw.KeyS) KeyT = Key(glfw.KeyT) KeyU = Key(glfw.KeyU) KeyV = Key(glfw.KeyV) KeyW = Key(glfw.KeyW) KeyX = Key(glfw.KeyX) KeyY = Key(glfw.KeyY) KeyZ = Key(glfw.KeyZ) KeyLeftBracket = Key(glfw.KeyLeftBracket) KeyBackslash = Key(glfw.KeyBackslash) KeyRightBracket = Key(glfw.KeyRightBracket) KeyGraveAccent = Key(glfw.KeyGraveAccent) KeyWorld1 = Key(glfw.KeyWorld1) KeyWorld2 = Key(glfw.KeyWorld2) KeyEscape = Key(glfw.KeyEscape) KeyEnter = Key(glfw.KeyEnter) KeyTab = Key(glfw.KeyTab) KeyBackspace = Key(glfw.KeyBackspace) KeyInsert = Key(glfw.KeyInsert) KeyDelete = Key(glfw.KeyDelete) KeyRight = Key(glfw.KeyRight) KeyLeft = Key(glfw.KeyLeft) KeyDown = Key(glfw.KeyDown) KeyUp = Key(glfw.KeyUp) KeyPageUp = Key(glfw.KeyPageUp) KeyPageDown = Key(glfw.KeyPageDown) KeyHome = Key(glfw.KeyHome) KeyEnd = Key(glfw.KeyEnd) KeyCapsLock = Key(glfw.KeyCapsLock) KeyScrollLock = Key(glfw.KeyScrollLock) KeyNumLock = Key(glfw.KeyNumLock) KeyPrintScreen = Key(glfw.KeyPrintScreen) KeyPause = Key(glfw.KeyPause) KeyF1 = Key(glfw.KeyF1) KeyF2 = Key(glfw.KeyF2) KeyF3 = Key(glfw.KeyF3) KeyF4 = Key(glfw.KeyF4) KeyF5 = Key(glfw.KeyF5) KeyF6 = Key(glfw.KeyF6) KeyF7 = Key(glfw.KeyF7) KeyF8 = Key(glfw.KeyF8) KeyF9 = Key(glfw.KeyF9) KeyF10 = Key(glfw.KeyF10) KeyF11 = Key(glfw.KeyF11) KeyF12 = Key(glfw.KeyF12) KeyF13 = Key(glfw.KeyF13) KeyF14 = Key(glfw.KeyF14) KeyF15 = Key(glfw.KeyF15) KeyF16 = Key(glfw.KeyF16) KeyF17 = Key(glfw.KeyF17) KeyF18 = Key(glfw.KeyF18) KeyF19 = Key(glfw.KeyF19) KeyF20 = Key(glfw.KeyF20) KeyF21 = Key(glfw.KeyF21) KeyF22 = Key(glfw.KeyF22) KeyF23 = Key(glfw.KeyF23) KeyF24 = Key(glfw.KeyF24) KeyF25 = Key(glfw.KeyF25) KeyKP0 = Key(glfw.KeyKP0) KeyKP1 = Key(glfw.KeyKP1) KeyKP2 = Key(glfw.KeyKP2) KeyKP3 = Key(glfw.KeyKP3) KeyKP4 = Key(glfw.KeyKP4) KeyKP5 = Key(glfw.KeyKP5) KeyKP6 = Key(glfw.KeyKP6) KeyKP7 = Key(glfw.KeyKP7) KeyKP8 = Key(glfw.KeyKP8) KeyKP9 = Key(glfw.KeyKP9) KeyKPDecimal = Key(glfw.KeyKPDecimal) KeyKPDivide = Key(glfw.KeyKPDivide) KeyKPMultiply = Key(glfw.KeyKPMultiply) KeyKPSubtract = Key(glfw.KeyKPSubtract) KeyKPAdd = Key(glfw.KeyKPAdd) KeyKPEnter = Key(glfw.KeyKPEnter) KeyKPEqual = Key(glfw.KeyKPEqual) KeyLeftShift = Key(glfw.KeyLeftShift) KeyLeftControl = 
Key(glfw.KeyLeftControl) KeyLeftAlt = Key(glfw.KeyLeftAlt) KeyLeftSuper = Key(glfw.KeyLeftSuper) KeyRightShift = Key(glfw.KeyRightShift) KeyRightControl = Key(glfw.KeyRightControl) KeyRightAlt = Key(glfw.KeyRightAlt) KeyRightSuper = Key(glfw.KeyRightSuper) KeyMenu = Key(glfw.KeyMenu) KeyLast = Key(glfw.KeyLast) ) // Modifier keys const ( ModShift = ModifierKey(glfw.ModShift) ModControl = ModifierKey(glfw.ModControl) ModAlt = ModifierKey(glfw.ModAlt) ModSuper = ModifierKey(glfw.ModSuper) ) // Mouse buttons const ( MouseButton1 = MouseButton(glfw.MouseButton1) MouseButton2 = MouseButton(glfw.MouseButton2) MouseButton3 = MouseButton(glfw.MouseButton3) MouseButton4 = MouseButton(glfw.MouseButton4) MouseButton5 = MouseButton(glfw.MouseButton5) MouseButton6 = MouseButton(glfw.MouseButton6) MouseButton7 = MouseButton(glfw.MouseButton7) MouseButton8 = MouseButton(glfw.MouseButton8) MouseButtonLast = MouseButton(glfw.MouseButtonLast) MouseButtonLeft = MouseButton(glfw.MouseButtonLeft) MouseButtonRight = MouseButton(glfw.MouseButtonRight) MouseButtonMiddle = MouseButton(glfw.MouseButtonMiddle) ) // Input modes const ( CursorInputMode = InputMode(glfw.CursorMode) // See Cursor mode values StickyKeysInputMode = InputMode(glfw.StickyKeysMode) // Value can be either 1 or 0 StickyMouseButtonsInputMode = InputMode(glfw.StickyMouseButtonsMode) // Value can be either 1 or 0 ) // Cursor mode values const ( CursorNormal = CursorMode(glfw.CursorNormal) CursorHidden = CursorMode(glfw.CursorHidden) CursorDisabled = CursorMode(glfw.CursorDisabled) ) // GlfwWindow describes one glfw window type GlfwWindow struct { *glfw.Window // Embedded GLFW window core.Dispatcher // Embedded event dispatcher gls *gls.GLS // Associated OpenGL State fullscreen bool lastX int lastY int lastWidth int lastHeight int scaleX float64 scaleY float64 // Events keyEv KeyEvent charEv CharEvent mouseEv MouseEvent posEv PosEvent sizeEv SizeEvent cursorEv CursorEvent scrollEv ScrollEvent focusEv FocusEvent mods ModifierKey // Current modifier keys // Cursors cursors map[Cursor]*glfw.Cursor lastCursorKey Cursor } // Init initializes the GlfwWindow singleton with the specified width, height, and title. func Init(width, height int, title string) error { // Panic if already created if win != nil { panic(fmt.Errorf("can only call window.Init() once")) } // OpenGL functions must be executed in the same thread where // the context was created (by wmgr.CreateWindow()) runtime.LockOSThread() // Create wrapper window with dispatcher w := new(GlfwWindow) w.Dispatcher.Initialize() var err error // Initialize GLFW err = glfw.Init() if err != nil { return err } // Set window hints glfw.WindowHint(glfw.ContextVersionMajor, 3) glfw.WindowHint(glfw.ContextVersionMinor, 3) glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile) glfw.WindowHint(glfw.Samples, 8) // Set OpenGL forward compatible context only for OSX because it is required for OSX. // When this is set, glLineWidth(width) only accepts width=1.0 and generates an error // for any other values although the spec says it should ignore unsupported widths // and generate an error only when width <= 0. if runtime.GOOS == "darwin" { glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True) } // Create window and set it as the current context. // The window is created always as not full screen because if it is // created as full screen it not possible to revert it to windowed mode. // At the end of this function, the window will be set to full screen if requested. 
w.Window, err = glfw.CreateWindow(width, height, title, nil, nil) if err != nil { return err } w.MakeContextCurrent() // Create OpenGL state w.gls, err = gls.New() if err != nil { return err } // Compute and store scale fbw, fbh := w.GetFramebufferSize() w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) // Create map for cursors w.cursors = make(map[Cursor]*glfw.Cursor) w.lastCursorKey = CursorLast // Preallocate GLFW standard cursors w.cursors[ArrowCursor] = glfw.CreateStandardCursor(glfw.ArrowCursor) w.cursors[IBeamCursor] = glfw.CreateStandardCursor(glfw.IBeamCursor) w.cursors[CrosshairCursor] = glfw.CreateStandardCursor(glfw.CrosshairCursor) w.cursors[HandCursor] = glfw.CreateStandardCursor(glfw.HandCursor) w.cursors[HResizeCursor] = glfw.CreateStandardCursor(glfw.HResizeCursor) w.cursors[VResizeCursor] = glfw.CreateStandardCursor(glfw.VResizeCursor) // Preallocate extra G3N standard cursors (diagonal resize cursors) cursorDiag1Png := assets.MustAsset("cursors/diag1.png") // [/] cursorDiag2Png := assets.MustAsset("cursors/diag2.png") // [\] diag1Img, _, err := image.Decode(bytes.NewReader(cursorDiag1Png)) diag2Img, _, err := image.Decode(bytes.NewReader(cursorDiag2Png)) if err != nil { return err } w.cursors[DiagResize1Cursor] = glfw.CreateCursor(diag1Img, 8, 8) // [/] w.cursors[DiagResize2Cursor] = glfw.CreateCursor(diag2Img, 8, 8) // [\] // Set up key callback to dispatch event w.SetKeyCallback(func(x *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) { w.keyEv.Key = Key(key) w.keyEv.Mods = ModifierKey(mods) w.mods = w.keyEv.Mods if action == glfw.Press { w.Dispatch(OnKeyDown, &w.keyEv) } else if action == glfw.Release { w.Dispatch(OnKeyUp, &w.keyEv) } else if action == glfw.Repeat { w.Dispatch(OnKeyRepeat, &w.keyEv) } }) // Set up char callback to dispatch event w.SetCharModsCallback(func(x *glfw.Window, char rune, mods glfw.ModifierKey) { w.charEv.Char = char w.charEv.Mods = ModifierKey(mods) w.Dispatch(OnChar, &w.charEv) }) // Set up mouse button callback to dispatch event w.SetMouseButtonCallback(func(x *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) { xpos, ypos := x.GetCursorPos() w.mouseEv.Button = MouseButton(button) w.mouseEv.Mods = ModifierKey(mods) w.mouseEv.Xpos = float32(xpos) //* float32(w.scaleX) TODO w.mouseEv.Ypos = float32(ypos) //* float32(w.scaleY) if action == glfw.Press { w.Dispatch(OnMouseDown, &w.mouseEv) } else if action == glfw.Release { w.Dispatch(OnMouseUp, &w.mouseEv) } }) // Set up window size callback to dispatch event w.SetSizeCallback(func(x *glfw.Window, width int, height int) { fbw, fbh := x.GetFramebufferSize() w.sizeEv.Width = width w.sizeEv.Height = height w.scaleX = float64(fbw) / float64(width) w.scaleY = float64(fbh) / float64(height) w.Dispatch(OnWindowSize, &w.sizeEv) }) // Set up window position callback to dispatch event w.SetPosCallback(func(x *glfw.Window, xpos int, ypos int) { w.posEv.Xpos = xpos w.posEv.Ypos = ypos w.Dispatch(OnWindowPos, &w.posEv) }) // Set up window focus callback to dispatch event w.SetFocusCallback(func(x *glfw.Window, focused bool) { w.focusEv.Focused = focused w.Dispatch(OnWindowFocus, &w.focusEv) }) // Set up window cursor position callback to dispatch event w.SetCursorPosCallback(func(x *glfw.Window, xpos float64, ypos float64) { w.cursorEv.Xpos = float32(xpos) w.cursorEv.Ypos = float32(ypos) w.cursorEv.Mods = w.mods w.Dispatch(OnCursor, &w.cursorEv) }) // Set up mouse wheel scroll callback to dispatch event 
w.SetScrollCallback(func(x *glfw.Window, xoff float64, yoff float64) { w.scrollEv.Xoffset = float32(xoff) w.scrollEv.Yoffset = float32(yoff) w.scrollEv.Mods = w.mods w.Dispatch(OnScroll, &w.scrollEv) }) win = w // Set singleton return nil } // Gls returns the associated OpenGL state. func (w *GlfwWindow) Gls() *gls.GLS { return w.gls } // FullScreen returns whether this windows is currently fullscreen. func (w *GlfwWindow) FullScreen() bool { return w.fullscreen } // SetFullScreen sets this window as fullscreen on the primary monitor // TODO allow for fullscreen with resolutions different than the monitor's func (w *GlfwWindow) SetFullScreen(full bool) { // If already in the desired state, nothing to do if w.fullscreen == full { return } // Set window fullscreen on the primary monitor if full { // Save current position and size of the window w.lastX, w.lastY = w.GetPos() w.lastWidth, w.lastHeight = w.GetSize() // Get size of primary monitor mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() width := vmode.Width height := vmode.Height // Set as fullscreen on the primary monitor w.SetMonitor(mon, 0, 0, width, height, vmode.RefreshRate) w.fullscreen = true } else { // Restore window to previous position and size w.SetMonitor(nil, w.lastX, w.lastY, w.lastWidth, w.lastHeight, glfw.DontCare) w.fullscreen = false } } // Destroy destroys this window and its context func (w *GlfwWindow) Destroy() { w.Window.Destroy() glfw.Terminate() runtime.UnlockOSThread() // Important when using the execution tracer } // Scale returns this window's DPI scale factor (FramebufferSize / Size) func (w *GlfwWindow) GetScale() (x float64, y float64) { return w.scaleX, w.scaleY } // ScreenResolution returns the screen resolution func (w *GlfwWindow) ScreenResolution(p interface{}) (width, height int) { mon := glfw.GetPrimaryMonitor() vmode := mon.GetVideoMode() return vmode.Width, vmode.Height } // PollEvents process events in the event queue func (w *GlfwWindow) PollEvents() { glfw.PollEvents() } // SetSwapInterval sets the number of screen updates to wait from the time SwapBuffer() // is called before swapping the buffers and returning. func (w *GlfwWindow) SetSwapInterval(interval int) { glfw.SwapInterval(interval) } // SetCursor sets the window's cursor. func (w *GlfwWindow) SetCursor(cursor Cursor) { cur, ok := w.cursors[cursor] if !ok { panic("Invalid cursor") } w.Window.SetCursor(cur) } // CreateCursor creates a new custom cursor and returns an int handle. func (w *GlfwWindow) CreateCursor(imgFile string, xhot, yhot int) (Cursor, error) { // Open image file file, err := os.Open(imgFile) if err != nil { return 0, err } defer file.Close() // Decode image img, _, err := image.Decode(file) if err != nil { return 0, err
return w.lastCursorKey, nil } // DisposeCursor deletes the existing custom cursor with the provided int handle. func (w *GlfwWindow) DisposeCursor(cursor Cursor) { if cursor <= CursorLast { panic("Can't dispose standard cursor") } w.cursors[cursor].Destroy() delete(w.cursors, cursor) } // DisposeAllCustomCursors deletes all existing custom cursors. func (w *GlfwWindow) DisposeAllCustomCursors() { // Destroy and delete all custom cursors for key := range w.cursors { if key > CursorLast { w.cursors[key].Destroy() delete(w.cursors, key) } } // Set the next cursor key as the last standard cursor key + 1 w.lastCursorKey = CursorLast } // Center centers the window on the screen. //func (w *GlfwWindow) Center() { // // // TODO //}
} // Create and store cursor w.lastCursorKey += 1 w.cursors[Cursor(w.lastCursorKey)] = glfw.CreateCursor(img, xhot, yhot)
random_line_split
rf_model.py
""" CCC Team 42, Melbourne Thuy Ngoc Ha - 963370 Lan Zhou - 824371 Zijian Wang - 950618 Ivan Chee - 736901 Duer Wang - 824325 """ """ A python file used for making predictions on tweeters that lack food information or homeless information. Using random forest classification model for predicting food. Using random forest regression model for predicting homeless and homeless trend. """ import sys import os import httplib2 import json import csv import codecs import time as t from couch import Couch from pyspark.sql import SparkSession from pyspark.mllib.linalg import Vectors from pyspark.mllib.regression import LabeledPoint from pyspark.mllib.tree import RandomForest from pyspark.sql.functions import udf from pyspark.sql.types import * from keywords import Keywords # set up spark environment #os.environ['SPARK_HOME'] = "spark" #sys.path.append("spark/python") #sys.path.append("spark/python/lib") COUCHDB_NAME = "" OUT_COUCHDB_NAME = "" REFORMED_FILE = "" APP_NAME = "random forest model" SPARK_URL = "local[*]" RANDOM_SEED = 12345 TRAINING_DATA_RATIO = 0.7 RF_NUM_TREES = 10 RF_MAX_DEPTH = 5 RF_NUM_BINS = 32 food_dict = {} rev_dict = {} food_pre = False homeless_pre = False """ --------------------------------------------------------- ------------------ Auxiliary Functions ------------------ --------------------------------------------------------- """ # get coordinates of a given city def cityPos(name): url = "https://maps.googleapis.com/maps/api/geocode/json?" + \ "key=AIzaSyBsZErhxaT1oVgMrT-xGLcAN5nK3UHeGBU&address=" + name req = httplib2.Http(".cache") resp, content = req.request(url, "GET") res = json.loads(content) return res["results"][0]["geometry"] # read data from couchdb and reform them to read easier def trans(path): con = Couch(COUCHDB_NAME) jsonData = con.query_all() csvfile = open(REFORMED_FILE, 'w', newline='') writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL) keys=['id', 'time', 'timestamp', 'lat', 'lng', 'polarity', 'followers', \ 'following', 'homeless', 'homeless_trend', 'food_class'] writer.writerow(keys) i = 0 for dic in jsonData: try: # get coordinates if dic['location']['coordinates'] is None: city = dic['location']['place_name'] city = city.replace(" ","%20") coor = cityPos(city) lng = coor['location']['lng'] lat = coor['location']['lat'] else: lng = dic['location']['coordinates'][0] lat = dic['location']['coordinates'][1] # get time amd timesptamp time = dic['created_at']['day']+ '-' + \ trans_month(dic['created_at']['month'])+ '-' + \ dic['created_at']['year']+ ' ' +dic['created_at']['time'] timeArray = t.strptime(time, "%d-%m-%Y %H:%M:%S") timestamp = t.mktime(timeArray) # to ensure at least one of homeless info and food info appears home = dic['homeless'] foods = dic['food_list'] if (home is None) and (foods is None or len(foods) == 0): continue # get homeless information if home is None: homeless = -1 homeless_trend = 0 else: homeless = dic['homeless']['cnt16'] homeless_trend = dic['homeless']['incre/decre'] # get food if foods is None or len(foods) == 0: writer.writerow([i, time, timestamp, lat, lng, dic['polarity'], \ dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, "-1"]) i += 1 else: for food in foods: food_class = get_food_class(food) writer.writerow([i, time, timestamp, lat, lng, \ dic['polarity'], dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, food_class]) i += 1 except: continue csvfile.close() def trans_month(month): month_dic = {'Jan': '01', 'Feb': '02', 'Mar': '03', 
'Apr': '04', \ 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', \ 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'} return month_dic[month] # transform food name into food classes that are integers def get_food_class(food): if not food in food_dict.keys(): food_dict[food] = str(len(food_dict)) return food_dict[food] def generate_rev_dict(): for key,value in food_dict.items(): rev_dict[value] = key # get food name by food class def get_food_type(food_class): the_class = str(food_class) if the_class in rev_dict.keys(): return rev_dict[the_class] return None # get food group by food name def get_food_group(food):
""" --------------------------------------------------------- --------------------- Main Function --------------------- --------------------------------------------------------- """ if __name__ == "__main__": COUCHDB_NAME = sys.argv[1] OUT_COUCHDB_NAME = sys.argv[2] REFORMED_FILE = sys.argv[3] # create spark session spark = SparkSession.builder.appName(APP_NAME) \ .master(SPARK_URL).getOrCreate() """ ---------- data preparation ---------- """ # read data from couchdb trans(REFORMED_FILE) # reform data into a dataframe df = spark.read.options(header = "true", inferschema = "true")\ .csv(REFORMED_FILE) print("\nTotal number of rows loaded: %d" % df.count()) # remove duplicated entries df = df.drop_duplicates() print("\nTotal number of rows without duplicates: %d" % df.count()) df.show() # filter dataframe df_no_food = df.filter(df['food_class'] == -1) df_no_homeless = df.filter(df['homeless'] == -1) df_all_info = df.filter(df['food_class'] >= 0).filter(df['homeless'] >= 0) print("\nNumber of rows having all information: %d" % df_all_info.count()) print("number of rows without food information: %d" % df_no_food.count()) print("number of rows without homeless information: %d" % df_no_homeless.count()) # transform dataframe into RDD transformed_df_food = df_all_info.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) transformed_df_homeless = df_all_info.rdd.map(lambda row: LabeledPoint(row[-3], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_homeless_trend = df_all_info.rdd.map(lambda row: LabeledPoint(row[-2], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) # split reformed data into tranning data and test data splits = [TRAINING_DATA_RATIO, 1.0 - TRAINING_DATA_RATIO] training_data_food, test_data_food = transformed_df_food.randomSplit(splits, RANDOM_SEED) training_data_homeless, test_data_homeless = transformed_df_homeless.randomSplit(splits, RANDOM_SEED) training_data_homeless_trend, test_data_homeless_trend = transformed_df_homeless_trend.randomSplit(splits, RANDOM_SEED) print("\nNumber of training set rows: %d" % training_data_food.count()) print("Number of test set rows: %d" % test_data_food.count()) """ ---------- model training ---------- """ # train the classification model using training data start_time = t.time() num_classes = len(food_dict) model_food_classifier = RandomForest.trainClassifier(training_data_food, \ numClasses=num_classes,categoricalFeaturesInfo={},\ numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="gini", \ maxDepth=RF_MAX_DEPTH, maxBins=32, \ seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train food classifier: %.3f seconds" % elapsed_time) # train the regression model using training data start_time = t.time() model_homeless_regressor = RandomForest.trainRegressor(training_data_homeless,\ categoricalFeaturesInfo={},numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="variance",\ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) model_homeless_trend_regressor = RandomForest.trainRegressor(training_data_homeless_trend, categoricalFeaturesInfo={}, \ numTrees=RF_NUM_TREES, featureSubsetStrategy="auto", impurity="variance", \ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train homeless regressor: %.3f seconds" % elapsed_time) # make predictions using test data and calculate the accuracy food_predictions = 
model_food_classifier.predict(test_data_food.map(lambda x: x.features)) homeless_predictions = model_homeless_regressor.predict(test_data_homeless.map(lambda x: x.features)) homeless_trend_predictions = model_homeless_trend_regressor.predict(test_data_homeless_trend.map(lambda x: x.features)) labels_and_predictions_food = test_data_food.map(lambda x: x.label).zip(food_predictions) labels_and_predictions_homeless = test_data_homeless.map(lambda x: x.label).zip(homeless_predictions) labels_and_predictions_homeless_trend = test_data_homeless_trend.map(lambda x: x.label).zip(homeless_trend_predictions) food_acc = labels_and_predictions_food.filter(lambda x: x[0] == x[1]).count() / float(test_data_food.count()) homeless_acc = labels_and_predictions_homeless.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless.count()) homeless_trend_acc = labels_and_predictions_homeless_trend.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless_trend.count()) print("\nFood classifier accuracy: %.3f%%" % (food_acc * 100)) print("Homeless regressor accuracy: %.3f%%" % (homeless_acc * 100)) print("Homeless trend regressor accuracy: %.3f%%" % (homeless_trend_acc * 100)) """ ---------- make predictions ---------- """ food_pre = df_no_food.count() > 0 homeless_pre = df_no_homeless.count() > 0 # make food predictions if food_pre: transformed_df_no_food = df_no_food.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) predict_foods = model_food_classifier.predict(transformed_df_no_food.map(lambda x: x.features)) # make homeless predictions if homeless_pre: transformed_df_no_homeless = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[8], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_no_homeless_trend = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[9], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) predict_homeless = model_homeless_regressor.predict(transformed_df_no_homeless.map(lambda x: x.features)) predict_homeless_trend = model_homeless_trend_regressor.predict(transformed_df_no_homeless_trend.map(lambda x: x.features)) # zip id with predictions preparing for joining data if food_pre: rdd_predict_foods = df_no_food.rdd.map(lambda row: row[0]).zip(predict_foods.map(int)) list_predict_foods = rdd_predict_foods.collect() if homeless_pre: rdd_predict_homeless = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless.map(int)) rdd_predict_homeless_trend = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless_trend.map(int)) list_predict_homeless = rdd_predict_homeless.collect() list_predict_homeless_trend = rdd_predict_homeless_trend.collect() """ ---------- join predictions to original data """ # transform predicted rdd to dataframe if food_pre: df_predict_foods = spark.createDataFrame(list_predict_foods, schema=["id","food_class"]) df_no_food = df_no_food.drop('food_class') concat_df_food = df_no_food.join(df_predict_foods, on='id') if homeless_pre: df_predict_homeless = spark.createDataFrame(list_predict_homeless, schema=["id","homeless"]) df_predict_homeless_trend = spark.createDataFrame(list_predict_homeless_trend, schema=["id","homeless_trend"]) df_no_homeless = df_no_homeless.drop('homeless').drop('homeless_trend') concat_df_homeless = df_no_homeless.join(df_predict_homeless, on='id').join(df_predict_homeless_trend, on='id') generate_rev_dict() get_food_type_udf = udf(get_food_type, StringType()) get_food_group_udf = udf(get_food_group, StringType()) df_all_info = 
df_all_info.withColumn('food', get_food_type_udf(df_all_info['food_class'])) df_all_info = df_all_info.drop('food_class') # reform the dataframe to prepare for tranforming to json if food_pre: concat_df_food = concat_df_food.withColumn('food', get_food_type_udf(concat_df_food['food_class'])) concat_df_food = concat_df_food.drop('food_class') union_df = df_all_info.union(concat_df_food) else: union_df = df_all_info if homeless_pre: concat_df_homeless = concat_df_homeless.withColumn('food', get_food_type_udf(concat_df_homeless['food_class'])) concat_df_homeless = concat_df_homeless.drop('food_class') union_df = union_df.union(concat_df_homeless) union_df = union_df.drop('id') union_df = union_df.drop('timestamp') union_df = union_df.withColumn('food_group', get_food_group_udf(union_df['food'])) print("\nTotal number of rows of final data: %d" % (union_df.count())) union_df.show() """ ---------- transform dataframe into json preparing for inserting back to couchdb """ json_data = union_df.toJSON() print("\nStart inserting data back to database...") # insert data into couchdb my_db = Couch(OUT_COUCHDB_NAME) final_json = {} final_json["type"] = "FeatureCollection" final_json["features"] = [] j = 0 for row in json_data.collect(): entry = {} entry["type"] = "Feature" entry["properties"] = {} entry["geometry"] = {} entry["geometry"]["type"] = "Point" entry["geometry"]["coordinates"] = [] json_obj = json.loads(row) entry["properties"]["time"] = json_obj["time"] entry["properties"]["polarity"] = json_obj["polarity"] entry["properties"]["followers"] = json_obj["followers"] entry["properties"]["following"] = json_obj["following"] entry["properties"]["food"] = json_obj["food"] entry["properties"]["food_group"] = json_obj["food_group"] entry["properties"]["homeless"] = json_obj["homeless"] entry["properties"]["homeless_trend"] = json_obj["homeless_trend"] entry["geometry"]["coordinates"].append(json_obj["lat"]) entry["geometry"]["coordinates"].append(json_obj["lng"]) final_json["features"].append(entry) j += 1 print('\n') my_db.insert(final_json) print("\nTotal number of rows inserted: %d" % (j)) spark.stop()
if food in Keywords.fastfood: return "fastfood" if food in Keywords.fruits: return "fruits" if food in Keywords.grains: return "grains" if food in Keywords.meat: return "meat" if food in Keywords.seafood: return "seafood" if food in Keywords.vegetables: return "vegetables" return None
identifier_body
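For reference, the food-name to integer-class mapping that the rf_model.py rows above rely on can be exercised on its own. Below is a minimal, self-contained Python sketch; the helper names mirror the script, while the sample food names are made up for illustration.

# Minimal sketch of the food-name <-> class-label round trip used by rf_model.py.
# Helper names mirror the script; the sample food names below are illustrative only.
food_dict = {}   # food name -> class label (stored as a string)
rev_dict = {}    # class label -> food name

def get_food_class(food):
    # Assign the next unused label the first time a food is seen.
    if food not in food_dict:
        food_dict[food] = str(len(food_dict))
    return food_dict[food]

def generate_rev_dict():
    for key, value in food_dict.items():
        rev_dict[value] = key

def get_food_type(food_class):
    the_class = str(food_class)
    if the_class in rev_dict:
        return rev_dict[the_class]
    return None

if __name__ == "__main__":
    print([get_food_class(f) for f in ["pizza", "apple", "pizza"]])  # ['0', '1', '0']
    generate_rev_dict()
    # The script casts classifier predictions with map(int) before this lookup,
    # so str(prediction) matches the string keys stored above.
    print(get_food_type(1))  # apple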
rf_model.py
""" CCC Team 42, Melbourne Thuy Ngoc Ha - 963370 Lan Zhou - 824371 Zijian Wang - 950618 Ivan Chee - 736901 Duer Wang - 824325 """ """ A python file used for making predictions on tweeters that lack food information or homeless information. Using random forest classification model for predicting food. Using random forest regression model for predicting homeless and homeless trend. """ import sys import os import httplib2 import json import csv import codecs import time as t from couch import Couch from pyspark.sql import SparkSession from pyspark.mllib.linalg import Vectors from pyspark.mllib.regression import LabeledPoint from pyspark.mllib.tree import RandomForest from pyspark.sql.functions import udf from pyspark.sql.types import * from keywords import Keywords # set up spark environment #os.environ['SPARK_HOME'] = "spark" #sys.path.append("spark/python") #sys.path.append("spark/python/lib") COUCHDB_NAME = "" OUT_COUCHDB_NAME = "" REFORMED_FILE = "" APP_NAME = "random forest model" SPARK_URL = "local[*]" RANDOM_SEED = 12345 TRAINING_DATA_RATIO = 0.7 RF_NUM_TREES = 10 RF_MAX_DEPTH = 5 RF_NUM_BINS = 32 food_dict = {} rev_dict = {} food_pre = False homeless_pre = False """ --------------------------------------------------------- ------------------ Auxiliary Functions ------------------ --------------------------------------------------------- """ # get coordinates of a given city def cityPos(name): url = "https://maps.googleapis.com/maps/api/geocode/json?" + \ "key=AIzaSyBsZErhxaT1oVgMrT-xGLcAN5nK3UHeGBU&address=" + name req = httplib2.Http(".cache") resp, content = req.request(url, "GET") res = json.loads(content) return res["results"][0]["geometry"] # read data from couchdb and reform them to read easier def trans(path): con = Couch(COUCHDB_NAME) jsonData = con.query_all() csvfile = open(REFORMED_FILE, 'w', newline='') writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL) keys=['id', 'time', 'timestamp', 'lat', 'lng', 'polarity', 'followers', \ 'following', 'homeless', 'homeless_trend', 'food_class'] writer.writerow(keys) i = 0 for dic in jsonData: try: # get coordinates if dic['location']['coordinates'] is None: city = dic['location']['place_name'] city = city.replace(" ","%20") coor = cityPos(city) lng = coor['location']['lng'] lat = coor['location']['lat'] else: lng = dic['location']['coordinates'][0] lat = dic['location']['coordinates'][1] # get time amd timesptamp time = dic['created_at']['day']+ '-' + \ trans_month(dic['created_at']['month'])+ '-' + \ dic['created_at']['year']+ ' ' +dic['created_at']['time'] timeArray = t.strptime(time, "%d-%m-%Y %H:%M:%S") timestamp = t.mktime(timeArray) # to ensure at least one of homeless info and food info appears home = dic['homeless'] foods = dic['food_list'] if (home is None) and (foods is None or len(foods) == 0): continue # get homeless information if home is None: homeless = -1 homeless_trend = 0 else: homeless = dic['homeless']['cnt16'] homeless_trend = dic['homeless']['incre/decre'] # get food if foods is None or len(foods) == 0: writer.writerow([i, time, timestamp, lat, lng, dic['polarity'], \ dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, "-1"]) i += 1 else: for food in foods: food_class = get_food_class(food) writer.writerow([i, time, timestamp, lat, lng, \ dic['polarity'], dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, food_class]) i += 1 except: continue csvfile.close() def trans_month(month): month_dic = {'Jan': '01', 'Feb': '02', 'Mar': '03', 
'Apr': '04', \ 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', \ 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'} return month_dic[month] # transform food name into food classes that are integers def get_food_class(food): if not food in food_dict.keys(): food_dict[food] = str(len(food_dict)) return food_dict[food] def generate_rev_dict(): for key,value in food_dict.items(): rev_dict[value] = key # get food name by food class def get_food_type(food_class): the_class = str(food_class) if the_class in rev_dict.keys(): return rev_dict[the_class] return None # get food group by food name def
(food): if food in Keywords.fastfood: return "fastfood" if food in Keywords.fruits: return "fruits" if food in Keywords.grains: return "grains" if food in Keywords.meat: return "meat" if food in Keywords.seafood: return "seafood" if food in Keywords.vegetables: return "vegetables" return None """ --------------------------------------------------------- --------------------- Main Function --------------------- --------------------------------------------------------- """ if __name__ == "__main__": COUCHDB_NAME = sys.argv[1] OUT_COUCHDB_NAME = sys.argv[2] REFORMED_FILE = sys.argv[3] # create spark session spark = SparkSession.builder.appName(APP_NAME) \ .master(SPARK_URL).getOrCreate() """ ---------- data preparation ---------- """ # read data from couchdb trans(REFORMED_FILE) # reform data into a dataframe df = spark.read.options(header = "true", inferschema = "true")\ .csv(REFORMED_FILE) print("\nTotal number of rows loaded: %d" % df.count()) # remove duplicated entries df = df.drop_duplicates() print("\nTotal number of rows without duplicates: %d" % df.count()) df.show() # filter dataframe df_no_food = df.filter(df['food_class'] == -1) df_no_homeless = df.filter(df['homeless'] == -1) df_all_info = df.filter(df['food_class'] >= 0).filter(df['homeless'] >= 0) print("\nNumber of rows having all information: %d" % df_all_info.count()) print("number of rows without food information: %d" % df_no_food.count()) print("number of rows without homeless information: %d" % df_no_homeless.count()) # transform dataframe into RDD transformed_df_food = df_all_info.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) transformed_df_homeless = df_all_info.rdd.map(lambda row: LabeledPoint(row[-3], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_homeless_trend = df_all_info.rdd.map(lambda row: LabeledPoint(row[-2], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) # split reformed data into tranning data and test data splits = [TRAINING_DATA_RATIO, 1.0 - TRAINING_DATA_RATIO] training_data_food, test_data_food = transformed_df_food.randomSplit(splits, RANDOM_SEED) training_data_homeless, test_data_homeless = transformed_df_homeless.randomSplit(splits, RANDOM_SEED) training_data_homeless_trend, test_data_homeless_trend = transformed_df_homeless_trend.randomSplit(splits, RANDOM_SEED) print("\nNumber of training set rows: %d" % training_data_food.count()) print("Number of test set rows: %d" % test_data_food.count()) """ ---------- model training ---------- """ # train the classification model using training data start_time = t.time() num_classes = len(food_dict) model_food_classifier = RandomForest.trainClassifier(training_data_food, \ numClasses=num_classes,categoricalFeaturesInfo={},\ numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="gini", \ maxDepth=RF_MAX_DEPTH, maxBins=32, \ seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train food classifier: %.3f seconds" % elapsed_time) # train the regression model using training data start_time = t.time() model_homeless_regressor = RandomForest.trainRegressor(training_data_homeless,\ categoricalFeaturesInfo={},numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="variance",\ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) model_homeless_trend_regressor = RandomForest.trainRegressor(training_data_homeless_trend, categoricalFeaturesInfo={}, \ numTrees=RF_NUM_TREES, featureSubsetStrategy="auto", impurity="variance", \ 
maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train homeless regressor: %.3f seconds" % elapsed_time) # make predictions using test data and calculate the accuracy food_predictions = model_food_classifier.predict(test_data_food.map(lambda x: x.features)) homeless_predictions = model_homeless_regressor.predict(test_data_homeless.map(lambda x: x.features)) homeless_trend_predictions = model_homeless_trend_regressor.predict(test_data_homeless_trend.map(lambda x: x.features)) labels_and_predictions_food = test_data_food.map(lambda x: x.label).zip(food_predictions) labels_and_predictions_homeless = test_data_homeless.map(lambda x: x.label).zip(homeless_predictions) labels_and_predictions_homeless_trend = test_data_homeless_trend.map(lambda x: x.label).zip(homeless_trend_predictions) food_acc = labels_and_predictions_food.filter(lambda x: x[0] == x[1]).count() / float(test_data_food.count()) homeless_acc = labels_and_predictions_homeless.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless.count()) homeless_trend_acc = labels_and_predictions_homeless_trend.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless_trend.count()) print("\nFood classifier accuracy: %.3f%%" % (food_acc * 100)) print("Homeless regressor accuracy: %.3f%%" % (homeless_acc * 100)) print("Homeless trend regressor accuracy: %.3f%%" % (homeless_trend_acc * 100)) """ ---------- make predictions ---------- """ food_pre = df_no_food.count() > 0 homeless_pre = df_no_homeless.count() > 0 # make food predictions if food_pre: transformed_df_no_food = df_no_food.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) predict_foods = model_food_classifier.predict(transformed_df_no_food.map(lambda x: x.features)) # make homeless predictions if homeless_pre: transformed_df_no_homeless = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[8], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_no_homeless_trend = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[9], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) predict_homeless = model_homeless_regressor.predict(transformed_df_no_homeless.map(lambda x: x.features)) predict_homeless_trend = model_homeless_trend_regressor.predict(transformed_df_no_homeless_trend.map(lambda x: x.features)) # zip id with predictions preparing for joining data if food_pre: rdd_predict_foods = df_no_food.rdd.map(lambda row: row[0]).zip(predict_foods.map(int)) list_predict_foods = rdd_predict_foods.collect() if homeless_pre: rdd_predict_homeless = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless.map(int)) rdd_predict_homeless_trend = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless_trend.map(int)) list_predict_homeless = rdd_predict_homeless.collect() list_predict_homeless_trend = rdd_predict_homeless_trend.collect() """ ---------- join predictions to original data """ # transform predicted rdd to dataframe if food_pre: df_predict_foods = spark.createDataFrame(list_predict_foods, schema=["id","food_class"]) df_no_food = df_no_food.drop('food_class') concat_df_food = df_no_food.join(df_predict_foods, on='id') if homeless_pre: df_predict_homeless = spark.createDataFrame(list_predict_homeless, schema=["id","homeless"]) df_predict_homeless_trend = spark.createDataFrame(list_predict_homeless_trend, schema=["id","homeless_trend"]) df_no_homeless = df_no_homeless.drop('homeless').drop('homeless_trend') 
concat_df_homeless = df_no_homeless.join(df_predict_homeless, on='id').join(df_predict_homeless_trend, on='id') generate_rev_dict() get_food_type_udf = udf(get_food_type, StringType()) get_food_group_udf = udf(get_food_group, StringType()) df_all_info = df_all_info.withColumn('food', get_food_type_udf(df_all_info['food_class'])) df_all_info = df_all_info.drop('food_class') # reform the dataframe to prepare for tranforming to json if food_pre: concat_df_food = concat_df_food.withColumn('food', get_food_type_udf(concat_df_food['food_class'])) concat_df_food = concat_df_food.drop('food_class') union_df = df_all_info.union(concat_df_food) else: union_df = df_all_info if homeless_pre: concat_df_homeless = concat_df_homeless.withColumn('food', get_food_type_udf(concat_df_homeless['food_class'])) concat_df_homeless = concat_df_homeless.drop('food_class') union_df = union_df.union(concat_df_homeless) union_df = union_df.drop('id') union_df = union_df.drop('timestamp') union_df = union_df.withColumn('food_group', get_food_group_udf(union_df['food'])) print("\nTotal number of rows of final data: %d" % (union_df.count())) union_df.show() """ ---------- transform dataframe into json preparing for inserting back to couchdb """ json_data = union_df.toJSON() print("\nStart inserting data back to database...") # insert data into couchdb my_db = Couch(OUT_COUCHDB_NAME) final_json = {} final_json["type"] = "FeatureCollection" final_json["features"] = [] j = 0 for row in json_data.collect(): entry = {} entry["type"] = "Feature" entry["properties"] = {} entry["geometry"] = {} entry["geometry"]["type"] = "Point" entry["geometry"]["coordinates"] = [] json_obj = json.loads(row) entry["properties"]["time"] = json_obj["time"] entry["properties"]["polarity"] = json_obj["polarity"] entry["properties"]["followers"] = json_obj["followers"] entry["properties"]["following"] = json_obj["following"] entry["properties"]["food"] = json_obj["food"] entry["properties"]["food_group"] = json_obj["food_group"] entry["properties"]["homeless"] = json_obj["homeless"] entry["properties"]["homeless_trend"] = json_obj["homeless_trend"] entry["geometry"]["coordinates"].append(json_obj["lat"]) entry["geometry"]["coordinates"].append(json_obj["lng"]) final_json["features"].append(entry) j += 1 print('\n') my_db.insert(final_json) print("\nTotal number of rows inserted: %d" % (j)) spark.stop()
get_food_group
identifier_name
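The accuracy figures printed by the script follow two different rules: exact match for the food classifier, and an absolute-error tolerance of 10 for the two homeless regressors. The snippet below reproduces that arithmetic on plain Python lists with made-up numbers; the script computes the same quantities on Spark RDDs via zip/filter/count.

# Toy illustration of the two accuracy metrics in rf_model.py, on plain lists.
labels_food = [3, 1, 4, 1, 5]
preds_food = [3, 0, 4, 1, 2]
food_acc = sum(1 for l, p in zip(labels_food, preds_food) if l == p) / float(len(labels_food))

labels_homeless = [120, 45, 80]
preds_homeless = [112, 60, 84]
homeless_acc = sum(1 for l, p in zip(labels_homeless, preds_homeless)
                   if abs(l - p) < 10) / float(len(labels_homeless))

print("Food classifier accuracy: %.3f%%" % (food_acc * 100))         # 60.000%
print("Homeless regressor accuracy: %.3f%%" % (homeless_acc * 100))  # 66.667%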
rf_model.py
""" CCC Team 42, Melbourne Thuy Ngoc Ha - 963370 Lan Zhou - 824371 Zijian Wang - 950618 Ivan Chee - 736901 Duer Wang - 824325 """ """ A python file used for making predictions on tweeters that lack food information or homeless information. Using random forest classification model for predicting food. Using random forest regression model for predicting homeless and homeless trend. """ import sys import os import httplib2 import json import csv import codecs import time as t from couch import Couch from pyspark.sql import SparkSession from pyspark.mllib.linalg import Vectors from pyspark.mllib.regression import LabeledPoint from pyspark.mllib.tree import RandomForest from pyspark.sql.functions import udf from pyspark.sql.types import * from keywords import Keywords # set up spark environment #os.environ['SPARK_HOME'] = "spark" #sys.path.append("spark/python") #sys.path.append("spark/python/lib") COUCHDB_NAME = "" OUT_COUCHDB_NAME = "" REFORMED_FILE = "" APP_NAME = "random forest model" SPARK_URL = "local[*]" RANDOM_SEED = 12345 TRAINING_DATA_RATIO = 0.7 RF_NUM_TREES = 10 RF_MAX_DEPTH = 5 RF_NUM_BINS = 32 food_dict = {} rev_dict = {} food_pre = False homeless_pre = False """ --------------------------------------------------------- ------------------ Auxiliary Functions ------------------ --------------------------------------------------------- """ # get coordinates of a given city def cityPos(name): url = "https://maps.googleapis.com/maps/api/geocode/json?" + \ "key=AIzaSyBsZErhxaT1oVgMrT-xGLcAN5nK3UHeGBU&address=" + name req = httplib2.Http(".cache") resp, content = req.request(url, "GET") res = json.loads(content) return res["results"][0]["geometry"] # read data from couchdb and reform them to read easier def trans(path): con = Couch(COUCHDB_NAME) jsonData = con.query_all() csvfile = open(REFORMED_FILE, 'w', newline='') writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL) keys=['id', 'time', 'timestamp', 'lat', 'lng', 'polarity', 'followers', \ 'following', 'homeless', 'homeless_trend', 'food_class'] writer.writerow(keys) i = 0 for dic in jsonData: try: # get coordinates if dic['location']['coordinates'] is None: city = dic['location']['place_name'] city = city.replace(" ","%20") coor = cityPos(city) lng = coor['location']['lng'] lat = coor['location']['lat'] else: lng = dic['location']['coordinates'][0] lat = dic['location']['coordinates'][1] # get time amd timesptamp time = dic['created_at']['day']+ '-' + \ trans_month(dic['created_at']['month'])+ '-' + \ dic['created_at']['year']+ ' ' +dic['created_at']['time'] timeArray = t.strptime(time, "%d-%m-%Y %H:%M:%S") timestamp = t.mktime(timeArray) # to ensure at least one of homeless info and food info appears home = dic['homeless'] foods = dic['food_list'] if (home is None) and (foods is None or len(foods) == 0): continue # get homeless information if home is None: homeless = -1 homeless_trend = 0 else: homeless = dic['homeless']['cnt16'] homeless_trend = dic['homeless']['incre/decre'] # get food if foods is None or len(foods) == 0: writer.writerow([i, time, timestamp, lat, lng, dic['polarity'], \ dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, "-1"]) i += 1 else: for food in foods: food_class = get_food_class(food) writer.writerow([i, time, timestamp, lat, lng, \ dic['polarity'], dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, food_class]) i += 1 except: continue csvfile.close() def trans_month(month): month_dic = {'Jan': '01', 'Feb': '02', 'Mar': '03', 
'Apr': '04', \ 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', \ 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'} return month_dic[month] # transform food name into food classes that are integers def get_food_class(food): if not food in food_dict.keys(): food_dict[food] = str(len(food_dict)) return food_dict[food] def generate_rev_dict(): for key,value in food_dict.items(): rev_dict[value] = key # get food name by food class def get_food_type(food_class): the_class = str(food_class) if the_class in rev_dict.keys(): return rev_dict[the_class] return None # get food group by food name def get_food_group(food): if food in Keywords.fastfood: return "fastfood" if food in Keywords.fruits: return "fruits" if food in Keywords.grains: return "grains" if food in Keywords.meat: return "meat" if food in Keywords.seafood: return "seafood" if food in Keywords.vegetables: return "vegetables" return None """ --------------------------------------------------------- --------------------- Main Function --------------------- --------------------------------------------------------- """ if __name__ == "__main__": COUCHDB_NAME = sys.argv[1] OUT_COUCHDB_NAME = sys.argv[2] REFORMED_FILE = sys.argv[3] # create spark session spark = SparkSession.builder.appName(APP_NAME) \ .master(SPARK_URL).getOrCreate() """ ---------- data preparation ---------- """ # read data from couchdb trans(REFORMED_FILE) # reform data into a dataframe df = spark.read.options(header = "true", inferschema = "true")\ .csv(REFORMED_FILE) print("\nTotal number of rows loaded: %d" % df.count()) # remove duplicated entries df = df.drop_duplicates() print("\nTotal number of rows without duplicates: %d" % df.count()) df.show() # filter dataframe df_no_food = df.filter(df['food_class'] == -1) df_no_homeless = df.filter(df['homeless'] == -1) df_all_info = df.filter(df['food_class'] >= 0).filter(df['homeless'] >= 0) print("\nNumber of rows having all information: %d" % df_all_info.count()) print("number of rows without food information: %d" % df_no_food.count()) print("number of rows without homeless information: %d" % df_no_homeless.count()) # transform dataframe into RDD transformed_df_food = df_all_info.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) transformed_df_homeless = df_all_info.rdd.map(lambda row: LabeledPoint(row[-3], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_homeless_trend = df_all_info.rdd.map(lambda row: LabeledPoint(row[-2], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) # split reformed data into tranning data and test data splits = [TRAINING_DATA_RATIO, 1.0 - TRAINING_DATA_RATIO] training_data_food, test_data_food = transformed_df_food.randomSplit(splits, RANDOM_SEED) training_data_homeless, test_data_homeless = transformed_df_homeless.randomSplit(splits, RANDOM_SEED) training_data_homeless_trend, test_data_homeless_trend = transformed_df_homeless_trend.randomSplit(splits, RANDOM_SEED) print("\nNumber of training set rows: %d" % training_data_food.count()) print("Number of test set rows: %d" % test_data_food.count()) """ ---------- model training ---------- """ # train the classification model using training data start_time = t.time() num_classes = len(food_dict) model_food_classifier = RandomForest.trainClassifier(training_data_food, \ numClasses=num_classes,categoricalFeaturesInfo={},\ numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="gini", \ maxDepth=RF_MAX_DEPTH, maxBins=32, \ seed=RANDOM_SEED) end_time = t.time() 
elapsed_time = end_time - start_time print("\nTime to train food classifier: %.3f seconds" % elapsed_time) # train the regression model using training data start_time = t.time() model_homeless_regressor = RandomForest.trainRegressor(training_data_homeless,\ categoricalFeaturesInfo={},numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="variance",\ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) model_homeless_trend_regressor = RandomForest.trainRegressor(training_data_homeless_trend, categoricalFeaturesInfo={}, \ numTrees=RF_NUM_TREES, featureSubsetStrategy="auto", impurity="variance", \ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train homeless regressor: %.3f seconds" % elapsed_time) # make predictions using test data and calculate the accuracy food_predictions = model_food_classifier.predict(test_data_food.map(lambda x: x.features)) homeless_predictions = model_homeless_regressor.predict(test_data_homeless.map(lambda x: x.features)) homeless_trend_predictions = model_homeless_trend_regressor.predict(test_data_homeless_trend.map(lambda x: x.features)) labels_and_predictions_food = test_data_food.map(lambda x: x.label).zip(food_predictions) labels_and_predictions_homeless = test_data_homeless.map(lambda x: x.label).zip(homeless_predictions) labels_and_predictions_homeless_trend = test_data_homeless_trend.map(lambda x: x.label).zip(homeless_trend_predictions) food_acc = labels_and_predictions_food.filter(lambda x: x[0] == x[1]).count() / float(test_data_food.count()) homeless_acc = labels_and_predictions_homeless.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless.count()) homeless_trend_acc = labels_and_predictions_homeless_trend.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless_trend.count()) print("\nFood classifier accuracy: %.3f%%" % (food_acc * 100)) print("Homeless regressor accuracy: %.3f%%" % (homeless_acc * 100)) print("Homeless trend regressor accuracy: %.3f%%" % (homeless_trend_acc * 100)) """ ---------- make predictions ---------- """ food_pre = df_no_food.count() > 0 homeless_pre = df_no_homeless.count() > 0 # make food predictions if food_pre: transformed_df_no_food = df_no_food.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) predict_foods = model_food_classifier.predict(transformed_df_no_food.map(lambda x: x.features)) # make homeless predictions if homeless_pre: transformed_df_no_homeless = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[8], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_no_homeless_trend = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[9], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) predict_homeless = model_homeless_regressor.predict(transformed_df_no_homeless.map(lambda x: x.features)) predict_homeless_trend = model_homeless_trend_regressor.predict(transformed_df_no_homeless_trend.map(lambda x: x.features)) # zip id with predictions preparing for joining data if food_pre: rdd_predict_foods = df_no_food.rdd.map(lambda row: row[0]).zip(predict_foods.map(int)) list_predict_foods = rdd_predict_foods.collect() if homeless_pre: rdd_predict_homeless = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless.map(int)) rdd_predict_homeless_trend = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless_trend.map(int)) list_predict_homeless = rdd_predict_homeless.collect() list_predict_homeless_trend = 
rdd_predict_homeless_trend.collect() """ ---------- join predictions to original data """ # transform predicted rdd to dataframe if food_pre: df_predict_foods = spark.createDataFrame(list_predict_foods, schema=["id","food_class"]) df_no_food = df_no_food.drop('food_class') concat_df_food = df_no_food.join(df_predict_foods, on='id') if homeless_pre: df_predict_homeless = spark.createDataFrame(list_predict_homeless, schema=["id","homeless"]) df_predict_homeless_trend = spark.createDataFrame(list_predict_homeless_trend, schema=["id","homeless_trend"]) df_no_homeless = df_no_homeless.drop('homeless').drop('homeless_trend') concat_df_homeless = df_no_homeless.join(df_predict_homeless, on='id').join(df_predict_homeless_trend, on='id') generate_rev_dict() get_food_type_udf = udf(get_food_type, StringType()) get_food_group_udf = udf(get_food_group, StringType()) df_all_info = df_all_info.withColumn('food', get_food_type_udf(df_all_info['food_class'])) df_all_info = df_all_info.drop('food_class') # reform the dataframe to prepare for tranforming to json if food_pre: concat_df_food = concat_df_food.withColumn('food', get_food_type_udf(concat_df_food['food_class'])) concat_df_food = concat_df_food.drop('food_class') union_df = df_all_info.union(concat_df_food) else: union_df = df_all_info if homeless_pre: concat_df_homeless = concat_df_homeless.withColumn('food', get_food_type_udf(concat_df_homeless['food_class'])) concat_df_homeless = concat_df_homeless.drop('food_class') union_df = union_df.union(concat_df_homeless) union_df = union_df.drop('id') union_df = union_df.drop('timestamp') union_df = union_df.withColumn('food_group', get_food_group_udf(union_df['food'])) print("\nTotal number of rows of final data: %d" % (union_df.count())) union_df.show() """ ---------- transform dataframe into json preparing for inserting back to couchdb """ json_data = union_df.toJSON() print("\nStart inserting data back to database...") # insert data into couchdb my_db = Couch(OUT_COUCHDB_NAME) final_json = {} final_json["type"] = "FeatureCollection" final_json["features"] = [] j = 0 for row in json_data.collect(): entry = {} entry["type"] = "Feature" entry["properties"] = {} entry["geometry"] = {} entry["geometry"]["type"] = "Point" entry["geometry"]["coordinates"] = [] json_obj = json.loads(row)
entry["properties"]["polarity"] = json_obj["polarity"] entry["properties"]["followers"] = json_obj["followers"] entry["properties"]["following"] = json_obj["following"] entry["properties"]["food"] = json_obj["food"] entry["properties"]["food_group"] = json_obj["food_group"] entry["properties"]["homeless"] = json_obj["homeless"] entry["properties"]["homeless_trend"] = json_obj["homeless_trend"] entry["geometry"]["coordinates"].append(json_obj["lat"]) entry["geometry"]["coordinates"].append(json_obj["lng"]) final_json["features"].append(entry) j += 1 print('\n') my_db.insert(final_json) print("\nTotal number of rows inserted: %d" % (j)) spark.stop()
entry["properties"]["time"] = json_obj["time"]
random_line_split
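Each prediction row is finally wrapped as a Feature inside a FeatureCollection before being inserted into the output CouchDB database. The following is a self-contained sketch of that packaging with made-up values; note that, as in the script, the coordinates are appended as [lat, lng], whereas the GeoJSON specification orders positions as [lng, lat], so whatever consumes the output database has to expect the former.

# Sketch of how one output row is wrapped as a Feature, mirroring the loop in rf_model.py.
# All values below are made up for illustration.
import json

row = json.dumps({"time": "21-05-2018 10:15:00", "polarity": 0.4, "followers": 120,
                  "following": 80, "food": "pizza", "food_group": "fastfood",
                  "homeless": 35, "homeless_trend": 2, "lat": -37.81, "lng": 144.96})

json_obj = json.loads(row)
entry = {
    "type": "Feature",
    "properties": {k: json_obj[k] for k in ("time", "polarity", "followers", "following",
                                            "food", "food_group", "homeless",
                                            "homeless_trend")},
    "geometry": {
        "type": "Point",
        # [lat, lng] as in the script; the GeoJSON spec itself uses [lng, lat].
        "coordinates": [json_obj["lat"], json_obj["lng"]],
    },
}
final_json = {"type": "FeatureCollection", "features": [entry]}
print(json.dumps(final_json, indent=2))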
rf_model.py
""" CCC Team 42, Melbourne Thuy Ngoc Ha - 963370 Lan Zhou - 824371 Zijian Wang - 950618 Ivan Chee - 736901 Duer Wang - 824325 """ """ A python file used for making predictions on tweeters that lack food information or homeless information. Using random forest classification model for predicting food. Using random forest regression model for predicting homeless and homeless trend. """ import sys import os import httplib2 import json import csv import codecs import time as t from couch import Couch from pyspark.sql import SparkSession from pyspark.mllib.linalg import Vectors from pyspark.mllib.regression import LabeledPoint from pyspark.mllib.tree import RandomForest from pyspark.sql.functions import udf from pyspark.sql.types import * from keywords import Keywords # set up spark environment #os.environ['SPARK_HOME'] = "spark" #sys.path.append("spark/python") #sys.path.append("spark/python/lib") COUCHDB_NAME = "" OUT_COUCHDB_NAME = "" REFORMED_FILE = "" APP_NAME = "random forest model" SPARK_URL = "local[*]" RANDOM_SEED = 12345 TRAINING_DATA_RATIO = 0.7 RF_NUM_TREES = 10 RF_MAX_DEPTH = 5 RF_NUM_BINS = 32 food_dict = {} rev_dict = {} food_pre = False homeless_pre = False """ --------------------------------------------------------- ------------------ Auxiliary Functions ------------------ --------------------------------------------------------- """ # get coordinates of a given city def cityPos(name): url = "https://maps.googleapis.com/maps/api/geocode/json?" + \ "key=AIzaSyBsZErhxaT1oVgMrT-xGLcAN5nK3UHeGBU&address=" + name req = httplib2.Http(".cache") resp, content = req.request(url, "GET") res = json.loads(content) return res["results"][0]["geometry"] # read data from couchdb and reform them to read easier def trans(path): con = Couch(COUCHDB_NAME) jsonData = con.query_all() csvfile = open(REFORMED_FILE, 'w', newline='') writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL) keys=['id', 'time', 'timestamp', 'lat', 'lng', 'polarity', 'followers', \ 'following', 'homeless', 'homeless_trend', 'food_class'] writer.writerow(keys) i = 0 for dic in jsonData:
csvfile.close() def trans_month(month): month_dic = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', \ 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', \ 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'} return month_dic[month] # transform food name into food classes that are integers def get_food_class(food): if not food in food_dict.keys(): food_dict[food] = str(len(food_dict)) return food_dict[food] def generate_rev_dict(): for key,value in food_dict.items(): rev_dict[value] = key # get food name by food class def get_food_type(food_class): the_class = str(food_class) if the_class in rev_dict.keys(): return rev_dict[the_class] return None # get food group by food name def get_food_group(food): if food in Keywords.fastfood: return "fastfood" if food in Keywords.fruits: return "fruits" if food in Keywords.grains: return "grains" if food in Keywords.meat: return "meat" if food in Keywords.seafood: return "seafood" if food in Keywords.vegetables: return "vegetables" return None """ --------------------------------------------------------- --------------------- Main Function --------------------- --------------------------------------------------------- """ if __name__ == "__main__": COUCHDB_NAME = sys.argv[1] OUT_COUCHDB_NAME = sys.argv[2] REFORMED_FILE = sys.argv[3] # create spark session spark = SparkSession.builder.appName(APP_NAME) \ .master(SPARK_URL).getOrCreate() """ ---------- data preparation ---------- """ # read data from couchdb trans(REFORMED_FILE) # reform data into a dataframe df = spark.read.options(header = "true", inferschema = "true")\ .csv(REFORMED_FILE) print("\nTotal number of rows loaded: %d" % df.count()) # remove duplicated entries df = df.drop_duplicates() print("\nTotal number of rows without duplicates: %d" % df.count()) df.show() # filter dataframe df_no_food = df.filter(df['food_class'] == -1) df_no_homeless = df.filter(df['homeless'] == -1) df_all_info = df.filter(df['food_class'] >= 0).filter(df['homeless'] >= 0) print("\nNumber of rows having all information: %d" % df_all_info.count()) print("number of rows without food information: %d" % df_no_food.count()) print("number of rows without homeless information: %d" % df_no_homeless.count()) # transform dataframe into RDD transformed_df_food = df_all_info.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) transformed_df_homeless = df_all_info.rdd.map(lambda row: LabeledPoint(row[-3], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_homeless_trend = df_all_info.rdd.map(lambda row: LabeledPoint(row[-2], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) # split reformed data into tranning data and test data splits = [TRAINING_DATA_RATIO, 1.0 - TRAINING_DATA_RATIO] training_data_food, test_data_food = transformed_df_food.randomSplit(splits, RANDOM_SEED) training_data_homeless, test_data_homeless = transformed_df_homeless.randomSplit(splits, RANDOM_SEED) training_data_homeless_trend, test_data_homeless_trend = transformed_df_homeless_trend.randomSplit(splits, RANDOM_SEED) print("\nNumber of training set rows: %d" % training_data_food.count()) print("Number of test set rows: %d" % test_data_food.count()) """ ---------- model training ---------- """ # train the classification model using training data start_time = t.time() num_classes = len(food_dict) model_food_classifier = RandomForest.trainClassifier(training_data_food, \ numClasses=num_classes,categoricalFeaturesInfo={},\ numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", 
impurity="gini", \ maxDepth=RF_MAX_DEPTH, maxBins=32, \ seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train food classifier: %.3f seconds" % elapsed_time) # train the regression model using training data start_time = t.time() model_homeless_regressor = RandomForest.trainRegressor(training_data_homeless,\ categoricalFeaturesInfo={},numTrees=RF_NUM_TREES, \ featureSubsetStrategy="auto", impurity="variance",\ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) model_homeless_trend_regressor = RandomForest.trainRegressor(training_data_homeless_trend, categoricalFeaturesInfo={}, \ numTrees=RF_NUM_TREES, featureSubsetStrategy="auto", impurity="variance", \ maxDepth=RF_MAX_DEPTH, maxBins=32, seed=RANDOM_SEED) end_time = t.time() elapsed_time = end_time - start_time print("\nTime to train homeless regressor: %.3f seconds" % elapsed_time) # make predictions using test data and calculate the accuracy food_predictions = model_food_classifier.predict(test_data_food.map(lambda x: x.features)) homeless_predictions = model_homeless_regressor.predict(test_data_homeless.map(lambda x: x.features)) homeless_trend_predictions = model_homeless_trend_regressor.predict(test_data_homeless_trend.map(lambda x: x.features)) labels_and_predictions_food = test_data_food.map(lambda x: x.label).zip(food_predictions) labels_and_predictions_homeless = test_data_homeless.map(lambda x: x.label).zip(homeless_predictions) labels_and_predictions_homeless_trend = test_data_homeless_trend.map(lambda x: x.label).zip(homeless_trend_predictions) food_acc = labels_and_predictions_food.filter(lambda x: x[0] == x[1]).count() / float(test_data_food.count()) homeless_acc = labels_and_predictions_homeless.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless.count()) homeless_trend_acc = labels_and_predictions_homeless_trend.filter(lambda x: abs(x[0]-x[1]) < 10).count() / float(test_data_homeless_trend.count()) print("\nFood classifier accuracy: %.3f%%" % (food_acc * 100)) print("Homeless regressor accuracy: %.3f%%" % (homeless_acc * 100)) print("Homeless trend regressor accuracy: %.3f%%" % (homeless_trend_acc * 100)) """ ---------- make predictions ---------- """ food_pre = df_no_food.count() > 0 homeless_pre = df_no_homeless.count() > 0 # make food predictions if food_pre: transformed_df_no_food = df_no_food.rdd.map(lambda row: LabeledPoint(row[-1], Vectors.dense(row[2:-1]))) predict_foods = model_food_classifier.predict(transformed_df_no_food.map(lambda x: x.features)) # make homeless predictions if homeless_pre: transformed_df_no_homeless = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[8], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) transformed_df_no_homeless_trend = df_no_homeless.rdd.map(lambda row: LabeledPoint(row[9], Vectors.dense(row[2],row[3],row[4],row[5],row[6],row[7],row[10]))) predict_homeless = model_homeless_regressor.predict(transformed_df_no_homeless.map(lambda x: x.features)) predict_homeless_trend = model_homeless_trend_regressor.predict(transformed_df_no_homeless_trend.map(lambda x: x.features)) # zip id with predictions preparing for joining data if food_pre: rdd_predict_foods = df_no_food.rdd.map(lambda row: row[0]).zip(predict_foods.map(int)) list_predict_foods = rdd_predict_foods.collect() if homeless_pre: rdd_predict_homeless = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless.map(int)) rdd_predict_homeless_trend = df_no_homeless.rdd.map(lambda row: row[0]).zip(predict_homeless_trend.map(int)) 
list_predict_homeless = rdd_predict_homeless.collect() list_predict_homeless_trend = rdd_predict_homeless_trend.collect() """ ---------- join predictions to original data """ # transform predicted rdd to dataframe if food_pre: df_predict_foods = spark.createDataFrame(list_predict_foods, schema=["id","food_class"]) df_no_food = df_no_food.drop('food_class') concat_df_food = df_no_food.join(df_predict_foods, on='id') if homeless_pre: df_predict_homeless = spark.createDataFrame(list_predict_homeless, schema=["id","homeless"]) df_predict_homeless_trend = spark.createDataFrame(list_predict_homeless_trend, schema=["id","homeless_trend"]) df_no_homeless = df_no_homeless.drop('homeless').drop('homeless_trend') concat_df_homeless = df_no_homeless.join(df_predict_homeless, on='id').join(df_predict_homeless_trend, on='id') generate_rev_dict() get_food_type_udf = udf(get_food_type, StringType()) get_food_group_udf = udf(get_food_group, StringType()) df_all_info = df_all_info.withColumn('food', get_food_type_udf(df_all_info['food_class'])) df_all_info = df_all_info.drop('food_class') # reform the dataframe to prepare for tranforming to json if food_pre: concat_df_food = concat_df_food.withColumn('food', get_food_type_udf(concat_df_food['food_class'])) concat_df_food = concat_df_food.drop('food_class') union_df = df_all_info.union(concat_df_food) else: union_df = df_all_info if homeless_pre: concat_df_homeless = concat_df_homeless.withColumn('food', get_food_type_udf(concat_df_homeless['food_class'])) concat_df_homeless = concat_df_homeless.drop('food_class') union_df = union_df.union(concat_df_homeless) union_df = union_df.drop('id') union_df = union_df.drop('timestamp') union_df = union_df.withColumn('food_group', get_food_group_udf(union_df['food'])) print("\nTotal number of rows of final data: %d" % (union_df.count())) union_df.show() """ ---------- transform dataframe into json preparing for inserting back to couchdb """ json_data = union_df.toJSON() print("\nStart inserting data back to database...") # insert data into couchdb my_db = Couch(OUT_COUCHDB_NAME) final_json = {} final_json["type"] = "FeatureCollection" final_json["features"] = [] j = 0 for row in json_data.collect(): entry = {} entry["type"] = "Feature" entry["properties"] = {} entry["geometry"] = {} entry["geometry"]["type"] = "Point" entry["geometry"]["coordinates"] = [] json_obj = json.loads(row) entry["properties"]["time"] = json_obj["time"] entry["properties"]["polarity"] = json_obj["polarity"] entry["properties"]["followers"] = json_obj["followers"] entry["properties"]["following"] = json_obj["following"] entry["properties"]["food"] = json_obj["food"] entry["properties"]["food_group"] = json_obj["food_group"] entry["properties"]["homeless"] = json_obj["homeless"] entry["properties"]["homeless_trend"] = json_obj["homeless_trend"] entry["geometry"]["coordinates"].append(json_obj["lat"]) entry["geometry"]["coordinates"].append(json_obj["lng"]) final_json["features"].append(entry) j += 1 print('\n') my_db.insert(final_json) print("\nTotal number of rows inserted: %d" % (j)) spark.stop()
try: # get coordinates if dic['location']['coordinates'] is None: city = dic['location']['place_name'] city = city.replace(" ","%20") coor = cityPos(city) lng = coor['location']['lng'] lat = coor['location']['lat'] else: lng = dic['location']['coordinates'][0] lat = dic['location']['coordinates'][1] # get time amd timesptamp time = dic['created_at']['day']+ '-' + \ trans_month(dic['created_at']['month'])+ '-' + \ dic['created_at']['year']+ ' ' +dic['created_at']['time'] timeArray = t.strptime(time, "%d-%m-%Y %H:%M:%S") timestamp = t.mktime(timeArray) # to ensure at least one of homeless info and food info appears home = dic['homeless'] foods = dic['food_list'] if (home is None) and (foods is None or len(foods) == 0): continue # get homeless information if home is None: homeless = -1 homeless_trend = 0 else: homeless = dic['homeless']['cnt16'] homeless_trend = dic['homeless']['incre/decre'] # get food if foods is None or len(foods) == 0: writer.writerow([i, time, timestamp, lat, lng, dic['polarity'], \ dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, "-1"]) i += 1 else: for food in foods: food_class = get_food_class(food) writer.writerow([i, time, timestamp, lat, lng, \ dic['polarity'], dic['user']['followers'], \ dic['user']['following'], homeless, \ homeless_trend, food_class]) i += 1 except: continue
conditional_block
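The middle block above turns each tweet's created_at fields into both a formatted string and a Unix timestamp via time.strptime/time.mktime. Here is a standalone sketch of just that conversion; the created_at dict and the to_epoch helper below are made up for illustration and only follow the field layout used in trans().

import time as t

MONTHS = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
          'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
          'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}

def to_epoch(created_at):
    """Return (formatted_time, unix_timestamp) for a created_at dict of strings."""
    formatted = "%s-%s-%s %s" % (created_at['day'], MONTHS[created_at['month']],
                                 created_at['year'], created_at['time'])
    struct = t.strptime(formatted, "%d-%m-%Y %H:%M:%S")
    return formatted, t.mktime(struct)  # mktime interprets the struct as local time

print(to_epoch({'day': '25', 'month': 'May', 'year': '2019', 'time': '13:45:00'}))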
actions.rs
use crate::cards::CardInstance; use crate::game::{Game, PlayerActionState, PlayerActiveInteraction, Time, UPDATE_DURATION}; use crate::geometry::{ Facing, FloatingVector, FloatingVectorExtension, GridVector, GridVectorExtension, Rotation, TILE_RADIUS, TILE_SIZE, TILE_WIDTH, }; use crate::mechanisms::{BuildMechanism, Conveyor, ConveyorSide, Mechanism, MechanismType}; use crate::ui_glue::Draw; use guard::guard; use ordered_float::OrderedFloat; use rand::prelude::*; use serde::{Deserialize, Serialize}; use std::fmt::Debug; pub enum ActionStatus { StillGoing, Completed, } pub struct ActionUpdateContext<'a> { pub game: &'a mut Game, } impl<'a> ActionUpdateContext<'a> { pub fn interaction_state(&self) -> &PlayerActiveInteraction { match &self.game.player.action_state { PlayerActionState::Interacting(i) => i, _ => unreachable!(), } } pub fn this_card(&self) -> &CardInstance { self.game.cards.selected().unwrap() } pub fn this_card_mut(&mut self) -> &mut CardInstance { self.game.cards.selected_mut().unwrap() } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Cost { Fixed(i32), Variable, None, } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct ActionDisplayInfo { pub name: String, pub health_cost: Cost, pub time_cost: Cost, pub rules_text: String, pub flavor_text: String, } impl Default for ActionDisplayInfo { fn default() -> Self { ActionDisplayInfo { name: "".to_string(), health_cost: Cost::None, time_cost: Cost::Fixed(2), rules_text: "".to_string(), flavor_text: "".to_string(), } } } #[allow(unused)] pub trait ActionTrait { /** Perform a single time-step update on this action, possibly modifying the game state. Note that the action is removed from `game` before doing this, so that both mutable references can be held at the same time, so the action still stored in `game` is temporarily invalid. */ fn update(&mut self, context: ActionUpdateContext) -> ActionStatus; fn display_info(&self) -> ActionDisplayInfo; fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw); fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Action: ActionTrait { SimpleAction, } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct SimpleAction { display_info: ActionDisplayInfo, is_card: bool, simple_action_type: SimpleActionType, progress: Time, cancel_progress: Time, } #[allow(unused)] pub trait SimpleActionTrait { fn finish(&self, context: ActionUpdateContext); fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) {} fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! 
{ #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum SimpleActionType: SimpleActionTrait { Reshuffle, BuildConveyor, BuildMechanism, } } fn smootherstep(a: f64, b: f64, x: f64) -> f64 { let x = ((x - a) / (b - a)).clamp(0.0, 1.0); x * x * x * (x * (x * 6.0 - 15.0) + 10.0) } impl SimpleAction { pub fn new( time_cost: i32, health_cost: Option<i32>, name: &str, rules_text: &str, flavor_text: &str, is_card: bool, simple_action_type: SimpleActionType, ) -> SimpleAction { SimpleAction { display_info: ActionDisplayInfo { name: name.to_string(), health_cost: match health_cost { Some(c) => Cost::Fixed(c), None => Cost::None, }, time_cost: Cost::Fixed(time_cost), rules_text: rules_text.to_string(), flavor_text: flavor_text.to_string(), }, is_card, simple_action_type, progress: 0.0, cancel_progress: 0.0, } } fn time_cost(&self) -> f64 { match self.display_info.time_cost { Cost::Fixed(cost) => cost as f64, _ => panic!(), } } fn health_cost(&self) -> f64 { match self.display_info.health_cost { Cost::Fixed(cost) => cost as f64, Cost::None => 0.0, _ => panic!(), } } fn cooldown_time(&self) -> f64 { self.time_cost() * 0.25 } fn startup_time(&self) -> f64 { self.time_cost() * 0.25 } fn finish_time(&self) -> f64 { self.time_cost() - self.cooldown_time() } fn finished(&self) -> bool { self.progress > self.finish_time() } fn health_to_pay_by(&self, progress: f64) -> f64 { smootherstep(self.startup_time(), self.finish_time(), progress) * self.health_cost() } } impl ActionTrait for SimpleAction { fn update(&mut self, context: ActionUpdateContext) -> ActionStatus { let simple_action_type = self.simple_action_type.clone(); let canceled = context.interaction_state().canceled && !self.finished(); if canceled { self.cancel_progress += UPDATE_DURATION; } else { let was_finished = self.finished(); let health_paid_already = self.health_to_pay_by(self.progress); self.progress += UPDATE_DURATION; let health_payment = self.health_to_pay_by(self.progress) - health_paid_already; context.game.player.health -= health_payment; if self.finished() > was_finished { if self.is_card { match context.game.cards.selected_index { Some(index) => { if index + 1 == context.game.cards.deck.len() { context.game.cards.selected_index = None; } else { context.game.cards.selected_index = Some(index + 1); } } _ => unreachable!(), } } self.simple_action_type.finish(context); } } if self.progress > self.time_cost() || self.cancel_progress > self.cooldown_time() { ActionStatus::Completed } else { ActionStatus::StillGoing } } fn display_info(&self) -> ActionDisplayInfo { self.display_info.clone() } fn possible(&self, game: &Game) -> bool { self.simple_action_type.possible(game) } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_progress(game, draw); let a = game.player.position + FloatingVector::new(-TILE_RADIUS as f64 * 0.5, 0.0); draw.rectangle_on_map( 70, a, FloatingVector::new(TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64), "#000", ); draw.rectangle_on_map( 71, a, FloatingVector::new( TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64 * self.progress / self.time_cost(), ), "#ff0", ); } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_preview(game, draw); } } impl SimpleActionTrait for BuildMechanism { fn finish(&self, context: ActionUpdateContext) { context.game.create_mechanism( context.game.player.position.containing_tile(), self.mechanism(context.game), ); } fn possible(&self, game: &Game) -> bool { let position = game.player.position.containing_tile(); 
game.grid.get(position).is_some() && game.mechanism(position).is_none() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw)
} #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct BuildConveyor { pub allow_splitting: bool, } #[derive(Copy, Clone, PartialEq, Serialize, Deserialize, Debug)] struct BuildConveyorCandidate { position: GridVector, input_side: Facing, } impl BuildConveyorCandidate { fn input_position(&self) -> GridVector { self.position + self.input_side.unit_vector() * TILE_WIDTH } fn output_side(&self) -> Facing { self.input_side + Rotation::U_TURN } } impl BuildConveyor { fn candidate_valid( game: &Game, candidate: BuildConveyorCandidate, allow_splitting: bool, ) -> bool { if game.grid.get(candidate.position).is_none() { return false; }; let output_mechanism = game.mechanism(candidate.position); //debug!("{:?}", (candidate, input_mechanism, output_mechanism)); guard!(let Some(input_mechanism) = game.mechanism(candidate.input_position()) else { return false }); if !input_mechanism .mechanism_type .can_be_material_source(candidate.output_side()) { return false; } if !allow_splitting { if matches!(&input_mechanism.mechanism_type, MechanismType::Conveyor(conveyor) if conveyor.sides.iter().filter(|&&side| side == ConveyorSide::Output).count() > 0) { return false; } } if let Some(output_mechanism) = output_mechanism { guard!(let Mechanism { mechanism_type: MechanismType::Conveyor(conveyor), .. } = output_mechanism else { return false }); if conveyor.sides[candidate.input_side.as_index()] != ConveyorSide::Disconnected { return false; } } true } /// the returned facing is the input side of the new conveyor fn current_target(game: &Game, allow_splitting: bool) -> Option<BuildConveyorCandidate> { let player_position = game.player.position.containing_tile(); let player_offset = game.player.position - player_position.to_floating(); let mut candidates = Vec::new(); let mut consider = |candidate, score| { if Self::candidate_valid(game, candidate, allow_splitting) { candidates.push((candidate, score)) } }; for facing in Facing::ALL_FACINGS { consider( BuildConveyorCandidate { position: player_position, input_side: facing, }, (player_offset - facing.unit_vector().to_floating()).magnitude_squared(), ); consider( BuildConveyorCandidate { position: player_position - facing.unit_vector() * TILE_WIDTH, input_side: facing, }, (player_offset - -facing.unit_vector().to_floating()).magnitude_squared(), ); } candidates .into_iter() .min_by_key(|&(_, score)| OrderedFloat(score)) .map(|(c, _)| c) } } impl SimpleActionTrait for BuildConveyor { fn finish(&self, context: ActionUpdateContext) { let candidate = Self::current_target(context.game, self.allow_splitting).unwrap(); let mut sides = [ConveyorSide::Disconnected; 4]; sides[candidate.input_side.as_index()] = ConveyorSide::Input; context.game.create_mechanism( candidate.position, Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, last_sent: Facing::from_index(0), }), }, ); context .game .mutate_mechanism(candidate.input_position(), |mechanism| { if let Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, .. }), .. 
} = mechanism { sides[candidate.output_side().as_index()] = ConveyorSide::Output; } }); } fn possible(&self, game: &Game) -> bool { Self::current_target(game, self.allow_splitting).is_some() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { if let Some(candidate) = Self::current_target(game, self.allow_splitting) { draw.rectangle_on_map( 5, candidate.position.to_floating(), TILE_SIZE.to_floating(), "#666", ); draw.rectangle_on_map( 5, candidate.input_position().to_floating(), TILE_SIZE.to_floating(), "#555", ); } else { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#555", ); } } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct Reshuffle; impl SimpleActionTrait for Reshuffle { fn finish(&self, context: ActionUpdateContext) { let cards = &mut context.game.cards; cards.deck.shuffle(&mut rand::thread_rng()); cards.selected_index = Some(0); } }
{ draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#666", ); }
identifier_body
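actions.rs leans on the quintic smootherstep easing (x^3 * (6x^2 - 15x + 10) after clamping x into [0, 1]) to ramp costs in smoothly. A quick Python rendering of the same formula, useful only for sanity-checking the algebra; the Rust helper above remains the authoritative version.

def smootherstep(a, b, x):
    """Quintic ease: 0 for x <= a, 1 for x >= b, zero 1st/2nd derivatives at both ends."""
    x = min(max((x - a) / (b - a), 0.0), 1.0)
    return x * x * x * (x * (x * 6.0 - 15.0) + 10.0)

assert smootherstep(0.0, 1.0, -1.0) == 0.0   # clamped below
assert smootherstep(0.0, 1.0, 2.0) == 1.0    # clamped above
assert smootherstep(0.0, 1.0, 0.5) == 0.5    # symmetric about the midpoint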
actions.rs
use crate::cards::CardInstance; use crate::game::{Game, PlayerActionState, PlayerActiveInteraction, Time, UPDATE_DURATION}; use crate::geometry::{ Facing, FloatingVector, FloatingVectorExtension, GridVector, GridVectorExtension, Rotation, TILE_RADIUS, TILE_SIZE, TILE_WIDTH, }; use crate::mechanisms::{BuildMechanism, Conveyor, ConveyorSide, Mechanism, MechanismType}; use crate::ui_glue::Draw; use guard::guard; use ordered_float::OrderedFloat; use rand::prelude::*; use serde::{Deserialize, Serialize}; use std::fmt::Debug; pub enum ActionStatus { StillGoing, Completed, } pub struct ActionUpdateContext<'a> { pub game: &'a mut Game, } impl<'a> ActionUpdateContext<'a> { pub fn interaction_state(&self) -> &PlayerActiveInteraction { match &self.game.player.action_state { PlayerActionState::Interacting(i) => i, _ => unreachable!(), } } pub fn this_card(&self) -> &CardInstance { self.game.cards.selected().unwrap() } pub fn this_card_mut(&mut self) -> &mut CardInstance { self.game.cards.selected_mut().unwrap() } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Cost { Fixed(i32), Variable, None, } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct ActionDisplayInfo { pub name: String, pub health_cost: Cost, pub time_cost: Cost, pub rules_text: String, pub flavor_text: String, } impl Default for ActionDisplayInfo { fn default() -> Self { ActionDisplayInfo { name: "".to_string(), health_cost: Cost::None, time_cost: Cost::Fixed(2), rules_text: "".to_string(), flavor_text: "".to_string(), } } } #[allow(unused)] pub trait ActionTrait { /** Perform a single time-step update on this action, possibly modifying the game state. Note that the action is removed from `game` before doing this, so that both mutable references can be held at the same time, so the action still stored in `game` is temporarily invalid. */ fn update(&mut self, context: ActionUpdateContext) -> ActionStatus; fn display_info(&self) -> ActionDisplayInfo; fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw); fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Action: ActionTrait { SimpleAction, } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct SimpleAction { display_info: ActionDisplayInfo, is_card: bool, simple_action_type: SimpleActionType, progress: Time, cancel_progress: Time, } #[allow(unused)] pub trait SimpleActionTrait { fn finish(&self, context: ActionUpdateContext); fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) {} fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! 
{ #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum SimpleActionType: SimpleActionTrait { Reshuffle, BuildConveyor, BuildMechanism, } } fn smootherstep(a: f64, b: f64, x: f64) -> f64 { let x = ((x - a) / (b - a)).clamp(0.0, 1.0); x * x * x * (x * (x * 6.0 - 15.0) + 10.0) } impl SimpleAction { pub fn new( time_cost: i32, health_cost: Option<i32>, name: &str, rules_text: &str, flavor_text: &str, is_card: bool, simple_action_type: SimpleActionType, ) -> SimpleAction { SimpleAction { display_info: ActionDisplayInfo { name: name.to_string(), health_cost: match health_cost { Some(c) => Cost::Fixed(c), None => Cost::None, }, time_cost: Cost::Fixed(time_cost), rules_text: rules_text.to_string(), flavor_text: flavor_text.to_string(), }, is_card, simple_action_type, progress: 0.0, cancel_progress: 0.0, } } fn time_cost(&self) -> f64 { match self.display_info.time_cost { Cost::Fixed(cost) => cost as f64, _ => panic!(), } } fn health_cost(&self) -> f64 { match self.display_info.health_cost { Cost::Fixed(cost) => cost as f64, Cost::None => 0.0, _ => panic!(), } } fn cooldown_time(&self) -> f64 { self.time_cost() * 0.25 } fn startup_time(&self) -> f64 { self.time_cost() * 0.25 } fn finish_time(&self) -> f64 { self.time_cost() - self.cooldown_time() } fn
(&self) -> bool { self.progress > self.finish_time() } fn health_to_pay_by(&self, progress: f64) -> f64 { smootherstep(self.startup_time(), self.finish_time(), progress) * self.health_cost() } } impl ActionTrait for SimpleAction { fn update(&mut self, context: ActionUpdateContext) -> ActionStatus { let simple_action_type = self.simple_action_type.clone(); let canceled = context.interaction_state().canceled && !self.finished(); if canceled { self.cancel_progress += UPDATE_DURATION; } else { let was_finished = self.finished(); let health_paid_already = self.health_to_pay_by(self.progress); self.progress += UPDATE_DURATION; let health_payment = self.health_to_pay_by(self.progress) - health_paid_already; context.game.player.health -= health_payment; if self.finished() > was_finished { if self.is_card { match context.game.cards.selected_index { Some(index) => { if index + 1 == context.game.cards.deck.len() { context.game.cards.selected_index = None; } else { context.game.cards.selected_index = Some(index + 1); } } _ => unreachable!(), } } self.simple_action_type.finish(context); } } if self.progress > self.time_cost() || self.cancel_progress > self.cooldown_time() { ActionStatus::Completed } else { ActionStatus::StillGoing } } fn display_info(&self) -> ActionDisplayInfo { self.display_info.clone() } fn possible(&self, game: &Game) -> bool { self.simple_action_type.possible(game) } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_progress(game, draw); let a = game.player.position + FloatingVector::new(-TILE_RADIUS as f64 * 0.5, 0.0); draw.rectangle_on_map( 70, a, FloatingVector::new(TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64), "#000", ); draw.rectangle_on_map( 71, a, FloatingVector::new( TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64 * self.progress / self.time_cost(), ), "#ff0", ); } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_preview(game, draw); } } impl SimpleActionTrait for BuildMechanism { fn finish(&self, context: ActionUpdateContext) { context.game.create_mechanism( context.game.player.position.containing_tile(), self.mechanism(context.game), ); } fn possible(&self, game: &Game) -> bool { let position = game.player.position.containing_tile(); game.grid.get(position).is_some() && game.mechanism(position).is_none() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#666", ); } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct BuildConveyor { pub allow_splitting: bool, } #[derive(Copy, Clone, PartialEq, Serialize, Deserialize, Debug)] struct BuildConveyorCandidate { position: GridVector, input_side: Facing, } impl BuildConveyorCandidate { fn input_position(&self) -> GridVector { self.position + self.input_side.unit_vector() * TILE_WIDTH } fn output_side(&self) -> Facing { self.input_side + Rotation::U_TURN } } impl BuildConveyor { fn candidate_valid( game: &Game, candidate: BuildConveyorCandidate, allow_splitting: bool, ) -> bool { if game.grid.get(candidate.position).is_none() { return false; }; let output_mechanism = game.mechanism(candidate.position); //debug!("{:?}", (candidate, input_mechanism, output_mechanism)); guard!(let Some(input_mechanism) = game.mechanism(candidate.input_position()) else { return false }); if !input_mechanism .mechanism_type .can_be_material_source(candidate.output_side()) { return false; } if !allow_splitting { if 
matches!(&input_mechanism.mechanism_type, MechanismType::Conveyor(conveyor) if conveyor.sides.iter().filter(|&&side| side == ConveyorSide::Output).count() > 0) { return false; } } if let Some(output_mechanism) = output_mechanism { guard!(let Mechanism { mechanism_type: MechanismType::Conveyor(conveyor), .. } = output_mechanism else { return false }); if conveyor.sides[candidate.input_side.as_index()] != ConveyorSide::Disconnected { return false; } } true } /// the returned facing is the input side of the new conveyor fn current_target(game: &Game, allow_splitting: bool) -> Option<BuildConveyorCandidate> { let player_position = game.player.position.containing_tile(); let player_offset = game.player.position - player_position.to_floating(); let mut candidates = Vec::new(); let mut consider = |candidate, score| { if Self::candidate_valid(game, candidate, allow_splitting) { candidates.push((candidate, score)) } }; for facing in Facing::ALL_FACINGS { consider( BuildConveyorCandidate { position: player_position, input_side: facing, }, (player_offset - facing.unit_vector().to_floating()).magnitude_squared(), ); consider( BuildConveyorCandidate { position: player_position - facing.unit_vector() * TILE_WIDTH, input_side: facing, }, (player_offset - -facing.unit_vector().to_floating()).magnitude_squared(), ); } candidates .into_iter() .min_by_key(|&(_, score)| OrderedFloat(score)) .map(|(c, _)| c) } } impl SimpleActionTrait for BuildConveyor { fn finish(&self, context: ActionUpdateContext) { let candidate = Self::current_target(context.game, self.allow_splitting).unwrap(); let mut sides = [ConveyorSide::Disconnected; 4]; sides[candidate.input_side.as_index()] = ConveyorSide::Input; context.game.create_mechanism( candidate.position, Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, last_sent: Facing::from_index(0), }), }, ); context .game .mutate_mechanism(candidate.input_position(), |mechanism| { if let Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, .. }), .. } = mechanism { sides[candidate.output_side().as_index()] = ConveyorSide::Output; } }); } fn possible(&self, game: &Game) -> bool { Self::current_target(game, self.allow_splitting).is_some() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { if let Some(candidate) = Self::current_target(game, self.allow_splitting) { draw.rectangle_on_map( 5, candidate.position.to_floating(), TILE_SIZE.to_floating(), "#666", ); draw.rectangle_on_map( 5, candidate.input_position().to_floating(), TILE_SIZE.to_floating(), "#555", ); } else { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#555", ); } } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct Reshuffle; impl SimpleActionTrait for Reshuffle { fn finish(&self, context: ActionUpdateContext) { let cards = &mut context.game.cards; cards.deck.shuffle(&mut rand::thread_rng()); cards.selected_index = Some(0); } }
finished
identifier_name
actions.rs
use crate::cards::CardInstance; use crate::game::{Game, PlayerActionState, PlayerActiveInteraction, Time, UPDATE_DURATION}; use crate::geometry::{ Facing, FloatingVector, FloatingVectorExtension, GridVector, GridVectorExtension, Rotation, TILE_RADIUS, TILE_SIZE, TILE_WIDTH, }; use crate::mechanisms::{BuildMechanism, Conveyor, ConveyorSide, Mechanism, MechanismType}; use crate::ui_glue::Draw; use guard::guard; use ordered_float::OrderedFloat; use rand::prelude::*; use serde::{Deserialize, Serialize}; use std::fmt::Debug; pub enum ActionStatus { StillGoing, Completed, } pub struct ActionUpdateContext<'a> { pub game: &'a mut Game, } impl<'a> ActionUpdateContext<'a> { pub fn interaction_state(&self) -> &PlayerActiveInteraction { match &self.game.player.action_state { PlayerActionState::Interacting(i) => i, _ => unreachable!(), } } pub fn this_card(&self) -> &CardInstance { self.game.cards.selected().unwrap() } pub fn this_card_mut(&mut self) -> &mut CardInstance { self.game.cards.selected_mut().unwrap() } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Cost { Fixed(i32), Variable, None, } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct ActionDisplayInfo { pub name: String, pub health_cost: Cost, pub time_cost: Cost, pub rules_text: String, pub flavor_text: String, } impl Default for ActionDisplayInfo { fn default() -> Self { ActionDisplayInfo { name: "".to_string(), health_cost: Cost::None, time_cost: Cost::Fixed(2), rules_text: "".to_string(), flavor_text: "".to_string(), } } } #[allow(unused)] pub trait ActionTrait { /** Perform a single time-step update on this action, possibly modifying the game state. Note that the action is removed from `game` before doing this, so that both mutable references can be held at the same time, so the action still stored in `game` is temporarily invalid. */ fn update(&mut self, context: ActionUpdateContext) -> ActionStatus; fn display_info(&self) -> ActionDisplayInfo; fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw); fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Action: ActionTrait { SimpleAction, } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct SimpleAction { display_info: ActionDisplayInfo, is_card: bool, simple_action_type: SimpleActionType, progress: Time, cancel_progress: Time, } #[allow(unused)] pub trait SimpleActionTrait { fn finish(&self, context: ActionUpdateContext); fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) {} fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! 
{ #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum SimpleActionType: SimpleActionTrait { Reshuffle, BuildConveyor, BuildMechanism, } } fn smootherstep(a: f64, b: f64, x: f64) -> f64 { let x = ((x - a) / (b - a)).clamp(0.0, 1.0); x * x * x * (x * (x * 6.0 - 15.0) + 10.0) } impl SimpleAction { pub fn new( time_cost: i32, health_cost: Option<i32>, name: &str, rules_text: &str, flavor_text: &str, is_card: bool, simple_action_type: SimpleActionType, ) -> SimpleAction { SimpleAction { display_info: ActionDisplayInfo { name: name.to_string(), health_cost: match health_cost { Some(c) => Cost::Fixed(c), None => Cost::None, }, time_cost: Cost::Fixed(time_cost), rules_text: rules_text.to_string(), flavor_text: flavor_text.to_string(), }, is_card, simple_action_type, progress: 0.0, cancel_progress: 0.0, } } fn time_cost(&self) -> f64 { match self.display_info.time_cost { Cost::Fixed(cost) => cost as f64, _ => panic!(), } } fn health_cost(&self) -> f64 { match self.display_info.health_cost { Cost::Fixed(cost) => cost as f64, Cost::None => 0.0, _ => panic!(), } } fn cooldown_time(&self) -> f64 { self.time_cost() * 0.25 } fn startup_time(&self) -> f64 { self.time_cost() * 0.25 } fn finish_time(&self) -> f64 { self.time_cost() - self.cooldown_time() } fn finished(&self) -> bool { self.progress > self.finish_time() } fn health_to_pay_by(&self, progress: f64) -> f64 { smootherstep(self.startup_time(), self.finish_time(), progress) * self.health_cost() } } impl ActionTrait for SimpleAction { fn update(&mut self, context: ActionUpdateContext) -> ActionStatus { let simple_action_type = self.simple_action_type.clone(); let canceled = context.interaction_state().canceled && !self.finished(); if canceled { self.cancel_progress += UPDATE_DURATION; } else { let was_finished = self.finished(); let health_paid_already = self.health_to_pay_by(self.progress); self.progress += UPDATE_DURATION; let health_payment = self.health_to_pay_by(self.progress) - health_paid_already; context.game.player.health -= health_payment; if self.finished() > was_finished { if self.is_card { match context.game.cards.selected_index { Some(index) => { if index + 1 == context.game.cards.deck.len() { context.game.cards.selected_index = None; } else
} _ => unreachable!(), } } self.simple_action_type.finish(context); } } if self.progress > self.time_cost() || self.cancel_progress > self.cooldown_time() { ActionStatus::Completed } else { ActionStatus::StillGoing } } fn display_info(&self) -> ActionDisplayInfo { self.display_info.clone() } fn possible(&self, game: &Game) -> bool { self.simple_action_type.possible(game) } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_progress(game, draw); let a = game.player.position + FloatingVector::new(-TILE_RADIUS as f64 * 0.5, 0.0); draw.rectangle_on_map( 70, a, FloatingVector::new(TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64), "#000", ); draw.rectangle_on_map( 71, a, FloatingVector::new( TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64 * self.progress / self.time_cost(), ), "#ff0", ); } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_preview(game, draw); } } impl SimpleActionTrait for BuildMechanism { fn finish(&self, context: ActionUpdateContext) { context.game.create_mechanism( context.game.player.position.containing_tile(), self.mechanism(context.game), ); } fn possible(&self, game: &Game) -> bool { let position = game.player.position.containing_tile(); game.grid.get(position).is_some() && game.mechanism(position).is_none() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#666", ); } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct BuildConveyor { pub allow_splitting: bool, } #[derive(Copy, Clone, PartialEq, Serialize, Deserialize, Debug)] struct BuildConveyorCandidate { position: GridVector, input_side: Facing, } impl BuildConveyorCandidate { fn input_position(&self) -> GridVector { self.position + self.input_side.unit_vector() * TILE_WIDTH } fn output_side(&self) -> Facing { self.input_side + Rotation::U_TURN } } impl BuildConveyor { fn candidate_valid( game: &Game, candidate: BuildConveyorCandidate, allow_splitting: bool, ) -> bool { if game.grid.get(candidate.position).is_none() { return false; }; let output_mechanism = game.mechanism(candidate.position); //debug!("{:?}", (candidate, input_mechanism, output_mechanism)); guard!(let Some(input_mechanism) = game.mechanism(candidate.input_position()) else { return false }); if !input_mechanism .mechanism_type .can_be_material_source(candidate.output_side()) { return false; } if !allow_splitting { if matches!(&input_mechanism.mechanism_type, MechanismType::Conveyor(conveyor) if conveyor.sides.iter().filter(|&&side| side == ConveyorSide::Output).count() > 0) { return false; } } if let Some(output_mechanism) = output_mechanism { guard!(let Mechanism { mechanism_type: MechanismType::Conveyor(conveyor), .. 
} = output_mechanism else { return false }); if conveyor.sides[candidate.input_side.as_index()] != ConveyorSide::Disconnected { return false; } } true } /// the returned facing is the input side of the new conveyor fn current_target(game: &Game, allow_splitting: bool) -> Option<BuildConveyorCandidate> { let player_position = game.player.position.containing_tile(); let player_offset = game.player.position - player_position.to_floating(); let mut candidates = Vec::new(); let mut consider = |candidate, score| { if Self::candidate_valid(game, candidate, allow_splitting) { candidates.push((candidate, score)) } }; for facing in Facing::ALL_FACINGS { consider( BuildConveyorCandidate { position: player_position, input_side: facing, }, (player_offset - facing.unit_vector().to_floating()).magnitude_squared(), ); consider( BuildConveyorCandidate { position: player_position - facing.unit_vector() * TILE_WIDTH, input_side: facing, }, (player_offset - -facing.unit_vector().to_floating()).magnitude_squared(), ); } candidates .into_iter() .min_by_key(|&(_, score)| OrderedFloat(score)) .map(|(c, _)| c) } } impl SimpleActionTrait for BuildConveyor { fn finish(&self, context: ActionUpdateContext) { let candidate = Self::current_target(context.game, self.allow_splitting).unwrap(); let mut sides = [ConveyorSide::Disconnected; 4]; sides[candidate.input_side.as_index()] = ConveyorSide::Input; context.game.create_mechanism( candidate.position, Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, last_sent: Facing::from_index(0), }), }, ); context .game .mutate_mechanism(candidate.input_position(), |mechanism| { if let Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, .. }), .. } = mechanism { sides[candidate.output_side().as_index()] = ConveyorSide::Output; } }); } fn possible(&self, game: &Game) -> bool { Self::current_target(game, self.allow_splitting).is_some() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { if let Some(candidate) = Self::current_target(game, self.allow_splitting) { draw.rectangle_on_map( 5, candidate.position.to_floating(), TILE_SIZE.to_floating(), "#666", ); draw.rectangle_on_map( 5, candidate.input_position().to_floating(), TILE_SIZE.to_floating(), "#555", ); } else { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#555", ); } } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct Reshuffle; impl SimpleActionTrait for Reshuffle { fn finish(&self, context: ActionUpdateContext) { let cards = &mut context.game.cards; cards.deck.shuffle(&mut rand::thread_rng()); cards.selected_index = Some(0); } }
{ context.game.cards.selected_index = Some(index + 1); }
conditional_block
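SimpleAction::update above charges health incrementally: each tick it pays the difference between the eased cost at the new progress value and at the old one, so the running total telescopes to the full health cost regardless of tick length. The following Python sketch reproduces that bookkeeping with arbitrary numbers; the function names and parameters are illustrative, not the game's API.

def smootherstep(a, b, x):
    x = min(max((x - a) / (b - a), 0.0), 1.0)
    return x * x * x * (x * (x * 6.0 - 15.0) + 10.0)

def total_paid(health_cost, startup, finish, dt, duration):
    """Pay the per-tick difference of the eased curve, as the update loop does."""
    paid, progress = 0.0, 0.0
    while progress < duration:
        before = smootherstep(startup, finish, progress) * health_cost
        progress += dt
        paid += smootherstep(startup, finish, progress) * health_cost - before
    return paid

# The sum telescopes, so coarse and fine tick sizes both converge on the
# full cost (up to float rounding).
print(total_paid(8.0, startup=0.5, finish=1.5, dt=1.0 / 60.0, duration=2.0))
print(total_paid(8.0, startup=0.5, finish=1.5, dt=0.25, duration=2.0))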
actions.rs
use crate::cards::CardInstance; use crate::game::{Game, PlayerActionState, PlayerActiveInteraction, Time, UPDATE_DURATION}; use crate::geometry::{ Facing, FloatingVector, FloatingVectorExtension, GridVector, GridVectorExtension, Rotation, TILE_RADIUS, TILE_SIZE, TILE_WIDTH, }; use crate::mechanisms::{BuildMechanism, Conveyor, ConveyorSide, Mechanism, MechanismType}; use crate::ui_glue::Draw; use guard::guard; use ordered_float::OrderedFloat; use rand::prelude::*; use serde::{Deserialize, Serialize}; use std::fmt::Debug; pub enum ActionStatus { StillGoing, Completed, } pub struct ActionUpdateContext<'a> { pub game: &'a mut Game, } impl<'a> ActionUpdateContext<'a> { pub fn interaction_state(&self) -> &PlayerActiveInteraction { match &self.game.player.action_state { PlayerActionState::Interacting(i) => i, _ => unreachable!(), } } pub fn this_card(&self) -> &CardInstance { self.game.cards.selected().unwrap() } pub fn this_card_mut(&mut self) -> &mut CardInstance { self.game.cards.selected_mut().unwrap() } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Cost { Fixed(i32), Variable, None, } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct ActionDisplayInfo { pub name: String, pub health_cost: Cost, pub time_cost: Cost, pub rules_text: String, pub flavor_text: String, } impl Default for ActionDisplayInfo { fn default() -> Self { ActionDisplayInfo { name: "".to_string(), health_cost: Cost::None, time_cost: Cost::Fixed(2), rules_text: "".to_string(), flavor_text: "".to_string(), } } } #[allow(unused)] pub trait ActionTrait { /** Perform a single time-step update on this action, possibly modifying the game state. Note that the action is removed from `game` before doing this, so that both mutable references can be held at the same time, so the action still stored in `game` is temporarily invalid. */ fn update(&mut self, context: ActionUpdateContext) -> ActionStatus; fn display_info(&self) -> ActionDisplayInfo; fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw); fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! { #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum Action: ActionTrait { SimpleAction, } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct SimpleAction { display_info: ActionDisplayInfo, is_card: bool, simple_action_type: SimpleActionType, progress: Time, cancel_progress: Time, } #[allow(unused)] pub trait SimpleActionTrait { fn finish(&self, context: ActionUpdateContext); fn possible(&self, game: &Game) -> bool { true } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) {} fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) {} } trait_enum! 
{ #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub enum SimpleActionType: SimpleActionTrait { Reshuffle, BuildConveyor, BuildMechanism, } } fn smootherstep(a: f64, b: f64, x: f64) -> f64 { let x = ((x - a) / (b - a)).clamp(0.0, 1.0); x * x * x * (x * (x * 6.0 - 15.0) + 10.0) } impl SimpleAction { pub fn new( time_cost: i32, health_cost: Option<i32>, name: &str, rules_text: &str, flavor_text: &str, is_card: bool, simple_action_type: SimpleActionType, ) -> SimpleAction { SimpleAction { display_info: ActionDisplayInfo { name: name.to_string(), health_cost: match health_cost { Some(c) => Cost::Fixed(c), None => Cost::None, }, time_cost: Cost::Fixed(time_cost), rules_text: rules_text.to_string(), flavor_text: flavor_text.to_string(), }, is_card, simple_action_type, progress: 0.0, cancel_progress: 0.0, } } fn time_cost(&self) -> f64 { match self.display_info.time_cost { Cost::Fixed(cost) => cost as f64, _ => panic!(), } } fn health_cost(&self) -> f64 { match self.display_info.health_cost { Cost::Fixed(cost) => cost as f64, Cost::None => 0.0, _ => panic!(), } } fn cooldown_time(&self) -> f64 { self.time_cost() * 0.25 } fn startup_time(&self) -> f64 { self.time_cost() * 0.25 } fn finish_time(&self) -> f64 { self.time_cost() - self.cooldown_time() } fn finished(&self) -> bool { self.progress > self.finish_time() } fn health_to_pay_by(&self, progress: f64) -> f64 { smootherstep(self.startup_time(), self.finish_time(), progress) * self.health_cost() } } impl ActionTrait for SimpleAction { fn update(&mut self, context: ActionUpdateContext) -> ActionStatus { let simple_action_type = self.simple_action_type.clone(); let canceled = context.interaction_state().canceled && !self.finished(); if canceled { self.cancel_progress += UPDATE_DURATION; } else { let was_finished = self.finished(); let health_paid_already = self.health_to_pay_by(self.progress); self.progress += UPDATE_DURATION; let health_payment = self.health_to_pay_by(self.progress) - health_paid_already; context.game.player.health -= health_payment; if self.finished() > was_finished { if self.is_card { match context.game.cards.selected_index { Some(index) => { if index + 1 == context.game.cards.deck.len() { context.game.cards.selected_index = None; } else { context.game.cards.selected_index = Some(index + 1); } } _ => unreachable!(), } } self.simple_action_type.finish(context); } } if self.progress > self.time_cost() || self.cancel_progress > self.cooldown_time() { ActionStatus::Completed } else { ActionStatus::StillGoing } } fn display_info(&self) -> ActionDisplayInfo { self.display_info.clone() } fn possible(&self, game: &Game) -> bool { self.simple_action_type.possible(game) } fn draw_progress(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_progress(game, draw); let a = game.player.position + FloatingVector::new(-TILE_RADIUS as f64 * 0.5, 0.0); draw.rectangle_on_map( 70, a, FloatingVector::new(TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64), "#000", ); draw.rectangle_on_map( 71, a, FloatingVector::new( TILE_RADIUS as f64 * 0.25, TILE_WIDTH as f64 * self.progress / self.time_cost(), ), "#ff0", ); } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { self.simple_action_type.draw_preview(game, draw); } } impl SimpleActionTrait for BuildMechanism { fn finish(&self, context: ActionUpdateContext) { context.game.create_mechanism( context.game.player.position.containing_tile(), self.mechanism(context.game), ); } fn possible(&self, game: &Game) -> bool { let position = game.player.position.containing_tile(); 
game.grid.get(position).is_some() && game.mechanism(position).is_none() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#666", ); } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct BuildConveyor { pub allow_splitting: bool, } #[derive(Copy, Clone, PartialEq, Serialize, Deserialize, Debug)] struct BuildConveyorCandidate { position: GridVector, input_side: Facing, } impl BuildConveyorCandidate { fn input_position(&self) -> GridVector { self.position + self.input_side.unit_vector() * TILE_WIDTH } fn output_side(&self) -> Facing { self.input_side + Rotation::U_TURN } } impl BuildConveyor { fn candidate_valid( game: &Game, candidate: BuildConveyorCandidate, allow_splitting: bool, ) -> bool { if game.grid.get(candidate.position).is_none() { return false; }; let output_mechanism = game.mechanism(candidate.position); //debug!("{:?}", (candidate, input_mechanism, output_mechanism)); guard!(let Some(input_mechanism) = game.mechanism(candidate.input_position()) else { return false }); if !input_mechanism .mechanism_type .can_be_material_source(candidate.output_side()) { return false; } if !allow_splitting { if matches!(&input_mechanism.mechanism_type, MechanismType::Conveyor(conveyor) if conveyor.sides.iter().filter(|&&side| side == ConveyorSide::Output).count() > 0) { return false; } } if let Some(output_mechanism) = output_mechanism { guard!(let Mechanism { mechanism_type: MechanismType::Conveyor(conveyor), .. } = output_mechanism else { return false }); if conveyor.sides[candidate.input_side.as_index()] != ConveyorSide::Disconnected { return false; } } true } /// the returned facing is the input side of the new conveyor fn current_target(game: &Game, allow_splitting: bool) -> Option<BuildConveyorCandidate> { let player_position = game.player.position.containing_tile(); let player_offset = game.player.position - player_position.to_floating(); let mut candidates = Vec::new(); let mut consider = |candidate, score| { if Self::candidate_valid(game, candidate, allow_splitting) { candidates.push((candidate, score)) } }; for facing in Facing::ALL_FACINGS { consider( BuildConveyorCandidate { position: player_position, input_side: facing, }, (player_offset - facing.unit_vector().to_floating()).magnitude_squared(), ); consider( BuildConveyorCandidate { position: player_position - facing.unit_vector() * TILE_WIDTH, input_side: facing, }, (player_offset - -facing.unit_vector().to_floating()).magnitude_squared(), ); } candidates .into_iter() .min_by_key(|&(_, score)| OrderedFloat(score)) .map(|(c, _)| c) } } impl SimpleActionTrait for BuildConveyor { fn finish(&self, context: ActionUpdateContext) { let candidate = Self::current_target(context.game, self.allow_splitting).unwrap(); let mut sides = [ConveyorSide::Disconnected; 4]; sides[candidate.input_side.as_index()] = ConveyorSide::Input; context.game.create_mechanism( candidate.position, Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, last_sent: Facing::from_index(0), }), }, ); context .game .mutate_mechanism(candidate.input_position(), |mechanism| { if let Mechanism { mechanism_type: MechanismType::Conveyor(Conveyor { sides, .. }), .. 
} = mechanism { sides[candidate.output_side().as_index()] = ConveyorSide::Output; } }); } fn possible(&self, game: &Game) -> bool { Self::current_target(game, self.allow_splitting).is_some() } fn draw_preview(&self, game: &Game, draw: &mut dyn Draw) { if let Some(candidate) = Self::current_target(game, self.allow_splitting) { draw.rectangle_on_map( 5, candidate.position.to_floating(), TILE_SIZE.to_floating(), "#666", ); draw.rectangle_on_map( 5, candidate.input_position().to_floating(), TILE_SIZE.to_floating(), "#555", ); } else { draw.rectangle_on_map( 5, game.player.position.containing_tile().to_floating(), TILE_SIZE.to_floating(), "#555", ); } } } #[derive(Clone, PartialEq, Serialize, Deserialize, Debug)] pub struct Reshuffle; impl SimpleActionTrait for Reshuffle { fn finish(&self, context: ActionUpdateContext) { let cards = &mut context.game.cards; cards.deck.shuffle(&mut rand::thread_rng()); cards.selected_index = Some(0); }
}
random_line_split
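current_target above scores every candidate tile/side pair by squared distance from the player's offset, drops the invalid ones, and keeps the minimum; OrderedFloat appears only because f64 is not Ord in Rust, so it cannot be used directly as a min_by_key key. The same filter-then-min pattern in Python, with toy stand-ins for the candidate list, validity rule, and scoring function:

def pick_nearest_valid(candidates, is_valid, score):
    """Return the valid candidate with the smallest score, or None if none qualify."""
    scored = [(c, score(c)) for c in candidates if is_valid(c)]
    return min(scored, key=lambda pair: pair[1])[0] if scored else None

# Toy usage: candidates are (tile, input_side) pairs, scored by squared distance.
cands = [((0, 0), "east"), ((0, 0), "west"), ((-1, 0), "east")]
print(pick_nearest_valid(cands,
                         is_valid=lambda c: c[1] != "west",
                         score=lambda c: sum(x * x for x in c[0])))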
train.py
import argparse from prepro import read_correction import os import numpy as np import torch from apex import amp import ujson as json from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModel, AutoTokenizer from transformers.optimization import AdamW, get_linear_schedule_with_warmup from model import DocREModel from utils import set_seed, collate_fn from prepro import read_docred from evaluation import to_official, official_evaluate #import wandb from tqdm import tqdm import pickle def train(args, model, train_features, dev_features, test_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator): model.zero_grad() for step, batch in tqdm(enumerate(train_dataloader)): model.train() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2], 'entity_pos': batch[3], 'hts': batch[4], } outputs = model(**inputs) loss = outputs[0] / args.gradient_accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() num_steps += 1 #wandb.log({"loss": loss.item()}, step=num_steps) if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): ''' dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") #wandb.log(dev_output, step=num_steps) print(dev_output) if dev_score > best_score: best_score = dev_score pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if args.save_path != "": torch.save(model.state_dict(), args.save_path) ''' if args.save_path != "": torch.save(model.state_dict(), args.save_path+'_'+str(epoch+1)+'_'+str(step)+'.pt') return num_steps new_layer = ["extractor", "bilinear"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": 1e-4}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 
preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) ans = to_official(preds, features) best_f1 = 0.0 best_f1_ign = 0.0 if len(ans) > 0: best_f1, _, best_f1_ign, _ = official_evaluate(ans, args.data_dir) output = { tag + "_F1": best_f1 * 100, tag + "_F1_ign": best_f1_ign * 100, } return best_f1, output def report(args, model, features): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) preds = to_official(preds, features) return preds def main():
if __name__ == "__main__": main()
parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default="./dataset/docred", type=str) parser.add_argument("--transformer_type", default="bert", type=str) parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str) parser.add_argument("--train_file", default="train_annotated.json", type=str) parser.add_argument("--dev_file", default="dev.json", type=str) parser.add_argument("--test_file", default="test.json", type=str) parser.add_argument("--save_path", default="./ckpt/base.pt", type=str) parser.add_argument("--load_path", default="", type=str) parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--train_batch_size", default=4, type=int, help="Batch size for training.") parser.add_argument("--test_batch_size", default=8, type=int, help="Batch size for testing.") parser.add_argument("--gradient_accumulation_steps", default=1, type=int, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--num_labels", default=4, type=int, help="Max number of labels in prediction.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.") parser.add_argument("--num_train_epochs", default=30.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--evaluation_steps", default=7500, type=int, help="Number of training steps between evaluations.") parser.add_argument("--seed", type=int, default=66, help="random seed for initialization") parser.add_argument("--num_class", type=int, default=97, help="Number of relation types in dataset.") args = parser.parse_args() #wandb.init(project="DocRED") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) read = read_correction #read = read_docred train_file = os.path.join(args.data_dir, args.train_file) dev_file = os.path.join(args.data_dir, args.dev_file) test_file = os.path.join(args.data_dir, args.test_file) train_file_p = train_file.replace('.json', '.pkl') dev_file_p = dev_file.replace('.json', '.pkl') #test_file_p = test_file.replace('.json', 'pkl') if os.path.exists(train_file_p): with open(train_file_p, 'rb') as f: train_features = pickle.load(f) else: train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length) with open(train_file_p, 'wb') as f: pickle.dump(train_features, f) if os.path.exists(dev_file_p): with open(dev_file_p, 'rb') as f: dev_features = pickle.load(f) else: dev_features = read(dev_file, tokenizer, 
max_seq_length=args.max_seq_length) with open(dev_file_p, 'wb') as f: pickle.dump(dev_features, f) test_features = dev_features model = AutoModel.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) config.cls_token_id = tokenizer.cls_token_id config.sep_token_id = tokenizer.sep_token_id config.transformer_type = args.transformer_type set_seed(args) model = DocREModel(config, model, num_labels=args.num_labels) model.to(0) if args.load_path == "": # Training train(args, model, train_features, dev_features, test_features) else: # Testing model = amp.initialize(model, opt_level="O1", verbosity=0) model.load_state_dict(torch.load(args.load_path)) dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") print(dev_output) pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh)
identifier_body
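The training loop in the train.py sample above pairs apex O1 mixed precision with gradient accumulation: the loss is divided by gradient_accumulation_steps before backward, and the optimizer, scheduler, and zero_grad only run when step % gradient_accumulation_steps == 0. A minimal sketch of that accumulation pattern in plain PyTorch (no apex; the toy model and data below are hypothetical and not part of the sample):

import torch
from torch import nn

model = nn.Linear(10, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
accumulation_steps = 4
batches = [(torch.randn(8, 10), torch.randn(8, 1)) for _ in range(16)]

model.zero_grad()
for step, (x, y) in enumerate(batches):
    loss = nn.functional.mse_loss(model(x), y)
    # Scale so the summed gradient approximates one large-batch update.
    (loss / accumulation_steps).backward()
    if step % accumulation_steps == 0:  # mirrors the guard used in the sample above
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        model.zero_grad()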
train.py
import argparse from prepro import read_correction import os import numpy as np import torch from apex import amp import ujson as json from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModel, AutoTokenizer from transformers.optimization import AdamW, get_linear_schedule_with_warmup from model import DocREModel from utils import set_seed, collate_fn from prepro import read_docred from evaluation import to_official, official_evaluate #import wandb from tqdm import tqdm import pickle def train(args, model, train_features, dev_features, test_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator): model.zero_grad() for step, batch in tqdm(enumerate(train_dataloader)): model.train() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2], 'entity_pos': batch[3], 'hts': batch[4], } outputs = model(**inputs) loss = outputs[0] / args.gradient_accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() num_steps += 1 #wandb.log({"loss": loss.item()}, step=num_steps) if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): ''' dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") #wandb.log(dev_output, step=num_steps) print(dev_output) if dev_score > best_score: best_score = dev_score pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if args.save_path != "": torch.save(model.state_dict(), args.save_path) ''' if args.save_path != "": torch.save(model.state_dict(), args.save_path+'_'+str(epoch+1)+'_'+str(step)+'.pt') return num_steps new_layer = ["extractor", "bilinear"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": 1e-4}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def
(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) ans = to_official(preds, features) best_f1 = 0.0 best_f1_ign = 0.0 if len(ans) > 0: best_f1, _, best_f1_ign, _ = official_evaluate(ans, args.data_dir) output = { tag + "_F1": best_f1 * 100, tag + "_F1_ign": best_f1_ign * 100, } return best_f1, output def report(args, model, features): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) preds = to_official(preds, features) return preds def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default="./dataset/docred", type=str) parser.add_argument("--transformer_type", default="bert", type=str) parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str) parser.add_argument("--train_file", default="train_annotated.json", type=str) parser.add_argument("--dev_file", default="dev.json", type=str) parser.add_argument("--test_file", default="test.json", type=str) parser.add_argument("--save_path", default="./ckpt/base.pt", type=str) parser.add_argument("--load_path", default="", type=str) parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--train_batch_size", default=4, type=int, help="Batch size for training.") parser.add_argument("--test_batch_size", default=8, type=int, help="Batch size for testing.") parser.add_argument("--gradient_accumulation_steps", default=1, type=int, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--num_labels", default=4, type=int, help="Max number of labels in prediction.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.") parser.add_argument("--num_train_epochs", default=30.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--evaluation_steps", default=7500, type=int, help="Number of training steps between evaluations.") parser.add_argument("--seed", type=int, default=66, help="random seed for initialization") parser.add_argument("--num_class", type=int, default=97, help="Number of relation types in dataset.") args = parser.parse_args() #wandb.init(project="DocRED") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) read = read_correction #read = read_docred train_file = os.path.join(args.data_dir, args.train_file) dev_file = os.path.join(args.data_dir, args.dev_file) test_file = os.path.join(args.data_dir, args.test_file) train_file_p = train_file.replace('.json', '.pkl') dev_file_p = dev_file.replace('.json', '.pkl') #test_file_p = test_file.replace('.json', 'pkl') if os.path.exists(train_file_p): with open(train_file_p, 'rb') as f: train_features = pickle.load(f) else: train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length) with open(train_file_p, 'wb') as f: pickle.dump(train_features, f) if os.path.exists(dev_file_p): with open(dev_file_p, 'rb') as f: dev_features = pickle.load(f) else: dev_features = read(dev_file, tokenizer, max_seq_length=args.max_seq_length) with open(dev_file_p, 'wb') as f: pickle.dump(dev_features, f) test_features = dev_features model = AutoModel.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) config.cls_token_id = tokenizer.cls_token_id config.sep_token_id = tokenizer.sep_token_id config.transformer_type = args.transformer_type set_seed(args) model = DocREModel(config, model, num_labels=args.num_labels) model.to(0) if args.load_path == "": # Training train(args, model, train_features, dev_features, test_features) else: # Testing model = amp.initialize(model, opt_level="O1", verbosity=0) model.load_state_dict(torch.load(args.load_path)) dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") print(dev_output) pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if __name__ == "__main__": main()
evaluate
identifier_name
train.py
import argparse from prepro import read_correction import os import numpy as np import torch from apex import amp import ujson as json from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModel, AutoTokenizer from transformers.optimization import AdamW, get_linear_schedule_with_warmup from model import DocREModel from utils import set_seed, collate_fn from prepro import read_docred from evaluation import to_official, official_evaluate #import wandb from tqdm import tqdm import pickle def train(args, model, train_features, dev_features, test_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator): model.zero_grad() for step, batch in tqdm(enumerate(train_dataloader)): model.train() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2], 'entity_pos': batch[3], 'hts': batch[4], } outputs = model(**inputs) loss = outputs[0] / args.gradient_accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() num_steps += 1 #wandb.log({"loss": loss.item()}, step=num_steps) if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0):
dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") #wandb.log(dev_output, step=num_steps) print(dev_output) if dev_score > best_score: best_score = dev_score pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if args.save_path != "": torch.save(model.state_dict(), args.save_path) ''' if args.save_path != "": torch.save(model.state_dict(), args.save_path+'_'+str(epoch+1)+'_'+str(step)+'.pt') return num_steps new_layer = ["extractor", "bilinear"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": 1e-4}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) ans = to_official(preds, features) best_f1 = 0.0 best_f1_ign = 0.0 if len(ans) > 0: best_f1, _, best_f1_ign, _ = official_evaluate(ans, args.data_dir) output = { tag + "_F1": best_f1 * 100, tag + "_F1_ign": best_f1_ign * 100, } return best_f1, output def report(args, model, features): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) preds = to_official(preds, features) return preds def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default="./dataset/docred", type=str) parser.add_argument("--transformer_type", default="bert", type=str) parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str) parser.add_argument("--train_file", default="train_annotated.json", type=str) parser.add_argument("--dev_file", default="dev.json", type=str) parser.add_argument("--test_file", default="test.json", type=str) parser.add_argument("--save_path", default="./ckpt/base.pt", type=str) parser.add_argument("--load_path", default="", type=str) parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--train_batch_size", default=4, type=int, help="Batch size for training.") parser.add_argument("--test_batch_size", default=8, type=int, help="Batch size for testing.") parser.add_argument("--gradient_accumulation_steps", default=1, type=int, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--num_labels", default=4, type=int, help="Max number of labels in prediction.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.") parser.add_argument("--num_train_epochs", default=30.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--evaluation_steps", default=7500, type=int, help="Number of training steps between evaluations.") parser.add_argument("--seed", type=int, default=66, help="random seed for initialization") parser.add_argument("--num_class", type=int, default=97, help="Number of relation types in dataset.") args = parser.parse_args() #wandb.init(project="DocRED") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) read = read_correction #read = read_docred train_file = os.path.join(args.data_dir, args.train_file) dev_file = os.path.join(args.data_dir, args.dev_file) test_file = os.path.join(args.data_dir, args.test_file) train_file_p = train_file.replace('.json', '.pkl') dev_file_p = dev_file.replace('.json', '.pkl') #test_file_p = test_file.replace('.json', 'pkl') if os.path.exists(train_file_p): with open(train_file_p, 'rb') as f: train_features = pickle.load(f) else: train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length) with open(train_file_p, 'wb') as f: pickle.dump(train_features, f) if os.path.exists(dev_file_p): with open(dev_file_p, 'rb') as f: dev_features = pickle.load(f) else: dev_features = read(dev_file, tokenizer, max_seq_length=args.max_seq_length) with open(dev_file_p, 'wb') as f: pickle.dump(dev_features, f) test_features = dev_features model = AutoModel.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) config.cls_token_id = tokenizer.cls_token_id config.sep_token_id = tokenizer.sep_token_id config.transformer_type = args.transformer_type set_seed(args) model = DocREModel(config, model, num_labels=args.num_labels) model.to(0) if args.load_path == "": # Training train(args, model, train_features, dev_features, test_features) else: # Testing model = amp.initialize(model, opt_level="O1", verbosity=0) model.load_state_dict(torch.load(args.load_path)) dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") print(dev_output) pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if __name__ == "__main__": main()
'''
random_line_split
train.py
import argparse from prepro import read_correction import os import numpy as np import torch from apex import amp import ujson as json from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModel, AutoTokenizer from transformers.optimization import AdamW, get_linear_schedule_with_warmup from model import DocREModel from utils import set_seed, collate_fn from prepro import read_docred from evaluation import to_official, official_evaluate #import wandb from tqdm import tqdm import pickle def train(args, model, train_features, dev_features, test_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator): model.zero_grad() for step, batch in tqdm(enumerate(train_dataloader)): model.train() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'labels': batch[2], 'entity_pos': batch[3], 'hts': batch[4], } outputs = model(**inputs) loss = outputs[0] / args.gradient_accumulation_steps with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() num_steps += 1 #wandb.log({"loss": loss.item()}, step=num_steps) if (step + 1) == len(train_dataloader) - 1 or (args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): ''' dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") #wandb.log(dev_output, step=num_steps) print(dev_output) if dev_score > best_score: best_score = dev_score pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if args.save_path != "": torch.save(model.state_dict(), args.save_path) ''' if args.save_path != "": torch.save(model.state_dict(), args.save_path+'_'+str(epoch+1)+'_'+str(step)+'.pt') return num_steps new_layer = ["extractor", "bilinear"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": 1e-4}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 
preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) ans = to_official(preds, features) best_f1 = 0.0 best_f1_ign = 0.0 if len(ans) > 0:
output = { tag + "_F1": best_f1 * 100, tag + "_F1_ign": best_f1_ign * 100, } return best_f1, output def report(args, model, features): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds = [] for batch in dataloader: model.eval() inputs = {'input_ids': batch[0].to(args.device), 'attention_mask': batch[1].to(args.device), 'entity_pos': batch[3], 'hts': batch[4], } with torch.no_grad(): pred, *_ = model(**inputs) pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) preds = np.concatenate(preds, axis=0).astype(np.float32) preds = to_official(preds, features) return preds def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default="./dataset/docred", type=str) parser.add_argument("--transformer_type", default="bert", type=str) parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str) parser.add_argument("--train_file", default="train_annotated.json", type=str) parser.add_argument("--dev_file", default="dev.json", type=str) parser.add_argument("--test_file", default="test.json", type=str) parser.add_argument("--save_path", default="./ckpt/base.pt", type=str) parser.add_argument("--load_path", default="", type=str) parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--train_batch_size", default=4, type=int, help="Batch size for training.") parser.add_argument("--test_batch_size", default=8, type=int, help="Batch size for testing.") parser.add_argument("--gradient_accumulation_steps", default=1, type=int, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--num_labels", default=4, type=int, help="Max number of labels in prediction.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--warmup_ratio", default=0.06, type=float, help="Warm up ratio for Adam.") parser.add_argument("--num_train_epochs", default=30.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--evaluation_steps", default=7500, type=int, help="Number of training steps between evaluations.") parser.add_argument("--seed", type=int, default=66, help="random seed for initialization") parser.add_argument("--num_class", type=int, default=97, help="Number of relation types in dataset.") args = parser.parse_args() #wandb.init(project="DocRED") device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") args.n_gpu = torch.cuda.device_count() args.device = device config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=args.num_class, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, ) read = read_correction #read = read_docred train_file = os.path.join(args.data_dir, 
args.train_file) dev_file = os.path.join(args.data_dir, args.dev_file) test_file = os.path.join(args.data_dir, args.test_file) train_file_p = train_file.replace('.json', '.pkl') dev_file_p = dev_file.replace('.json', '.pkl') #test_file_p = test_file.replace('.json', 'pkl') if os.path.exists(train_file_p): with open(train_file_p, 'rb') as f: train_features = pickle.load(f) else: train_features = read(train_file, tokenizer, max_seq_length=args.max_seq_length) with open(train_file_p, 'wb') as f: pickle.dump(train_features, f) if os.path.exists(dev_file_p): with open(dev_file_p, 'rb') as f: dev_features = pickle.load(f) else: dev_features = read(dev_file, tokenizer, max_seq_length=args.max_seq_length) with open(dev_file_p, 'wb') as f: pickle.dump(dev_features, f) test_features = dev_features model = AutoModel.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) config.cls_token_id = tokenizer.cls_token_id config.sep_token_id = tokenizer.sep_token_id config.transformer_type = args.transformer_type set_seed(args) model = DocREModel(config, model, num_labels=args.num_labels) model.to(0) if args.load_path == "": # Training train(args, model, train_features, dev_features, test_features) else: # Testing model = amp.initialize(model, opt_level="O1", verbosity=0) model.load_state_dict(torch.load(args.load_path)) dev_score, dev_output = evaluate(args, model, dev_features, tag="dev") print(dev_output) pred = report(args, model, test_features) with open("result.json", "w") as fh: json.dump(pred, fh) if __name__ == "__main__": main()
best_f1, _, best_f1_ign, _ = official_evaluate(ans, args.data_dir)
conditional_block
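In main() above, preprocessed features are cached next to the raw JSON files: if a matching .pkl file exists it is unpickled, otherwise the reader runs and its output is pickled for later runs. A small sketch of that cache-or-compute pattern (the load_or_build name and read_fn argument are hypothetical, not part of the sample):

import os
import pickle

def load_or_build(json_path, read_fn, **read_kwargs):
    # Cache the reader's output next to the raw JSON file.
    pkl_path = json_path.replace('.json', '.pkl')
    if os.path.exists(pkl_path):
        with open(pkl_path, 'rb') as f:
            return pickle.load(f)
    features = read_fn(json_path, **read_kwargs)
    with open(pkl_path, 'wb') as f:
        pickle.dump(features, f)
    return features

Used as, for example, train_features = load_or_build(train_file, read, max_seq_length=args.max_seq_length).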
Set.go
// PRELIMINARIES: package Set import ( "fmt" "strconv" ) // ***************************************************************************** // STRUCTS AND INTERFACES: type setMember interface{} // Lets the members of a set be, in effect, of arbitrary type. type Set struct { mem map[setMember]bool } /* The members of a set are a hashmap seeing if a set contains a certain element. */ /* I do not allow multiplicites. This is a bit of a philosophical standpoint, but I think the elements of a set are a kind of representation of an object rather than objects in and of themselves. E.g. in the set {1, 1, 2, 3} there is no discernible difference between the two ones, and so there is no real difference between {1, 1, 2, 3} and {1, 2, 3}, since the additional one does not bring any new information. This standpoint is consistent with the usual mathematical definition of being a member of a set.*/ // ***************************************************************************** // METHODS AND FUNCTIONS: // Constructor of the set. func NewSet() Set { var emptySet Set emptySet.mem = make(map[setMember]bool) return emptySet } // ----------------------------------------------------------------------------- // Add an element to the set. func (s Set) Append(newMem setMember) { s.mem[newMem] = true } // ----------------------------------------------------------------------------- /* Remove an element from the set. If it is already not in the set, the method does nothing. */ func (s Set) Remove(removeMem setMember) { delete(s.mem, removeMem) } // ----------------------------------------------------------------------------- // Set equality: Two sets are equal if they have the same elements. func Equals(oneSet, otherSet Set) bool { if len(oneSet.mem) != len(otherSet.mem) { return false } /* Obviously, if the sets have different numbers of elements, they are not equal. */ for k := range oneSet.mem { if otherSet.mem[k] == false { return false } } /* If the sets have an equal number of elements and an element in one of the sets is not in the other set, they are not equal. */ return true } // ----------------------------------------------------------------------------- /* The union of two sets is the set of members of either (or both) sets. This function is a method which acts on a set and returns the union of it and another set specified in the argument of the function. */ func (s Set) Union(otherSet Set) Set { unionSet := NewSet() for k := range s.mem { unionSet.mem[k] = true } // Adds the elements of the set to the union set. for k := range otherSet.mem { unionSet.mem[k] = true } // Adds the elements of the set in the argument to the union set. return unionSet } // ----------------------------------------------------------------------------- /* The intersection of two sets is the set of members of both sets. This function is a method which acts on a set and returns the intersection of it and another set specified in the argument of the function. */ func (s Set) Intersection(otherSet Set) Set { interSet := NewSet() for k := range s.mem { if otherSet.mem[k] == true { interSet.mem[k] = true // Adds an element to the intersection set if it is a member of both sets. } } return interSet } // ----------------------------------------------------------------------------- /* The relative complement of a set relative to another set (call it universe) is the set of members of set which are not members of universe. 
This method returns the relative complement of the set it acts upon relative to the set specified in the argument.*/ func (s Set) RelCompl(universe Set) Set { relSet := NewSet() for k := range s.mem { if universe.mem[k] == false { relSet.mem[k] = true // Adds a member to the relative set if it is in s but not in universe. } } return relSet } // ----------------------------------------------------------------------------- // Return a slice containing all subsets of mySet. func PowerSet(mySet Set) []Set { powSet := []Set{} mySlice := []setMember{} mySubSlices := [][]setMember{} /* The function first breaks the set down into a slice which is easier to work with. The problem then amounts to finding all subslices of this slice. This can be done by realizing that, since the power set of a set with N elements has 2^N elements, these elements can be represented by binary strings with N digits. This is what the subset() function below does. */ for k := range mySet.mem { mySlice = append(mySlice, k) } mySubSlices = subsets(mySlice) // Generate the slice containing all subslices of this slice. for i := 0; i < len(mySubSlices); i++ { newSubSet := NewSet() for k := 0; k < len(mySubSlices[i]); k++ { newSubSet.mem[mySubSlices[i][k]] = true } powSet = append(powSet, newSubSet) } return powSet } /* This function takes a slice as input and returns a slice containing all subslices of this slice. */ func subsets(mySlice []setMember) [][]setMember { indexSlice := []string{} subSetSlice := [][]setMember{} binaryLen := "%0" + strconv.Itoa(len(mySlice)) + "b" for i := 0; i < pow(2, len(mySlice)); i++ { indexSlice = append(indexSlice, fmt.Sprintf(binaryLen, i)) } /* Generate all binary numbers from 0 to 2^len(mySlice)-1 and set their number of digits to len(mySlice). */ for i := 0; i < pow(2, len(mySlice)); i++ { subsets := []setMember{} for k := 0; k < len(mySlice); k++ { if indexSlice[i][k] == byte('1') { subsets = append(subsets, mySlice[k]) } /* Every binary number from 0 to 2^(len(mySLice))-1 represents a subset of mySlice: iterate through every binary number, and if the k:th element of the binary number is 1, append the k:th element of mySlice to subsets, which represents a given subset. When all the binary numbers are iterated through, every possible subset of mySlice will have been generated through this process, since there are 2^len(mySlice) unique binary numbers and 2^len(mySlice) subsets of mySlice, counting the empty slice. */ } subSetSlice = append(subSetSlice, subsets) // Append the generated subset to the subSetSlice. } return subSetSlice } /* There is no native integer exponential method in Go. I used this basic implementation I found on: http://grokbase.com/t/gg/golang-nuts/139n3edtq3/go-nuts-integer-exponentiation-in-go */ func pow(a, exp int) int { p := 1 for exp > 0 { if exp&1 != 0 { p *= a } exp >>= 1 a *= a } return p } // Testing some conditions put on the power set. func testPowerSet(subSetSlice []Set, theSet Set) { allElemsInPowSet := NewSet() if len(subSetSlice) != pow(2, len(theSet.mem)) { panic("Error in number of elements in PowerSet") } // Check if the length of the set relative to the power set align. for i := 0; i < len(subSetSlice); i++ { for k := range subSetSlice[i].mem { allElemsInPowSet.Append(k) // Collect all elements found in the power set. if theSet.mem[k] == false { panic("Error in allowed set members in PowerSet") // Checks if some elements of the power set are not in the set. 
} } } for i := range allElemsInPowSet.mem { if theSet.mem[i] == false { panic("Error: elements missing in PowerSet") } } // Check if some elements of the set are not in the power set. } // ----------------------------------------------------------------------------- /* Return a string representation of the set. */ func (s *Set)
() string { mySetSlice := []setMember{} for i := range s.mem { mySetSlice = append(mySetSlice, i) } myString := fmt.Sprint(mySetSlice) return myString } func testSetString(theSetString string, theSet Set) { counter := 0 for i := range theSet.mem { for k := 0; k < len(theSetString); k++ { if fmt.Sprint(i) == string(theSetString[k]) { counter += 1 } } } if counter != len(theSet.mem) { panic("Error in SetString: wrong number of elements") } // Seeing if every element of theSet can be found as a string in theSetString. } // ***************************************************************************** // TESTING: func main() { theSet := NewSet() theOtherSet := NewSet() emptySet := NewSet() // --------------------------------------------------------------------------- // TESTING OF EMPTY SET AND ONE-ELEMENT SET OPERATIONS: if Equals(theSet, theOtherSet) != true { panic("Error handling empty set in Equals") } testPowerSet(PowerSet(emptySet), emptySet) theSet.Append(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theSet.Intersection(emptySet), emptySet) != true { panic("Error handling intersection of set with emptySet") } if Equals(theSet.Union(emptySet), theSet) != true { panic("Error handling union of set with emptySet") } // Testing union and intersection on emptySet. if Equals(theSet, theOtherSet) != false { panic("Error handling comparing empty to one-element set in Equals") } if Equals(theSet, theSet) != true { panic("Error handling comparing set to itself in Equals") } theOtherSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error handling comparing one-element sets in Equals") } // Testing Equals with one and zero-element sets. theSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error in handling same-element appending in Append") } testPowerSet(PowerSet(theSet), theSet) if len(PowerSet(emptySet)) != 1 { panic("Error in handling power set of empty set") } if Equals(theSet.RelCompl(emptySet), theSet) != true { panic("Error in handling relative complement of set and empty set in RelCompl") } if Equals(theSet.RelCompl(theOtherSet), emptySet) != true { panic("Error in handling relative complement of one-element sets in RelCompl") } // Testing RelCompl with one- and zero-element sets. if Equals(emptySet.Intersection(emptySet), emptySet) != true { panic("Error in handling intersection of empty set with itself in Intersection") } if Equals(emptySet.Union(emptySet), emptySet) != true { panic("Error in handling union of non-empty with itself in Union") } // Testing of methods on emptySet. if Equals(theSet.Intersection(theSet), theSet) != true { panic("Error in handling intersection of non-empty set with itself in Intersection") } if Equals(theSet.Union(theSet), theSet) != true { panic("Error in handling union of non-empty set with itself in Union") } // Testing of methods on non-empty set. theOtherSet.Remove(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theOtherSet, emptySet) != true { panic("Error in handling removal of element in one-element set in Remove") } theOtherSet.Remove("k") if Equals(theOtherSet, emptySet) != true { panic("Error in handling removal of non-existent element in Remove") } // Testing Remove. // --------------------------------------------------------------------------- // TESTING MULTI-ELEMENT SET OPERATIONS: theSet.Append(2) theSet.Append(3) theSet.Append("a") theSet.Append("b") theOtherSet.Append(1) theOtherSet.Append(2) theOtherSet.Append(3) theOtherSet.Append("a") theOtherSet.Append("b") // Adding some elements to the sets. 
testPowerSet(PowerSet(theSet), theSet) if Equals(theSet, theOtherSet) != true { panic("Error in handling multi-element set in Equals") } theOtherSet.Append("c") if Equals(theSet, theOtherSet) != false { panic("Error in handling multi-element set in Equals") } // Testing Equals. for i := 0; i < 12; i++ { theOtherSet.Append(i) } testPowerSet(PowerSet(theOtherSet), theOtherSet) // Testing PowerSet. if Equals(theSet.Intersection(theOtherSet), theSet) != true { panic("Error in handling multi-element set intersection") } if Equals(theSet.Union(theOtherSet), theOtherSet) != true { panic("Error in handling multi-element set union") } /* Testing union and intersection. Since in this state, all elements of theSet are members of theOtherSet, these comparisons are valid. */ if Equals(theSet.RelCompl(theOtherSet), emptySet) != true { panic("Error in handling multi-element set relative complement") } // Case all elements of theSet are elements in theOtherSet. oneElementSet := NewSet() oneElementSet.Append("d") theSet.Append("d") if Equals(theSet.RelCompl(theOtherSet), oneElementSet) != true { panic("Error in handling multi-element set relative complement") } // Case not all elements of theSet are elements in theOtherSet. testSetString(emptySet.SetString(), emptySet) testSetString(theSet.SetString(), theSet) // Testing the SetString method. }

SetString
identifier_name
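The PowerSet and subsets functions in the Set.go sample above enumerate every subset of an N-element slice by formatting the counters 0 to 2^N-1 as N-digit binary strings and treating each digit as a membership flag. A standalone sketch of the same idea using bit masks instead of formatted strings (an illustration only, not the package's own implementation):

package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}
	n := len(items)
	// Each value of mask encodes one subset: bit k set means items[k] is included.
	for mask := 0; mask < 1<<n; mask++ {
		subset := []string{}
		for k := 0; k < n; k++ {
			if mask&(1<<k) != 0 {
				subset = append(subset, items[k])
			}
		}
		fmt.Println(subset)
	}
}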
Set.go
// PRELIMINARIES: package Set import ( "fmt" "strconv" ) // ***************************************************************************** // STRUCTS AND INTERFACES: type setMember interface{} // Lets the members of a set be, in effect, of arbitrary type. type Set struct { mem map[setMember]bool } /* The members of a set are a hashmap seeing if a set contains a certain element. */ /* I do not allow multiplicites. This is a bit of a philosophical standpoint, but I think the elements of a set are a kind of representation of an object rather than objects in and of themselves. E.g. in the set {1, 1, 2, 3} there is no discernible difference between the two ones, and so there is no real difference between {1, 1, 2, 3} and {1, 2, 3}, since the additional one does not bring any new information. This standpoint is consistent with the usual mathematical definition of being a member of a set.*/ // ***************************************************************************** // METHODS AND FUNCTIONS: // Constructor of the set. func NewSet() Set { var emptySet Set emptySet.mem = make(map[setMember]bool) return emptySet } // ----------------------------------------------------------------------------- // Add an element to the set. func (s Set) Append(newMem setMember) { s.mem[newMem] = true } // ----------------------------------------------------------------------------- /* Remove an element from the set. If it is already not in the set, the method does nothing. */ func (s Set) Remove(removeMem setMember) { delete(s.mem, removeMem) } // ----------------------------------------------------------------------------- // Set equality: Two sets are equal if they have the same elements. func Equals(oneSet, otherSet Set) bool { if len(oneSet.mem) != len(otherSet.mem) { return false } /* Obviously, if the sets have different numbers of elements, they are not equal. */ for k := range oneSet.mem { if otherSet.mem[k] == false { return false } } /* If the sets have an equal number of elements and an element in one of the sets is not in the other set, they are not equal. */ return true } // ----------------------------------------------------------------------------- /* The union of two sets is the set of members of either (or both) sets. This function is a method which acts on a set and returns the union of it and
unionSet := NewSet() for k := range s.mem { unionSet.mem[k] = true } // Adds the elements of the set to the union set. for k := range otherSet.mem { unionSet.mem[k] = true } // Adds the elements of the set in the argument to the union set. return unionSet } // ----------------------------------------------------------------------------- /* The intersection of two sets is the set of members of both sets. This function is a method which acts on a set and returns the intersection of it and another set specified in the argument of the function. */ func (s Set) Intersection(otherSet Set) Set { interSet := NewSet() for k := range s.mem { if otherSet.mem[k] == true { interSet.mem[k] = true // Adds an element to the intersection set if it is a member of both sets. } } return interSet } // ----------------------------------------------------------------------------- /* The relative complement of a set relative to another set (call it universe) is the set of members of set which are not members of universe. This method returns the relative complement of the set it acts upon relative to the set specified in the argument.*/ func (s Set) RelCompl(universe Set) Set { relSet := NewSet() for k := range s.mem { if universe.mem[k] == false { relSet.mem[k] = true // Adds a member to the relative set if it is in s but not in universe. } } return relSet } // ----------------------------------------------------------------------------- // Return a slice containing all subsets of mySet. func PowerSet(mySet Set) []Set { powSet := []Set{} mySlice := []setMember{} mySubSlices := [][]setMember{} /* The function first breaks the set down into a slice which is easier to work with. The problem then amounts to finding all subslices of this slice. This can be done by realizing that, since the power set of a set with N elements has 2^N elements, these elements can be represented by binary strings with N digits. This is what the subset() function below does. */ for k := range mySet.mem { mySlice = append(mySlice, k) } mySubSlices = subsets(mySlice) // Generate the slice containing all subslices of this slice. for i := 0; i < len(mySubSlices); i++ { newSubSet := NewSet() for k := 0; k < len(mySubSlices[i]); k++ { newSubSet.mem[mySubSlices[i][k]] = true } powSet = append(powSet, newSubSet) } return powSet } /* This function takes a slice as input and returns a slice containing all subslices of this slice. */ func subsets(mySlice []setMember) [][]setMember { indexSlice := []string{} subSetSlice := [][]setMember{} binaryLen := "%0" + strconv.Itoa(len(mySlice)) + "b" for i := 0; i < pow(2, len(mySlice)); i++ { indexSlice = append(indexSlice, fmt.Sprintf(binaryLen, i)) } /* Generate all binary numbers from 0 to 2^len(mySlice)-1 and set their number of digits to len(mySlice). */ for i := 0; i < pow(2, len(mySlice)); i++ { subsets := []setMember{} for k := 0; k < len(mySlice); k++ { if indexSlice[i][k] == byte('1') { subsets = append(subsets, mySlice[k]) } /* Every binary number from 0 to 2^(len(mySLice))-1 represents a subset of mySlice: iterate through every binary number, and if the k:th element of the binary number is 1, append the k:th element of mySlice to subsets, which represents a given subset. When all the binary numbers are iterated through, every possible subset of mySlice will have been generated through this process, since there are 2^len(mySlice) unique binary numbers and 2^len(mySlice) subsets of mySlice, counting the empty slice. 
*/ } subSetSlice = append(subSetSlice, subsets) // Append the generated subset to the subSetSlice. } return subSetSlice } /* There is no native integer exponential method in Go. I used this basic implementation I found on: http://grokbase.com/t/gg/golang-nuts/139n3edtq3/go-nuts-integer-exponentiation-in-go */ func pow(a, exp int) int { p := 1 for exp > 0 { if exp&1 != 0 { p *= a } exp >>= 1 a *= a } return p } // Testing some conditions put on the power set. func testPowerSet(subSetSlice []Set, theSet Set) { allElemsInPowSet := NewSet() if len(subSetSlice) != pow(2, len(theSet.mem)) { panic("Error in number of elements in PowerSet") } // Check if the length of the set relative to the power set align. for i := 0; i < len(subSetSlice); i++ { for k := range subSetSlice[i].mem { allElemsInPowSet.Append(k) // Collect all elements found in the power set. if theSet.mem[k] == false { panic("Error in allowed set members in PowerSet") // Checks if some elements of the power set are not in the set. } } } for i := range allElemsInPowSet.mem { if theSet.mem[i] == false { panic("Error: elements missing in PowerSet") } } // Check if some elements of the set are not in the power set. } // ----------------------------------------------------------------------------- /* Return a string representation of the set. */ func (s *Set) SetString() string { mySetSlice := []setMember{} for i := range s.mem { mySetSlice = append(mySetSlice, i) } myString := fmt.Sprint(mySetSlice) return myString } func testSetString(theSetString string, theSet Set) { counter := 0 for i := range theSet.mem { for k := 0; k < len(theSetString); k++ { if fmt.Sprint(i) == string(theSetString[k]) { counter += 1 } } } if counter != len(theSet.mem) { panic("Error in SetString: wrong number of elements") } // Seeing if every element of theSet can be found as a string in theSetString. } // ***************************************************************************** // TESTING: func main() { theSet := NewSet() theOtherSet := NewSet() emptySet := NewSet() // --------------------------------------------------------------------------- // TESTING OF EMPTY SET AND ONE-ELEMENT SET OPERATIONS: if Equals(theSet, theOtherSet) != true { panic("Error handling empty set in Equals") } testPowerSet(PowerSet(emptySet), emptySet) theSet.Append(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theSet.Intersection(emptySet), emptySet) != true { panic("Error handling intersection of set with emptySet") } if Equals(theSet.Union(emptySet), theSet) != true { panic("Error handling union of set with emptySet") } // Testing union and intersection on emptySet. if Equals(theSet, theOtherSet) != false { panic("Error handling comparing empty to one-element set in Equals") } if Equals(theSet, theSet) != true { panic("Error handling comparing set to itself in Equals") } theOtherSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error handling comparing one-element sets in Equals") } // Testing Equals with one and zero-element sets. 
theSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error in handling same-element appending in Append") } testPowerSet(PowerSet(theSet), theSet) if len(PowerSet(emptySet)) != 1 { panic("Error in handling power set of empty set") } if Equals(theSet.RelCompl(emptySet), theSet) != true { panic("Error in handling relative complement of set and empty set in RelCompl") } if Equals(theSet.RelCompl(theOtherSet), emptySet) != true { panic("Error in handling relative complement of one-element sets in RelCompl") } // Testing RelCompl with one- and zero-element sets. if Equals(emptySet.Intersection(emptySet), emptySet) != true { panic("Error in handling intersection of empty set with itself in Intersection") } if Equals(emptySet.Union(emptySet), emptySet) != true { panic("Error in handling union of non-empty with itself in Union") } // Testing of methods on emptySet. if Equals(theSet.Intersection(theSet), theSet) != true { panic("Error in handling intersection of non-empty set with itself in Intersection") } if Equals(theSet.Union(theSet), theSet) != true { panic("Error in handling union of non-empty set with itself in Union") } // Testing of methods on non-empty set. theOtherSet.Remove(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theOtherSet, emptySet) != true { panic("Error in handling removal of element in one-element set in Remove") } theOtherSet.Remove("k") if Equals(theOtherSet, emptySet) != true { panic("Error in handling removal of non-existent element in Remove") } // Testing Remove. // --------------------------------------------------------------------------- // TESTING MULTI-ELEMENT SET OPERATIONS: theSet.Append(2) theSet.Append(3) theSet.Append("a") theSet.Append("b") theOtherSet.Append(1) theOtherSet.Append(2) theOtherSet.Append(3) theOtherSet.Append("a") theOtherSet.Append("b") // Adding some elements to the sets. testPowerSet(PowerSet(theSet), theSet) if Equals(theSet, theOtherSet) != true { panic("Error in handling multi-element set in Equals") } theOtherSet.Append("c") if Equals(theSet, theOtherSet) != false { panic("Error in handling multi-element set in Equals") } // Testing Equals. for i := 0; i < 12; i++ { theOtherSet.Append(i) } testPowerSet(PowerSet(theOtherSet), theOtherSet) // Testing PowerSet. if Equals(theSet.Intersection(theOtherSet), theSet) != true { panic("Error in handling multi-element set intersection") } if Equals(theSet.Union(theOtherSet), theOtherSet) != true { panic("Error in handling multi-element set union") } /* Testing union and intersection. Since in this state, all elements of theSet are members of theOtherSet, these comparisons are valid. */ if Equals(theSet.RelCompl(theOtherSet), emptySet) != true { panic("Error in handling multi-element set relative complement") } // Case all elements of theSet are elements in theOtherSet. oneElementSet := NewSet() oneElementSet.Append("d") theSet.Append("d") if Equals(theSet.RelCompl(theOtherSet), oneElementSet) != true { panic("Error in handling multi-element set relative complement") } // Case not all elements of theSet are elements in theOtherSet. testSetString(emptySet.SetString(), emptySet) testSetString(theSet.SetString(), theSet) // Testing the SetString method. }
another set specified in the argument of the function. */ func (s Set) Union(otherSet Set) Set {
random_line_split
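Because the Set struct stores its map by value, copies of a Set share and mutate the same underlying map, which is why Append and Remove work with value receivers; only SetString takes a pointer receiver. A minimal usage sketch, assuming it is compiled into the same Set package as the code above (the ExampleUsage name is hypothetical):

// Prints the union and intersection of two small sets; element order is not guaranteed.
func ExampleUsage() {
	a := NewSet()
	b := NewSet()
	a.Append(1)
	a.Append("x")
	b.Append(1)

	u := a.Union(b)        // {1, "x"}
	i := a.Intersection(b) // {1}
	fmt.Println(u.SetString(), i.SetString())
}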
Set.go
// PRELIMINARIES: package Set import ( "fmt" "strconv" ) // ***************************************************************************** // STRUCTS AND INTERFACES: type setMember interface{} // Lets the members of a set be, in effect, of arbitrary type. type Set struct { mem map[setMember]bool } /* The members of a set are a hashmap seeing if a set contains a certain element. */ /* I do not allow multiplicites. This is a bit of a philosophical standpoint, but I think the elements of a set are a kind of representation of an object rather than objects in and of themselves. E.g. in the set {1, 1, 2, 3} there is no discernible difference between the two ones, and so there is no real difference between {1, 1, 2, 3} and {1, 2, 3}, since the additional one does not bring any new information. This standpoint is consistent with the usual mathematical definition of being a member of a set.*/ // ***************************************************************************** // METHODS AND FUNCTIONS: // Constructor of the set. func NewSet() Set { var emptySet Set emptySet.mem = make(map[setMember]bool) return emptySet } // ----------------------------------------------------------------------------- // Add an element to the set. func (s Set) Append(newMem setMember) { s.mem[newMem] = true } // ----------------------------------------------------------------------------- /* Remove an element from the set. If it is already not in the set, the method does nothing. */ func (s Set) Remove(removeMem setMember)
// ----------------------------------------------------------------------------- // Set equality: Two sets are equal if they have the same elements. func Equals(oneSet, otherSet Set) bool { if len(oneSet.mem) != len(otherSet.mem) { return false } /* Obviously, if the sets have different numbers of elements, they are not equal. */ for k := range oneSet.mem { if otherSet.mem[k] == false { return false } } /* If the sets have an equal number of elements and an element in one of the sets is not in the other set, they are not equal. */ return true } // ----------------------------------------------------------------------------- /* The union of two sets is the set of members of either (or both) sets. This function is a method which acts on a set and returns the union of it and another set specified in the argument of the function. */ func (s Set) Union(otherSet Set) Set { unionSet := NewSet() for k := range s.mem { unionSet.mem[k] = true } // Adds the elements of the set to the union set. for k := range otherSet.mem { unionSet.mem[k] = true } // Adds the elements of the set in the argument to the union set. return unionSet } // ----------------------------------------------------------------------------- /* The intersection of two sets is the set of members of both sets. This function is a method which acts on a set and returns the intersection of it and another set specified in the argument of the function. */ func (s Set) Intersection(otherSet Set) Set { interSet := NewSet() for k := range s.mem { if otherSet.mem[k] == true { interSet.mem[k] = true // Adds an element to the intersection set if it is a member of both sets. } } return interSet } // ----------------------------------------------------------------------------- /* The relative complement of a set relative to another set (call it universe) is the set of members of set which are not members of universe. This method returns the relative complement of the set it acts upon relative to the set specified in the argument.*/ func (s Set) RelCompl(universe Set) Set { relSet := NewSet() for k := range s.mem { if universe.mem[k] == false { relSet.mem[k] = true // Adds a member to the relative set if it is in s but not in universe. } } return relSet } // ----------------------------------------------------------------------------- // Return a slice containing all subsets of mySet. func PowerSet(mySet Set) []Set { powSet := []Set{} mySlice := []setMember{} mySubSlices := [][]setMember{} /* The function first breaks the set down into a slice which is easier to work with. The problem then amounts to finding all subslices of this slice. This can be done by realizing that, since the power set of a set with N elements has 2^N elements, these elements can be represented by binary strings with N digits. This is what the subset() function below does. */ for k := range mySet.mem { mySlice = append(mySlice, k) } mySubSlices = subsets(mySlice) // Generate the slice containing all subslices of this slice. for i := 0; i < len(mySubSlices); i++ { newSubSet := NewSet() for k := 0; k < len(mySubSlices[i]); k++ { newSubSet.mem[mySubSlices[i][k]] = true } powSet = append(powSet, newSubSet) } return powSet } /* This function takes a slice as input and returns a slice containing all subslices of this slice. 
*/ func subsets(mySlice []setMember) [][]setMember { indexSlice := []string{} subSetSlice := [][]setMember{} binaryLen := "%0" + strconv.Itoa(len(mySlice)) + "b" for i := 0; i < pow(2, len(mySlice)); i++ { indexSlice = append(indexSlice, fmt.Sprintf(binaryLen, i)) } /* Generate all binary numbers from 0 to 2^len(mySlice)-1 and set their number of digits to len(mySlice). */ for i := 0; i < pow(2, len(mySlice)); i++ { subsets := []setMember{} for k := 0; k < len(mySlice); k++ { if indexSlice[i][k] == byte('1') { subsets = append(subsets, mySlice[k]) } /* Every binary number from 0 to 2^(len(mySlice))-1 represents a subset of mySlice: iterate through every binary number, and if the k:th element of the binary number is 1, append the k:th element of mySlice to subsets, which represents a given subset. When all the binary numbers are iterated through, every possible subset of mySlice will have been generated through this process, since there are 2^len(mySlice) unique binary numbers and 2^len(mySlice) subsets of mySlice, counting the empty slice. */ } subSetSlice = append(subSetSlice, subsets) // Append the generated subset to the subSetSlice. } return subSetSlice } /* There is no native integer exponential method in Go. I used this basic implementation I found on: http://grokbase.com/t/gg/golang-nuts/139n3edtq3/go-nuts-integer-exponentiation-in-go */ func pow(a, exp int) int { p := 1 for exp > 0 { if exp&1 != 0 { p *= a } exp >>= 1 a *= a } return p } // Testing some conditions put on the power set. func testPowerSet(subSetSlice []Set, theSet Set) { allElemsInPowSet := NewSet() if len(subSetSlice) != pow(2, len(theSet.mem)) { panic("Error in number of elements in PowerSet") } // Check if the length of the set relative to the power set align. for i := 0; i < len(subSetSlice); i++ { for k := range subSetSlice[i].mem { allElemsInPowSet.Append(k) // Collect all elements found in the power set. if theSet.mem[k] == false { panic("Error in allowed set members in PowerSet") // Checks if some elements of the power set are not in the set. } } } for i := range allElemsInPowSet.mem { if theSet.mem[i] == false { panic("Error: elements missing in PowerSet") } } // Check if some elements of the set are not in the power set. } // ----------------------------------------------------------------------------- /* Return a string representation of the set. */ func (s *Set) SetString() string { mySetSlice := []setMember{} for i := range s.mem { mySetSlice = append(mySetSlice, i) } myString := fmt.Sprint(mySetSlice) return myString } func testSetString(theSetString string, theSet Set) { counter := 0 for i := range theSet.mem { for k := 0; k < len(theSetString); k++ { if fmt.Sprint(i) == string(theSetString[k]) { counter += 1 } } } if counter != len(theSet.mem) { panic("Error in SetString: wrong number of elements") } // Seeing if every element of theSet can be found as a string in theSetString.
} // ***************************************************************************** // TESTING: func main() { theSet := NewSet() theOtherSet := NewSet() emptySet := NewSet() // --------------------------------------------------------------------------- // TESTING OF EMPTY SET AND ONE-ELEMENT SET OPERATIONS: if Equals(theSet, theOtherSet) != true { panic("Error handling empty set in Equals") } testPowerSet(PowerSet(emptySet), emptySet) theSet.Append(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theSet.Intersection(emptySet), emptySet) != true { panic("Error handling intersection of set with emptySet") } if Equals(theSet.Union(emptySet), theSet) != true { panic("Error handling union of set with emptySet") } // Testing union and intersection on emptySet. if Equals(theSet, theOtherSet) != false { panic("Error handling comparing empty to one-element set in Equals") } if Equals(theSet, theSet) != true { panic("Error handling comparing set to itself in Equals") } theOtherSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error handling comparing one-element sets in Equals") } // Testing Equals with one and zero-element sets. theSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error in handling same-element appending in Append") } testPowerSet(PowerSet(theSet), theSet) if len(PowerSet(emptySet)) != 1 { panic("Error in handling power set of empty set") } if Equals(theSet.RelCompl(emptySet), theSet) != true { panic("Error in handling relative complement of set and empty set in RelCompl") } if Equals(theSet.RelCompl(theOtherSet), emptySet) != true { panic("Error in handling relative complement of one-element sets in RelCompl") } // Testing RelCompl with one- and zero-element sets. if Equals(emptySet.Intersection(emptySet), emptySet) != true { panic("Error in handling intersection of empty set with itself in Intersection") } if Equals(emptySet.Union(emptySet), emptySet) != true { panic("Error in handling union of empty set with itself in Union") } // Testing of methods on emptySet. if Equals(theSet.Intersection(theSet), theSet) != true { panic("Error in handling intersection of non-empty set with itself in Intersection") } if Equals(theSet.Union(theSet), theSet) != true { panic("Error in handling union of non-empty set with itself in Union") } // Testing of methods on non-empty set. theOtherSet.Remove(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theOtherSet, emptySet) != true { panic("Error in handling removal of element in one-element set in Remove") } theOtherSet.Remove("k") if Equals(theOtherSet, emptySet) != true { panic("Error in handling removal of non-existent element in Remove") } // Testing Remove. // --------------------------------------------------------------------------- // TESTING MULTI-ELEMENT SET OPERATIONS: theSet.Append(2) theSet.Append(3) theSet.Append("a") theSet.Append("b") theOtherSet.Append(1) theOtherSet.Append(2) theOtherSet.Append(3) theOtherSet.Append("a") theOtherSet.Append("b") // Adding some elements to the sets. testPowerSet(PowerSet(theSet), theSet) if Equals(theSet, theOtherSet) != true { panic("Error in handling multi-element set in Equals") } theOtherSet.Append("c") if Equals(theSet, theOtherSet) != false { panic("Error in handling multi-element set in Equals") } // Testing Equals. for i := 0; i < 12; i++ { theOtherSet.Append(i) } testPowerSet(PowerSet(theOtherSet), theOtherSet) // Testing PowerSet.
if Equals(theSet.Intersection(theOtherSet), theSet) != true { panic("Error in handling multi-element set intersection") } if Equals(theSet.Union(theOtherSet), theOtherSet) != true { panic("Error in handling multi-element set union") } /* Testing union and intersection. Since in this state, all elements of theSet are members of theOtherSet, these comparisons are valid. */ if Equals(theSet.RelCompl(theOtherSet), emptySet) != true { panic("Error in handling multi-element set relative complement") } // Case all elements of theSet are elements in theOtherSet. oneElementSet := NewSet() oneElementSet.Append("d") theSet.Append("d") if Equals(theSet.RelCompl(theOtherSet), oneElementSet) != true { panic("Error in handling multi-element set relative complement") } // Case not all elements of theSet are elements in theOtherSet. testSetString(emptySet.SetString(), emptySet) testSetString(theSet.SetString(), theSet) // Testing the SetString method. }
{ delete(s.mem, removeMem) }
identifier_body
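The subsets() helper in the sample above enumerates subsets by counting through all n-digit binary numbers: a set of n elements has 2^n subsets, and each binary number selects one of them. A minimal stand-alone sketch of the same idea, using integer bit masks instead of formatted binary strings (the element names and output format are illustrative only):

package main

import "fmt"

func main() {
	elems := []string{"a", "b", "c"} // n = 3, so there are 2^3 = 8 subsets
	n := len(elems)
	for mask := 0; mask < 1<<n; mask++ {
		subset := []string{}
		for k := 0; k < n; k++ {
			if mask&(1<<k) != 0 { // bit k set means elems[k] belongs to this subset
				subset = append(subset, elems[k])
			}
		}
		fmt.Printf("%03b -> %v\n", mask, subset)
	}
}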
Set.go
// PRELIMINARIES: package Set import ( "fmt" "strconv" ) // ***************************************************************************** // STRUCTS AND INTERFACES: type setMember interface{} // Lets the members of a set be, in effect, of arbitrary type. type Set struct { mem map[setMember]bool } /* The members of a set are stored in a hash map that records whether the set contains a given element. */ /* I do not allow multiplicities. This is a bit of a philosophical standpoint, but I think the elements of a set are a kind of representation of an object rather than objects in and of themselves. E.g. in the set {1, 1, 2, 3} there is no discernible difference between the two ones, and so there is no real difference between {1, 1, 2, 3} and {1, 2, 3}, since the additional one does not bring any new information. This standpoint is consistent with the usual mathematical definition of being a member of a set.*/ // ***************************************************************************** // METHODS AND FUNCTIONS: // Constructor of the set. func NewSet() Set { var emptySet Set emptySet.mem = make(map[setMember]bool) return emptySet } // ----------------------------------------------------------------------------- // Add an element to the set. func (s Set) Append(newMem setMember) { s.mem[newMem] = true } // ----------------------------------------------------------------------------- /* Remove an element from the set. If the element is not in the set, the method does nothing. */ func (s Set) Remove(removeMem setMember) { delete(s.mem, removeMem) } // ----------------------------------------------------------------------------- // Set equality: Two sets are equal if they have the same elements. func Equals(oneSet, otherSet Set) bool { if len(oneSet.mem) != len(otherSet.mem) { return false } /* Obviously, if the sets have different numbers of elements, they are not equal. */ for k := range oneSet.mem { if otherSet.mem[k] == false { return false } } /* If the sets have an equal number of elements and an element in one of the sets is not in the other set, they are not equal. */ return true } // ----------------------------------------------------------------------------- /* The union of two sets is the set of members of either (or both) sets. This function is a method which acts on a set and returns the union of it and another set specified in the argument of the function. */ func (s Set) Union(otherSet Set) Set { unionSet := NewSet() for k := range s.mem { unionSet.mem[k] = true } // Adds the elements of the set to the union set. for k := range otherSet.mem
// Adds the elements of the set in the argument to the union set. return unionSet } // ----------------------------------------------------------------------------- /* The intersection of two sets is the set of members of both sets. This function is a method which acts on a set and returns the intersection of it and another set specified in the argument of the function. */ func (s Set) Intersection(otherSet Set) Set { interSet := NewSet() for k := range s.mem { if otherSet.mem[k] == true { interSet.mem[k] = true // Adds an element to the intersection set if it is a member of both sets. } } return interSet } // ----------------------------------------------------------------------------- /* The relative complement of a set relative to another set (call it universe) is the set of members of set which are not members of universe. This method returns the relative complement of the set it acts upon relative to the set specified in the argument.*/ func (s Set) RelCompl(universe Set) Set { relSet := NewSet() for k := range s.mem { if universe.mem[k] == false { relSet.mem[k] = true // Adds a member to the relative set if it is in s but not in universe. } } return relSet } // ----------------------------------------------------------------------------- // Return a slice containing all subsets of mySet. func PowerSet(mySet Set) []Set { powSet := []Set{} mySlice := []setMember{} mySubSlices := [][]setMember{} /* The function first breaks the set down into a slice which is easier to work with. The problem then amounts to finding all subslices of this slice. This can be done by realizing that, since the power set of a set with N elements has 2^N elements, these elements can be represented by binary strings with N digits. This is what the subset() function below does. */ for k := range mySet.mem { mySlice = append(mySlice, k) } mySubSlices = subsets(mySlice) // Generate the slice containing all subslices of this slice. for i := 0; i < len(mySubSlices); i++ { newSubSet := NewSet() for k := 0; k < len(mySubSlices[i]); k++ { newSubSet.mem[mySubSlices[i][k]] = true } powSet = append(powSet, newSubSet) } return powSet } /* This function takes a slice as input and returns a slice containing all subslices of this slice. */ func subsets(mySlice []setMember) [][]setMember { indexSlice := []string{} subSetSlice := [][]setMember{} binaryLen := "%0" + strconv.Itoa(len(mySlice)) + "b" for i := 0; i < pow(2, len(mySlice)); i++ { indexSlice = append(indexSlice, fmt.Sprintf(binaryLen, i)) } /* Generate all binary numbers from 0 to 2^len(mySlice)-1 and set their number of digits to len(mySlice). */ for i := 0; i < pow(2, len(mySlice)); i++ { subsets := []setMember{} for k := 0; k < len(mySlice); k++ { if indexSlice[i][k] == byte('1') { subsets = append(subsets, mySlice[k]) } /* Every binary number from 0 to 2^(len(mySlice))-1 represents a subset of mySlice: iterate through every binary number, and if the k:th element of the binary number is 1, append the k:th element of mySlice to subsets, which represents a given subset. When all the binary numbers are iterated through, every possible subset of mySlice will have been generated through this process, since there are 2^len(mySlice) unique binary numbers and 2^len(mySlice) subsets of mySlice, counting the empty slice. */ } subSetSlice = append(subSetSlice, subsets) // Append the generated subset to the subSetSlice. } return subSetSlice } /* There is no native integer exponential method in Go.
I used this basic implementation I found on: http://grokbase.com/t/gg/golang-nuts/139n3edtq3/go-nuts-integer-exponentiation-in-go */ func pow(a, exp int) int { p := 1 for exp > 0 { if exp&1 != 0 { p *= a } exp >>= 1 a *= a } return p } // Testing some conditions put on the power set. func testPowerSet(subSetSlice []Set, theSet Set) { allElemsInPowSet := NewSet() if len(subSetSlice) != pow(2, len(theSet.mem)) { panic("Error in number of elements in PowerSet") } // Check if the length of the set relative to the power set align. for i := 0; i < len(subSetSlice); i++ { for k := range subSetSlice[i].mem { allElemsInPowSet.Append(k) // Collect all elements found in the power set. if theSet.mem[k] == false { panic("Error in allowed set members in PowerSet") // Checks if some elements of the power set are not in the set. } } } for i := range allElemsInPowSet.mem { if theSet.mem[i] == false { panic("Error: elements missing in PowerSet") } } // Check if some elements of the set are not in the power set. } // ----------------------------------------------------------------------------- /* Return a string representation of the set. */ func (s *Set) SetString() string { mySetSlice := []setMember{} for i := range s.mem { mySetSlice = append(mySetSlice, i) } myString := fmt.Sprint(mySetSlice) return myString } func testSetString(theSetString string, theSet Set) { counter := 0 for i := range theSet.mem { for k := 0; k < len(theSetString); k++ { if fmt.Sprint(i) == string(theSetString[k]) { counter += 1 } } } if counter != len(theSet.mem) { panic("Error in SetString: wrong number of elements") } // Seeing if every element of theSet can be found as a string in theSetString. } // ***************************************************************************** // TESTING: func main() { theSet := NewSet() theOtherSet := NewSet() emptySet := NewSet() // --------------------------------------------------------------------------- // TESTING OF EMPTY SET AND ONE-ELEMENT SET OPERATIONS: if Equals(theSet, theOtherSet) != true { panic("Error handling empty set in Equals") } testPowerSet(PowerSet(emptySet), emptySet) theSet.Append(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theSet.Intersection(emptySet), emptySet) != true { panic("Error handling intersection of set with emptySet") } if Equals(theSet.Union(emptySet), theSet) != true { panic("Error handling union of set with emptySet") } // Testing union and intersection on emptySet. if Equals(theSet, theOtherSet) != false { panic("Error handling comparing empty to one-element set in Equals") } if Equals(theSet, theSet) != true { panic("Error handling comparing set to itself in Equals") } theOtherSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error handling comparing one-element sets in Equals") } // Testing Equals with one and zero-element sets. theSet.Append(1) if Equals(theSet, theOtherSet) != true { panic("Error in handling same-element appending in Append") } testPowerSet(PowerSet(theSet), theSet) if len(PowerSet(emptySet)) != 1 { panic("Error in handling power set of empty set") } if Equals(theSet.RelCompl(emptySet), theSet) != true { panic("Error in handling relative complement of set and empty set in RelCompl") } if Equals(theSet.RelCompl(theOtherSet), emptySet) != true { panic("Error in handling relative complement of one-element sets in RelCompl") } // Testing RelCompl with one- and zero-element sets. 
if Equals(emptySet.Intersection(emptySet), emptySet) != true { panic("Error in handling intersection of empty set with itself in Intersection") } if Equals(emptySet.Union(emptySet), emptySet) != true { panic("Error in handling union of empty set with itself in Union") } // Testing of methods on emptySet. if Equals(theSet.Intersection(theSet), theSet) != true { panic("Error in handling intersection of non-empty set with itself in Intersection") } if Equals(theSet.Union(theSet), theSet) != true { panic("Error in handling union of non-empty set with itself in Union") } // Testing of methods on non-empty set. theOtherSet.Remove(1) testPowerSet(PowerSet(theSet), theSet) if Equals(theOtherSet, emptySet) != true { panic("Error in handling removal of element in one-element set in Remove") } theOtherSet.Remove("k") if Equals(theOtherSet, emptySet) != true { panic("Error in handling removal of non-existent element in Remove") } // Testing Remove. // --------------------------------------------------------------------------- // TESTING MULTI-ELEMENT SET OPERATIONS: theSet.Append(2) theSet.Append(3) theSet.Append("a") theSet.Append("b") theOtherSet.Append(1) theOtherSet.Append(2) theOtherSet.Append(3) theOtherSet.Append("a") theOtherSet.Append("b") // Adding some elements to the sets. testPowerSet(PowerSet(theSet), theSet) if Equals(theSet, theOtherSet) != true { panic("Error in handling multi-element set in Equals") } theOtherSet.Append("c") if Equals(theSet, theOtherSet) != false { panic("Error in handling multi-element set in Equals") } // Testing Equals. for i := 0; i < 12; i++ { theOtherSet.Append(i) } testPowerSet(PowerSet(theOtherSet), theOtherSet) // Testing PowerSet. if Equals(theSet.Intersection(theOtherSet), theSet) != true { panic("Error in handling multi-element set intersection") } if Equals(theSet.Union(theOtherSet), theOtherSet) != true { panic("Error in handling multi-element set union") } /* Testing union and intersection. Since in this state, all elements of theSet are members of theOtherSet, these comparisons are valid. */ if Equals(theSet.RelCompl(theOtherSet), emptySet) != true { panic("Error in handling multi-element set relative complement") } // Case all elements of theSet are elements in theOtherSet. oneElementSet := NewSet() oneElementSet.Append("d") theSet.Append("d") if Equals(theSet.RelCompl(theOtherSet), oneElementSet) != true { panic("Error in handling multi-element set relative complement") } // Case not all elements of theSet are elements in theOtherSet. testSetString(emptySet.SetString(), emptySet) testSetString(theSet.SetString(), theSet) // Testing the SetString method. }
{ unionSet.mem[k] = true }
conditional_block
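The pow helper in the sample above is binary (square-and-multiply) exponentiation: the exponent is consumed bit by bit while the base is squared each round. A short worked trace for pow(3, 5), where 5 is 101 in binary: start with p = 1, a = 3, exp = 5; the low bit is 1, so p becomes 3, then exp shifts to 2 and a becomes 9; the low bit of 2 is 0, so p stays 3, exp shifts to 1 and a becomes 81; the low bit of 1 is 1, so p becomes 3 * 81 = 243 and exp shifts to 0, ending the loop with 243 = 3^5.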
gitiles.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: go.chromium.org/luci/common/proto/gitiles/gitiles.proto package gitiles import prpc "go.chromium.org/luci/grpc/prpc" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import git "go.chromium.org/luci/common/proto/git" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // LogRequest is request message for Gitiles.Log rpc. type LogRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // The commit where to start the listing from. // The value can be: // - a git revision as 40-char string or its prefix so long as its unique in repo. // - a ref such as "refs/heads/branch" or just "branch" // - a ref defined as n-th parent of R in the form "R~n". // For example, "master~2" or "deadbeef~1". // Required. Treeish string `protobuf:"bytes,3,opt,name=treeish" json:"treeish,omitempty"` // The commit where to stop listing at. Together with treeish, // the represent git's "ancestor..treeish" notation. Ancestor string `protobuf:"bytes,2,opt,name=ancestor" json:"ancestor,omitempty"` // If true, include tree diff in commits. TreeDiff bool `protobuf:"varint,4,opt,name=tree_diff,json=treeDiff" json:"tree_diff,omitempty"` // Value of next_page_token in LogResponse to continue. PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` // If > 0, number of commits to retrieve. 
PageSize int32 `protobuf:"varint,11,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogRequest) Reset() { *m = LogRequest{} } func (m *LogRequest) String() string { return proto.CompactTextString(m) } func (*LogRequest) ProtoMessage() {} func (*LogRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{0} } func (m *LogRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogRequest.Unmarshal(m, b) } func (m *LogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogRequest.Marshal(b, m, deterministic) } func (dst *LogRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_LogRequest.Merge(dst, src) } func (m *LogRequest) XXX_Size() int { return xxx_messageInfo_LogRequest.Size(m) } func (m *LogRequest) XXX_DiscardUnknown() { xxx_messageInfo_LogRequest.DiscardUnknown(m) } var xxx_messageInfo_LogRequest proto.InternalMessageInfo func (m *LogRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *LogRequest) GetTreeish() string { if m != nil { return m.Treeish } return "" } func (m *LogRequest) GetAncestor() string { if m != nil { return m.Ancestor } return "" } func (m *LogRequest) GetTreeDiff() bool { if m != nil { return m.TreeDiff } return false } func (m *LogRequest) GetPageToken() string { if m != nil { return m.PageToken } return "" } func (m *LogRequest) GetPageSize() int32 { if m != nil { return m.PageSize } return 0 } // LogRequest is response message for Gitiles.Log rpc. type LogResponse struct { // Retrieved commits. Log []*git.Commit `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` // A page token for next LogRequest to fetch next page of commits. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogResponse) Reset() { *m = LogResponse{} } func (m *LogResponse) String() string { return proto.CompactTextString(m) } func (*LogResponse) ProtoMessage() {} func (*LogResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{1} } func (m *LogResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogResponse.Unmarshal(m, b) } func (m *LogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogResponse.Marshal(b, m, deterministic) } func (dst *LogResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LogResponse.Merge(dst, src) } func (m *LogResponse) XXX_Size() int { return xxx_messageInfo_LogResponse.Size(m) } func (m *LogResponse) XXX_DiscardUnknown() { xxx_messageInfo_LogResponse.DiscardUnknown(m) } var xxx_messageInfo_LogResponse proto.InternalMessageInfo func (m *LogResponse) GetLog() []*git.Commit { if m != nil { return m.Log } return nil } func (m *LogResponse) GetNextPageToken() string { if m != nil { return m.NextPageToken } return "" } // RefsRequest is a request message of Gitiles.Refs RPC. type RefsRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // Limits which refs to resolve to only those matching {refsPath}/*. // // Must be "refs" or start with "refs/". // Must not include glob '*'. 
// Use "refs/heads" to retrieve all branches. // // To fetch **all** refs in a repo, specify just "refs" but beware of two // caveats: // * refs returned include a ref for each patchset for each Gerrit change // associated with the repo. // * returned map will contain special "HEAD" ref whose value in resulting map // will be name of the actual ref to which "HEAD" points, which is typically // "refs/heads/master". // // Thus, if you are looking for all tags and all branches of repo, it's // recommended to issue two Refs calls limited to "refs/tags" and "refs/heads" // instead of one call for "refs". // // Since Gerrit allows per-ref ACLs, it is possible that some refs matching // refPrefix would not be present in results because current user isn't granted // read permission on them. RefsPath string `protobuf:"bytes,2,opt,name=refs_path,json=refsPath" json:"refs_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsRequest) Reset() { *m = RefsRequest{} } func (m *RefsRequest) String() string { return proto.CompactTextString(m) } func (*RefsRequest) ProtoMessage() {} func (*RefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{2} } func (m *RefsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsRequest.Unmarshal(m, b) } func (m *RefsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsRequest.Marshal(b, m, deterministic) } func (dst *RefsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsRequest.Merge(dst, src) } func (m *RefsRequest) XXX_Size() int { return xxx_messageInfo_RefsRequest.Size(m) } func (m *RefsRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefsRequest.DiscardUnknown(m) } var xxx_messageInfo_RefsRequest proto.InternalMessageInfo func (m *RefsRequest)
() string { if m != nil { return m.Project } return "" } func (m *RefsRequest) GetRefsPath() string { if m != nil { return m.RefsPath } return "" } // RefsResponse is a response message of Gitiles.Refs RPC. type RefsResponse struct { // revisions maps a ref to a revision. // Git branches have keys start with "refs/heads/". Revisions map[string]string `protobuf:"bytes,2,rep,name=revisions" json:"revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsResponse) Reset() { *m = RefsResponse{} } func (m *RefsResponse) String() string { return proto.CompactTextString(m) } func (*RefsResponse) ProtoMessage() {} func (*RefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{3} } func (m *RefsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsResponse.Unmarshal(m, b) } func (m *RefsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsResponse.Marshal(b, m, deterministic) } func (dst *RefsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsResponse.Merge(dst, src) } func (m *RefsResponse) XXX_Size() int { return xxx_messageInfo_RefsResponse.Size(m) } func (m *RefsResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefsResponse.DiscardUnknown(m) } var xxx_messageInfo_RefsResponse proto.InternalMessageInfo func (m *RefsResponse) GetRevisions() map[string]string { if m != nil { return m.Revisions } return nil } func init() { proto.RegisterType((*LogRequest)(nil), "gitiles.LogRequest") proto.RegisterType((*LogResponse)(nil), "gitiles.LogResponse") proto.RegisterType((*RefsRequest)(nil), "gitiles.RefsRequest") proto.RegisterType((*RefsResponse)(nil), "gitiles.RefsResponse") proto.RegisterMapType((map[string]string)(nil), "gitiles.RefsResponse.RevisionsEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // GitilesClient is the client API for Gitiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GitilesClient interface { // Log retrieves commit log. Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) // Refs retrieves repo refs. Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) } type gitilesPRPCClient struct { client *prpc.Client } func NewGitilesPRPCClient(client *prpc.Client) GitilesClient { return &gitilesPRPCClient{client} } func (c *gitilesPRPCClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesPRPCClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Refs", in, out, opts...) 
if err != nil { return nil, err } return out, nil } type gitilesClient struct { cc *grpc.ClientConn } func NewGitilesClient(cc *grpc.ClientConn) GitilesClient { return &gitilesClient{cc} } func (c *gitilesClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } // GitilesServer is the server API for Gitiles service. type GitilesServer interface { // Log retrieves commit log. Log(context.Context, *LogRequest) (*LogResponse, error) // Refs retrieves repo refs. Refs(context.Context, *RefsRequest) (*RefsResponse, error) } func RegisterGitilesServer(s prpc.Registrar, srv GitilesServer) { s.RegisterService(&_Gitiles_serviceDesc, srv) } func _Gitiles_Log_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(LogRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GitilesServer).Log(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitiles.Gitiles/Log", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GitilesServer).Log(ctx, req.(*LogRequest)) } return interceptor(ctx, in, info, handler) } func _Gitiles_Refs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RefsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GitilesServer).Refs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitiles.Gitiles/Refs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GitilesServer).Refs(ctx, req.(*RefsRequest)) } return interceptor(ctx, in, info, handler) } var _Gitiles_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitiles.Gitiles", HandlerType: (*GitilesServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Log", Handler: _Gitiles_Log_Handler, }, { MethodName: "Refs", Handler: _Gitiles_Refs_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "go.chromium.org/luci/common/proto/gitiles/gitiles.proto", } func init() { proto.RegisterFile("go.chromium.org/luci/common/proto/gitiles/gitiles.proto", fileDescriptor_gitiles_e833c2c096a9c6f8) } var fileDescriptor_gitiles_e833c2c096a9c6f8 = []byte{ // 410 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6e, 0xd4, 0x30, 0x10, 0x80, 0xeb, 0x4d, 0xcb, 0xee, 0x4e, 0xf8, 0x93, 0x59, 0x24, 0x6b, 0xab, 0x4a, 0xab, 0x08, 0xa1, 0x3d, 0x65, 0xa5, 0x20, 0x04, 0x42, 0x9c, 0xa0, 0x88, 0x4b, 0x0f, 0x95, 0xe9, 0x3d, 0x0a, 0x61, 0xe2, 0x98, 0x26, 0x71, 0xb0, 0x9d, 0x8a, 0xf6, 0x29, 0x78, 0x19, 0xde, 0x0f, 0xd9, 0x4e, 0xfa, 0x83, 0x90, 0xe8, 0x29, 0x99, 0x6f, 0x66, 0x3c, 0xdf, 0xc4, 0x81, 0x37, 0x42, 0xa5, 0x65, 0xad, 0x55, 0x2b, 0x87, 0x36, 0x55, 0x5a, 0xec, 0x9a, 0xa1, 0x94, 0xbb, 0x52, 0xb5, 0xad, 0xea, 0x76, 0xbd, 0x56, 0x56, 0xed, 0x84, 0xb4, 0xb2, 0x41, 0x33, 0x3d, 0x53, 0x4f, 0xe9, 0x7c, 0x0c, 0xd7, 0xd9, 0xbd, 0x4e, 0xf0, 0x40, 0xda, 0xd0, 0x9c, 0xfc, 0x26, 0x00, 0x27, 0x4a, 0x70, 
0xfc, 0x31, 0xa0, 0xb1, 0x94, 0xc1, 0xbc, 0xd7, 0xea, 0x3b, 0x96, 0x96, 0x91, 0x0d, 0xd9, 0x2e, 0xf9, 0x14, 0xba, 0x8c, 0xd5, 0x88, 0xd2, 0xd4, 0x2c, 0x0a, 0x99, 0x31, 0xa4, 0x6b, 0x58, 0x14, 0x5d, 0x89, 0xc6, 0x2a, 0xcd, 0x66, 0x3e, 0x75, 0x1d, 0xd3, 0x43, 0x58, 0xba, 0xb2, 0xfc, 0x9b, 0xac, 0x2a, 0xb6, 0xbf, 0x21, 0xdb, 0x05, 0x5f, 0x38, 0x70, 0x2c, 0xab, 0x8a, 0x1e, 0x01, 0xf4, 0x85, 0xc0, 0xdc, 0xaa, 0x73, 0xec, 0x18, 0xf8, 0xd6, 0xa5, 0x23, 0x67, 0x0e, 0xb8, 0x5e, 0x9f, 0x36, 0xf2, 0x0a, 0x59, 0xbc, 0x21, 0xdb, 0x03, 0xbe, 0x70, 0xe0, 0x8b, 0xbc, 0xc2, 0xe4, 0x0c, 0x62, 0xaf, 0x6d, 0x7a, 0xd5, 0x19, 0xa4, 0x47, 0x10, 0x35, 0x4a, 0x30, 0xb2, 0x89, 0xb6, 0x71, 0x16, 0xa7, 0x42, 0xda, 0xf4, 0xa3, 0x5f, 0x93, 0x3b, 0x4e, 0x5f, 0xc2, 0x93, 0x0e, 0x7f, 0xda, 0xfc, 0xd6, 0xb8, 0x60, 0xfa, 0xc8, 0xe1, 0xd3, 0x69, 0x64, 0x72, 0x0c, 0x31, 0xc7, 0xca, 0xfc, 0xff, 0x6b, 0x1c, 0xc2, 0x52, 0x63, 0x65, 0xf2, 0xbe, 0xb0, 0xf5, 0xb4, 0xb4, 0x03, 0xa7, 0x85, 0xad, 0x93, 0x5f, 0x04, 0x1e, 0x86, 0x63, 0x46, 0xbb, 0x0f, 0xae, 0xfa, 0x42, 0x1a, 0xa9, 0x3a, 0xc3, 0x66, 0xde, 0xf1, 0x45, 0x3a, 0x5d, 0xe2, 0xed, 0xca, 0x94, 0x4f, 0x65, 0x9f, 0x3a, 0xab, 0x2f, 0xf9, 0x4d, 0xdb, 0xfa, 0x3d, 0x3c, 0xbe, 0x9b, 0xa4, 0x4f, 0x21, 0x3a, 0xc7, 0xcb, 0xd1, 0xcc, 0xbd, 0xd2, 0x15, 0x1c, 0x5c, 0x14, 0xcd, 0x80, 0xa3, 0x51, 0x08, 0xde, 0xcd, 0xde, 0x92, 0xcc, 0xc2, 0xfc, 0x73, 0x98, 0x47, 0x33, 0x88, 0x4e, 0x94, 0xa0, 0xcf, 0xae, 0x05, 0x6e, 0xae, 0x7f, 0xbd, 0xba, 0x0b, 0x83, 0x54, 0xb2, 0x47, 0x5f, 0xc3, 0xbe, 0xd3, 0xa4, 0xab, 0xbf, 0xac, 0x43, 0xd7, 0xf3, 0x7f, 0xee, 0x92, 0xec, 0x7d, 0x7d, 0xe0, 0xff, 0xb1, 0x57, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x52, 0xc6, 0x1d, 0xdb, 0x02, 0x00, 0x00, }
GetProject
identifier_name
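As a usage illustration of the generated Gitiles client in the sample above, here is a hedged sketch of paging through Gitiles.Log by feeding NextPageToken back in as PageToken. The dial target, dial options, project, and committish are placeholders for illustration only; real callers typically go through the pRPC client (NewGitilesPRPCClient) rather than a raw gRPC connection.

package main

import (
	"context"
	"fmt"
	"log"

	"go.chromium.org/luci/common/proto/gitiles"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder connection; production code would normally use the pRPC client instead.
	conn, err := grpc.Dial("gitiles.example.com:443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := gitiles.NewGitilesClient(conn)
	req := &gitiles.LogRequest{
		Project:  "chromium/src", // example project from the comments above
		Treeish:  "master",
		PageSize: 100,
	}
	for {
		resp, err := client.Log(context.Background(), req)
		if err != nil {
			log.Fatal(err)
		}
		for _, commit := range resp.Log {
			fmt.Println(commit) // each entry is a *git.Commit from the retrieved log
		}
		if resp.NextPageToken == "" {
			break // no more pages
		}
		req.PageToken = resp.NextPageToken // continue where this page ended
	}
}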
gitiles.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: go.chromium.org/luci/common/proto/gitiles/gitiles.proto package gitiles import prpc "go.chromium.org/luci/grpc/prpc" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import git "go.chromium.org/luci/common/proto/git" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // LogRequest is request message for Gitiles.Log rpc. type LogRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // The commit where to start the listing from. // The value can be: // - a git revision as 40-char string or its prefix so long as its unique in repo. // - a ref such as "refs/heads/branch" or just "branch" // - a ref defined as n-th parent of R in the form "R~n". // For example, "master~2" or "deadbeef~1". // Required. Treeish string `protobuf:"bytes,3,opt,name=treeish" json:"treeish,omitempty"` // The commit where to stop listing at. Together with treeish, // the represent git's "ancestor..treeish" notation. Ancestor string `protobuf:"bytes,2,opt,name=ancestor" json:"ancestor,omitempty"` // If true, include tree diff in commits. TreeDiff bool `protobuf:"varint,4,opt,name=tree_diff,json=treeDiff" json:"tree_diff,omitempty"` // Value of next_page_token in LogResponse to continue. PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` // If > 0, number of commits to retrieve. 
PageSize int32 `protobuf:"varint,11,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogRequest) Reset() { *m = LogRequest{} } func (m *LogRequest) String() string { return proto.CompactTextString(m) } func (*LogRequest) ProtoMessage() {} func (*LogRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{0} } func (m *LogRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogRequest.Unmarshal(m, b) } func (m *LogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogRequest.Marshal(b, m, deterministic) } func (dst *LogRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_LogRequest.Merge(dst, src) } func (m *LogRequest) XXX_Size() int { return xxx_messageInfo_LogRequest.Size(m) } func (m *LogRequest) XXX_DiscardUnknown() { xxx_messageInfo_LogRequest.DiscardUnknown(m) } var xxx_messageInfo_LogRequest proto.InternalMessageInfo func (m *LogRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *LogRequest) GetTreeish() string { if m != nil { return m.Treeish } return "" } func (m *LogRequest) GetAncestor() string { if m != nil { return m.Ancestor } return "" } func (m *LogRequest) GetTreeDiff() bool { if m != nil { return m.TreeDiff } return false } func (m *LogRequest) GetPageToken() string { if m != nil { return m.PageToken } return "" } func (m *LogRequest) GetPageSize() int32 { if m != nil { return m.PageSize } return 0 } // LogRequest is response message for Gitiles.Log rpc. type LogResponse struct { // Retrieved commits. Log []*git.Commit `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` // A page token for next LogRequest to fetch next page of commits. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogResponse) Reset() { *m = LogResponse{} } func (m *LogResponse) String() string { return proto.CompactTextString(m) } func (*LogResponse) ProtoMessage() {} func (*LogResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{1} } func (m *LogResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogResponse.Unmarshal(m, b) } func (m *LogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogResponse.Marshal(b, m, deterministic) } func (dst *LogResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LogResponse.Merge(dst, src) } func (m *LogResponse) XXX_Size() int { return xxx_messageInfo_LogResponse.Size(m) } func (m *LogResponse) XXX_DiscardUnknown() { xxx_messageInfo_LogResponse.DiscardUnknown(m) } var xxx_messageInfo_LogResponse proto.InternalMessageInfo func (m *LogResponse) GetLog() []*git.Commit { if m != nil { return m.Log } return nil } func (m *LogResponse) GetNextPageToken() string { if m != nil { return m.NextPageToken } return "" } // RefsRequest is a request message of Gitiles.Refs RPC. type RefsRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // Limits which refs to resolve to only those matching {refsPath}/*. // // Must be "refs" or start with "refs/". // Must not include glob '*'. 
// Use "refs/heads" to retrieve all branches. // // To fetch **all** refs in a repo, specify just "refs" but beware of two // caveats: // * refs returned include a ref for each patchset for each Gerrit change // associated with the repo. // * returned map will contain special "HEAD" ref whose value in resulting map // will be name of the actual ref to which "HEAD" points, which is typically // "refs/heads/master". // // Thus, if you are looking for all tags and all branches of repo, it's // recommended to issue two Refs calls limited to "refs/tags" and "refs/heads" // instead of one call for "refs". // // Since Gerrit allows per-ref ACLs, it is possible that some refs matching // refPrefix would not be present in results because current user isn't granted // read permission on them. RefsPath string `protobuf:"bytes,2,opt,name=refs_path,json=refsPath" json:"refs_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsRequest) Reset() { *m = RefsRequest{} } func (m *RefsRequest) String() string { return proto.CompactTextString(m) } func (*RefsRequest) ProtoMessage() {} func (*RefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{2} } func (m *RefsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsRequest.Unmarshal(m, b) } func (m *RefsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsRequest.Marshal(b, m, deterministic) } func (dst *RefsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsRequest.Merge(dst, src) } func (m *RefsRequest) XXX_Size() int { return xxx_messageInfo_RefsRequest.Size(m) } func (m *RefsRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefsRequest.DiscardUnknown(m) } var xxx_messageInfo_RefsRequest proto.InternalMessageInfo func (m *RefsRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *RefsRequest) GetRefsPath() string { if m != nil { return m.RefsPath } return "" } // RefsResponse is a response message of Gitiles.Refs RPC. type RefsResponse struct { // revisions maps a ref to a revision. // Git branches have keys start with "refs/heads/". 
Revisions map[string]string `protobuf:"bytes,2,rep,name=revisions" json:"revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsResponse) Reset() { *m = RefsResponse{} } func (m *RefsResponse) String() string { return proto.CompactTextString(m) } func (*RefsResponse) ProtoMessage() {} func (*RefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{3} } func (m *RefsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsResponse.Unmarshal(m, b) } func (m *RefsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsResponse.Marshal(b, m, deterministic) } func (dst *RefsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsResponse.Merge(dst, src) } func (m *RefsResponse) XXX_Size() int { return xxx_messageInfo_RefsResponse.Size(m) } func (m *RefsResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefsResponse.DiscardUnknown(m) } var xxx_messageInfo_RefsResponse proto.InternalMessageInfo func (m *RefsResponse) GetRevisions() map[string]string { if m != nil { return m.Revisions } return nil } func init() { proto.RegisterType((*LogRequest)(nil), "gitiles.LogRequest") proto.RegisterType((*LogResponse)(nil), "gitiles.LogResponse") proto.RegisterType((*RefsRequest)(nil), "gitiles.RefsRequest") proto.RegisterType((*RefsResponse)(nil), "gitiles.RefsResponse") proto.RegisterMapType((map[string]string)(nil), "gitiles.RefsResponse.RevisionsEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // GitilesClient is the client API for Gitiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GitilesClient interface { // Log retrieves commit log. Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) // Refs retrieves repo refs. Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) } type gitilesPRPCClient struct { client *prpc.Client } func NewGitilesPRPCClient(client *prpc.Client) GitilesClient { return &gitilesPRPCClient{client} } func (c *gitilesPRPCClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Log", in, out, opts...) if err != nil
return out, nil } func (c *gitilesPRPCClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } type gitilesClient struct { cc *grpc.ClientConn } func NewGitilesClient(cc *grpc.ClientConn) GitilesClient { return &gitilesClient{cc} } func (c *gitilesClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } // GitilesServer is the server API for Gitiles service. type GitilesServer interface { // Log retrieves commit log. Log(context.Context, *LogRequest) (*LogResponse, error) // Refs retrieves repo refs. Refs(context.Context, *RefsRequest) (*RefsResponse, error) } func RegisterGitilesServer(s prpc.Registrar, srv GitilesServer) { s.RegisterService(&_Gitiles_serviceDesc, srv) } func _Gitiles_Log_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(LogRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GitilesServer).Log(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitiles.Gitiles/Log", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GitilesServer).Log(ctx, req.(*LogRequest)) } return interceptor(ctx, in, info, handler) } func _Gitiles_Refs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RefsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GitilesServer).Refs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitiles.Gitiles/Refs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GitilesServer).Refs(ctx, req.(*RefsRequest)) } return interceptor(ctx, in, info, handler) } var _Gitiles_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitiles.Gitiles", HandlerType: (*GitilesServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Log", Handler: _Gitiles_Log_Handler, }, { MethodName: "Refs", Handler: _Gitiles_Refs_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "go.chromium.org/luci/common/proto/gitiles/gitiles.proto", } func init() { proto.RegisterFile("go.chromium.org/luci/common/proto/gitiles/gitiles.proto", fileDescriptor_gitiles_e833c2c096a9c6f8) } var fileDescriptor_gitiles_e833c2c096a9c6f8 = []byte{ // 410 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6e, 0xd4, 0x30, 0x10, 0x80, 0xeb, 0x4d, 0xcb, 0xee, 0x4e, 0xf8, 0x93, 0x59, 0x24, 0x6b, 0xab, 0x4a, 0xab, 0x08, 0xa1, 0x3d, 0x65, 0xa5, 0x20, 0x04, 0x42, 0x9c, 0xa0, 0x88, 0x4b, 0x0f, 0x95, 0xe9, 0x3d, 0x0a, 0x61, 0xe2, 0x98, 0x26, 0x71, 0xb0, 0x9d, 0x8a, 0xf6, 0x29, 0x78, 0x19, 0xde, 0x0f, 0xd9, 0x4e, 0xfa, 0x83, 0x90, 0xe8, 0x29, 0x99, 0x6f, 0x66, 0x3c, 0xdf, 0xc4, 0x81, 0x37, 0x42, 0xa5, 0x65, 0xad, 0x55, 0x2b, 0x87, 0x36, 0x55, 0x5a, 0xec, 
0x9a, 0xa1, 0x94, 0xbb, 0x52, 0xb5, 0xad, 0xea, 0x76, 0xbd, 0x56, 0x56, 0xed, 0x84, 0xb4, 0xb2, 0x41, 0x33, 0x3d, 0x53, 0x4f, 0xe9, 0x7c, 0x0c, 0xd7, 0xd9, 0xbd, 0x4e, 0xf0, 0x40, 0xda, 0xd0, 0x9c, 0xfc, 0x26, 0x00, 0x27, 0x4a, 0x70, 0xfc, 0x31, 0xa0, 0xb1, 0x94, 0xc1, 0xbc, 0xd7, 0xea, 0x3b, 0x96, 0x96, 0x91, 0x0d, 0xd9, 0x2e, 0xf9, 0x14, 0xba, 0x8c, 0xd5, 0x88, 0xd2, 0xd4, 0x2c, 0x0a, 0x99, 0x31, 0xa4, 0x6b, 0x58, 0x14, 0x5d, 0x89, 0xc6, 0x2a, 0xcd, 0x66, 0x3e, 0x75, 0x1d, 0xd3, 0x43, 0x58, 0xba, 0xb2, 0xfc, 0x9b, 0xac, 0x2a, 0xb6, 0xbf, 0x21, 0xdb, 0x05, 0x5f, 0x38, 0x70, 0x2c, 0xab, 0x8a, 0x1e, 0x01, 0xf4, 0x85, 0xc0, 0xdc, 0xaa, 0x73, 0xec, 0x18, 0xf8, 0xd6, 0xa5, 0x23, 0x67, 0x0e, 0xb8, 0x5e, 0x9f, 0x36, 0xf2, 0x0a, 0x59, 0xbc, 0x21, 0xdb, 0x03, 0xbe, 0x70, 0xe0, 0x8b, 0xbc, 0xc2, 0xe4, 0x0c, 0x62, 0xaf, 0x6d, 0x7a, 0xd5, 0x19, 0xa4, 0x47, 0x10, 0x35, 0x4a, 0x30, 0xb2, 0x89, 0xb6, 0x71, 0x16, 0xa7, 0x42, 0xda, 0xf4, 0xa3, 0x5f, 0x93, 0x3b, 0x4e, 0x5f, 0xc2, 0x93, 0x0e, 0x7f, 0xda, 0xfc, 0xd6, 0xb8, 0x60, 0xfa, 0xc8, 0xe1, 0xd3, 0x69, 0x64, 0x72, 0x0c, 0x31, 0xc7, 0xca, 0xfc, 0xff, 0x6b, 0x1c, 0xc2, 0x52, 0x63, 0x65, 0xf2, 0xbe, 0xb0, 0xf5, 0xb4, 0xb4, 0x03, 0xa7, 0x85, 0xad, 0x93, 0x5f, 0x04, 0x1e, 0x86, 0x63, 0x46, 0xbb, 0x0f, 0xae, 0xfa, 0x42, 0x1a, 0xa9, 0x3a, 0xc3, 0x66, 0xde, 0xf1, 0x45, 0x3a, 0x5d, 0xe2, 0xed, 0xca, 0x94, 0x4f, 0x65, 0x9f, 0x3a, 0xab, 0x2f, 0xf9, 0x4d, 0xdb, 0xfa, 0x3d, 0x3c, 0xbe, 0x9b, 0xa4, 0x4f, 0x21, 0x3a, 0xc7, 0xcb, 0xd1, 0xcc, 0xbd, 0xd2, 0x15, 0x1c, 0x5c, 0x14, 0xcd, 0x80, 0xa3, 0x51, 0x08, 0xde, 0xcd, 0xde, 0x92, 0xcc, 0xc2, 0xfc, 0x73, 0x98, 0x47, 0x33, 0x88, 0x4e, 0x94, 0xa0, 0xcf, 0xae, 0x05, 0x6e, 0xae, 0x7f, 0xbd, 0xba, 0x0b, 0x83, 0x54, 0xb2, 0x47, 0x5f, 0xc3, 0xbe, 0xd3, 0xa4, 0xab, 0xbf, 0xac, 0x43, 0xd7, 0xf3, 0x7f, 0xee, 0x92, 0xec, 0x7d, 0x7d, 0xe0, 0xff, 0xb1, 0x57, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x52, 0xc6, 0x1d, 0xdb, 0x02, 0x00, 0x00, }
{ return nil, err }
conditional_block
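Similarly, a hedged sketch of resolving all branches with Gitiles.Refs, following the comment above that recommends limiting RefsPath to "refs/heads". The project name is a placeholder, obtaining the client is left out, and the package name exists only to keep the snippet self-contained.

package gitilesexample

import (
	"context"

	"go.chromium.org/luci/common/proto/gitiles"
)

// listBranches resolves every ref under refs/heads/* for one project and
// returns the ref -> revision map carried in RefsResponse.Revisions.
func listBranches(ctx context.Context, client gitiles.GitilesClient) (map[string]string, error) {
	resp, err := client.Refs(ctx, &gitiles.RefsRequest{
		Project:  "chromium/src", // placeholder project
		RefsPath: "refs/heads",   // per the comment above, use "refs/heads" for branches
	})
	if err != nil {
		return nil, err
	}
	return resp.Revisions, nil
}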
gitiles.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: go.chromium.org/luci/common/proto/gitiles/gitiles.proto package gitiles import prpc "go.chromium.org/luci/grpc/prpc" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import git "go.chromium.org/luci/common/proto/git" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // LogRequest is request message for Gitiles.Log rpc. type LogRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // The commit where to start the listing from. // The value can be: // - a git revision as 40-char string or its prefix so long as its unique in repo. // - a ref such as "refs/heads/branch" or just "branch" // - a ref defined as n-th parent of R in the form "R~n". // For example, "master~2" or "deadbeef~1". // Required. Treeish string `protobuf:"bytes,3,opt,name=treeish" json:"treeish,omitempty"` // The commit where to stop listing at. Together with treeish, // the represent git's "ancestor..treeish" notation. Ancestor string `protobuf:"bytes,2,opt,name=ancestor" json:"ancestor,omitempty"` // If true, include tree diff in commits. TreeDiff bool `protobuf:"varint,4,opt,name=tree_diff,json=treeDiff" json:"tree_diff,omitempty"` // Value of next_page_token in LogResponse to continue. PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` // If > 0, number of commits to retrieve. PageSize int32 `protobuf:"varint,11,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogRequest) Reset() { *m = LogRequest{} } func (m *LogRequest) String() string { return proto.CompactTextString(m) } func (*LogRequest) ProtoMessage() {} func (*LogRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{0} } func (m *LogRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogRequest.Unmarshal(m, b) } func (m *LogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogRequest.Marshal(b, m, deterministic) } func (dst *LogRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_LogRequest.Merge(dst, src) } func (m *LogRequest) XXX_Size() int { return xxx_messageInfo_LogRequest.Size(m) } func (m *LogRequest) XXX_DiscardUnknown() { xxx_messageInfo_LogRequest.DiscardUnknown(m) } var xxx_messageInfo_LogRequest proto.InternalMessageInfo func (m *LogRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *LogRequest) GetTreeish() string { if m != nil { return m.Treeish } return "" } func (m *LogRequest) GetAncestor() string
func (m *LogRequest) GetTreeDiff() bool { if m != nil { return m.TreeDiff } return false } func (m *LogRequest) GetPageToken() string { if m != nil { return m.PageToken } return "" } func (m *LogRequest) GetPageSize() int32 { if m != nil { return m.PageSize } return 0 } // LogRequest is response message for Gitiles.Log rpc. type LogResponse struct { // Retrieved commits. Log []*git.Commit `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` // A page token for next LogRequest to fetch next page of commits. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogResponse) Reset() { *m = LogResponse{} } func (m *LogResponse) String() string { return proto.CompactTextString(m) } func (*LogResponse) ProtoMessage() {} func (*LogResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{1} } func (m *LogResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogResponse.Unmarshal(m, b) } func (m *LogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogResponse.Marshal(b, m, deterministic) } func (dst *LogResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LogResponse.Merge(dst, src) } func (m *LogResponse) XXX_Size() int { return xxx_messageInfo_LogResponse.Size(m) } func (m *LogResponse) XXX_DiscardUnknown() { xxx_messageInfo_LogResponse.DiscardUnknown(m) } var xxx_messageInfo_LogResponse proto.InternalMessageInfo func (m *LogResponse) GetLog() []*git.Commit { if m != nil { return m.Log } return nil } func (m *LogResponse) GetNextPageToken() string { if m != nil { return m.NextPageToken } return "" } // RefsRequest is a request message of Gitiles.Refs RPC. type RefsRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // Limits which refs to resolve to only those matching {refsPath}/*. // // Must be "refs" or start with "refs/". // Must not include glob '*'. // Use "refs/heads" to retrieve all branches. // // To fetch **all** refs in a repo, specify just "refs" but beware of two // caveats: // * refs returned include a ref for each patchset for each Gerrit change // associated with the repo. // * returned map will contain special "HEAD" ref whose value in resulting map // will be name of the actual ref to which "HEAD" points, which is typically // "refs/heads/master". // // Thus, if you are looking for all tags and all branches of repo, it's // recommended to issue two Refs calls limited to "refs/tags" and "refs/heads" // instead of one call for "refs". // // Since Gerrit allows per-ref ACLs, it is possible that some refs matching // refPrefix would not be present in results because current user isn't granted // read permission on them. 
RefsPath string `protobuf:"bytes,2,opt,name=refs_path,json=refsPath" json:"refs_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsRequest) Reset() { *m = RefsRequest{} } func (m *RefsRequest) String() string { return proto.CompactTextString(m) } func (*RefsRequest) ProtoMessage() {} func (*RefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{2} } func (m *RefsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsRequest.Unmarshal(m, b) } func (m *RefsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsRequest.Marshal(b, m, deterministic) } func (dst *RefsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsRequest.Merge(dst, src) } func (m *RefsRequest) XXX_Size() int { return xxx_messageInfo_RefsRequest.Size(m) } func (m *RefsRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefsRequest.DiscardUnknown(m) } var xxx_messageInfo_RefsRequest proto.InternalMessageInfo func (m *RefsRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *RefsRequest) GetRefsPath() string { if m != nil { return m.RefsPath } return "" } // RefsResponse is a response message of Gitiles.Refs RPC. type RefsResponse struct { // revisions maps a ref to a revision. // Git branches have keys start with "refs/heads/". Revisions map[string]string `protobuf:"bytes,2,rep,name=revisions" json:"revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsResponse) Reset() { *m = RefsResponse{} } func (m *RefsResponse) String() string { return proto.CompactTextString(m) } func (*RefsResponse) ProtoMessage() {} func (*RefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{3} } func (m *RefsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsResponse.Unmarshal(m, b) } func (m *RefsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsResponse.Marshal(b, m, deterministic) } func (dst *RefsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsResponse.Merge(dst, src) } func (m *RefsResponse) XXX_Size() int { return xxx_messageInfo_RefsResponse.Size(m) } func (m *RefsResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefsResponse.DiscardUnknown(m) } var xxx_messageInfo_RefsResponse proto.InternalMessageInfo func (m *RefsResponse) GetRevisions() map[string]string { if m != nil { return m.Revisions } return nil } func init() { proto.RegisterType((*LogRequest)(nil), "gitiles.LogRequest") proto.RegisterType((*LogResponse)(nil), "gitiles.LogResponse") proto.RegisterType((*RefsRequest)(nil), "gitiles.RefsRequest") proto.RegisterType((*RefsResponse)(nil), "gitiles.RefsResponse") proto.RegisterMapType((map[string]string)(nil), "gitiles.RefsResponse.RevisionsEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // GitilesClient is the client API for Gitiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type GitilesClient interface { // Log retrieves commit log. Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) // Refs retrieves repo refs. Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) } type gitilesPRPCClient struct { client *prpc.Client } func NewGitilesPRPCClient(client *prpc.Client) GitilesClient { return &gitilesPRPCClient{client} } func (c *gitilesPRPCClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesPRPCClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } type gitilesClient struct { cc *grpc.ClientConn } func NewGitilesClient(cc *grpc.ClientConn) GitilesClient { return &gitilesClient{cc} } func (c *gitilesClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } // GitilesServer is the server API for Gitiles service. type GitilesServer interface { // Log retrieves commit log. Log(context.Context, *LogRequest) (*LogResponse, error) // Refs retrieves repo refs. 
Refs(context.Context, *RefsRequest) (*RefsResponse, error) } func RegisterGitilesServer(s prpc.Registrar, srv GitilesServer) { s.RegisterService(&_Gitiles_serviceDesc, srv) } func _Gitiles_Log_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(LogRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GitilesServer).Log(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitiles.Gitiles/Log", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GitilesServer).Log(ctx, req.(*LogRequest)) } return interceptor(ctx, in, info, handler) } func _Gitiles_Refs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RefsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GitilesServer).Refs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitiles.Gitiles/Refs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GitilesServer).Refs(ctx, req.(*RefsRequest)) } return interceptor(ctx, in, info, handler) } var _Gitiles_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitiles.Gitiles", HandlerType: (*GitilesServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Log", Handler: _Gitiles_Log_Handler, }, { MethodName: "Refs", Handler: _Gitiles_Refs_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "go.chromium.org/luci/common/proto/gitiles/gitiles.proto", } func init() { proto.RegisterFile("go.chromium.org/luci/common/proto/gitiles/gitiles.proto", fileDescriptor_gitiles_e833c2c096a9c6f8) } var fileDescriptor_gitiles_e833c2c096a9c6f8 = []byte{ // 410 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6e, 0xd4, 0x30, 0x10, 0x80, 0xeb, 0x4d, 0xcb, 0xee, 0x4e, 0xf8, 0x93, 0x59, 0x24, 0x6b, 0xab, 0x4a, 0xab, 0x08, 0xa1, 0x3d, 0x65, 0xa5, 0x20, 0x04, 0x42, 0x9c, 0xa0, 0x88, 0x4b, 0x0f, 0x95, 0xe9, 0x3d, 0x0a, 0x61, 0xe2, 0x98, 0x26, 0x71, 0xb0, 0x9d, 0x8a, 0xf6, 0x29, 0x78, 0x19, 0xde, 0x0f, 0xd9, 0x4e, 0xfa, 0x83, 0x90, 0xe8, 0x29, 0x99, 0x6f, 0x66, 0x3c, 0xdf, 0xc4, 0x81, 0x37, 0x42, 0xa5, 0x65, 0xad, 0x55, 0x2b, 0x87, 0x36, 0x55, 0x5a, 0xec, 0x9a, 0xa1, 0x94, 0xbb, 0x52, 0xb5, 0xad, 0xea, 0x76, 0xbd, 0x56, 0x56, 0xed, 0x84, 0xb4, 0xb2, 0x41, 0x33, 0x3d, 0x53, 0x4f, 0xe9, 0x7c, 0x0c, 0xd7, 0xd9, 0xbd, 0x4e, 0xf0, 0x40, 0xda, 0xd0, 0x9c, 0xfc, 0x26, 0x00, 0x27, 0x4a, 0x70, 0xfc, 0x31, 0xa0, 0xb1, 0x94, 0xc1, 0xbc, 0xd7, 0xea, 0x3b, 0x96, 0x96, 0x91, 0x0d, 0xd9, 0x2e, 0xf9, 0x14, 0xba, 0x8c, 0xd5, 0x88, 0xd2, 0xd4, 0x2c, 0x0a, 0x99, 0x31, 0xa4, 0x6b, 0x58, 0x14, 0x5d, 0x89, 0xc6, 0x2a, 0xcd, 0x66, 0x3e, 0x75, 0x1d, 0xd3, 0x43, 0x58, 0xba, 0xb2, 0xfc, 0x9b, 0xac, 0x2a, 0xb6, 0xbf, 0x21, 0xdb, 0x05, 0x5f, 0x38, 0x70, 0x2c, 0xab, 0x8a, 0x1e, 0x01, 0xf4, 0x85, 0xc0, 0xdc, 0xaa, 0x73, 0xec, 0x18, 0xf8, 0xd6, 0xa5, 0x23, 0x67, 0x0e, 0xb8, 0x5e, 0x9f, 0x36, 0xf2, 0x0a, 0x59, 0xbc, 0x21, 0xdb, 0x03, 0xbe, 0x70, 0xe0, 0x8b, 0xbc, 0xc2, 0xe4, 0x0c, 0x62, 0xaf, 0x6d, 0x7a, 0xd5, 0x19, 0xa4, 0x47, 0x10, 0x35, 0x4a, 0x30, 0xb2, 0x89, 0xb6, 0x71, 0x16, 0xa7, 0x42, 0xda, 0xf4, 0xa3, 0x5f, 0x93, 0x3b, 0x4e, 0x5f, 0xc2, 0x93, 0x0e, 0x7f, 0xda, 0xfc, 0xd6, 0xb8, 0x60, 0xfa, 0xc8, 0xe1, 0xd3, 0x69, 0x64, 0x72, 0x0c, 0x31, 0xc7, 0xca, 0xfc, 0xff, 0x6b, 0x1c, 0xc2, 0x52, 0x63, 0x65, 
0xf2, 0xbe, 0xb0, 0xf5, 0xb4, 0xb4, 0x03, 0xa7, 0x85, 0xad, 0x93, 0x5f, 0x04, 0x1e, 0x86, 0x63, 0x46, 0xbb, 0x0f, 0xae, 0xfa, 0x42, 0x1a, 0xa9, 0x3a, 0xc3, 0x66, 0xde, 0xf1, 0x45, 0x3a, 0x5d, 0xe2, 0xed, 0xca, 0x94, 0x4f, 0x65, 0x9f, 0x3a, 0xab, 0x2f, 0xf9, 0x4d, 0xdb, 0xfa, 0x3d, 0x3c, 0xbe, 0x9b, 0xa4, 0x4f, 0x21, 0x3a, 0xc7, 0xcb, 0xd1, 0xcc, 0xbd, 0xd2, 0x15, 0x1c, 0x5c, 0x14, 0xcd, 0x80, 0xa3, 0x51, 0x08, 0xde, 0xcd, 0xde, 0x92, 0xcc, 0xc2, 0xfc, 0x73, 0x98, 0x47, 0x33, 0x88, 0x4e, 0x94, 0xa0, 0xcf, 0xae, 0x05, 0x6e, 0xae, 0x7f, 0xbd, 0xba, 0x0b, 0x83, 0x54, 0xb2, 0x47, 0x5f, 0xc3, 0xbe, 0xd3, 0xa4, 0xab, 0xbf, 0xac, 0x43, 0xd7, 0xf3, 0x7f, 0xee, 0x92, 0xec, 0x7d, 0x7d, 0xe0, 0xff, 0xb1, 0x57, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x52, 0xc6, 0x1d, 0xdb, 0x02, 0x00, 0x00, }
{ if m != nil { return m.Ancestor } return "" }
identifier_body
gitiles.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: go.chromium.org/luci/common/proto/gitiles/gitiles.proto package gitiles import prpc "go.chromium.org/luci/grpc/prpc" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import git "go.chromium.org/luci/common/proto/git" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // LogRequest is request message for Gitiles.Log rpc. type LogRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // The commit where to start the listing from. // The value can be: // - a git revision as 40-char string or its prefix so long as its unique in repo. // - a ref such as "refs/heads/branch" or just "branch" // - a ref defined as n-th parent of R in the form "R~n". // For example, "master~2" or "deadbeef~1". // Required. Treeish string `protobuf:"bytes,3,opt,name=treeish" json:"treeish,omitempty"` // The commit where to stop listing at. Together with treeish, // the represent git's "ancestor..treeish" notation. Ancestor string `protobuf:"bytes,2,opt,name=ancestor" json:"ancestor,omitempty"` // If true, include tree diff in commits. TreeDiff bool `protobuf:"varint,4,opt,name=tree_diff,json=treeDiff" json:"tree_diff,omitempty"` // Value of next_page_token in LogResponse to continue. PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` // If > 0, number of commits to retrieve. 
PageSize int32 `protobuf:"varint,11,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogRequest) Reset() { *m = LogRequest{} } func (m *LogRequest) String() string { return proto.CompactTextString(m) } func (*LogRequest) ProtoMessage() {} func (*LogRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{0} } func (m *LogRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogRequest.Unmarshal(m, b) } func (m *LogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogRequest.Marshal(b, m, deterministic) } func (dst *LogRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_LogRequest.Merge(dst, src) } func (m *LogRequest) XXX_Size() int { return xxx_messageInfo_LogRequest.Size(m) } func (m *LogRequest) XXX_DiscardUnknown() { xxx_messageInfo_LogRequest.DiscardUnknown(m) } var xxx_messageInfo_LogRequest proto.InternalMessageInfo func (m *LogRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *LogRequest) GetTreeish() string { if m != nil { return m.Treeish } return "" } func (m *LogRequest) GetAncestor() string { if m != nil { return m.Ancestor } return "" } func (m *LogRequest) GetTreeDiff() bool { if m != nil { return m.TreeDiff } return false } func (m *LogRequest) GetPageToken() string { if m != nil { return m.PageToken } return "" } func (m *LogRequest) GetPageSize() int32 { if m != nil { return m.PageSize } return 0 } // LogRequest is response message for Gitiles.Log rpc. type LogResponse struct { // Retrieved commits. Log []*git.Commit `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` // A page token for next LogRequest to fetch next page of commits. NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LogResponse) Reset() { *m = LogResponse{} } func (m *LogResponse) String() string { return proto.CompactTextString(m) } func (*LogResponse) ProtoMessage() {} func (*LogResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{1} } func (m *LogResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LogResponse.Unmarshal(m, b) } func (m *LogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LogResponse.Marshal(b, m, deterministic) } func (dst *LogResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LogResponse.Merge(dst, src) } func (m *LogResponse) XXX_Size() int { return xxx_messageInfo_LogResponse.Size(m) } func (m *LogResponse) XXX_DiscardUnknown() { xxx_messageInfo_LogResponse.DiscardUnknown(m) } var xxx_messageInfo_LogResponse proto.InternalMessageInfo func (m *LogResponse) GetLog() []*git.Commit { if m != nil { return m.Log } return nil } func (m *LogResponse) GetNextPageToken() string { if m != nil { return m.NextPageToken } return "" } // RefsRequest is a request message of Gitiles.Refs RPC. type RefsRequest struct { // Gitiles project, e.g. "chromium/src" part in // https://chromium.googlesource.com/chromium/src/+/master // Required. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // Limits which refs to resolve to only those matching {refsPath}/*. // // Must be "refs" or start with "refs/". // Must not include glob '*'. 
// Use "refs/heads" to retrieve all branches. // // To fetch **all** refs in a repo, specify just "refs" but beware of two // caveats: // * refs returned include a ref for each patchset for each Gerrit change // associated with the repo. // * returned map will contain special "HEAD" ref whose value in resulting map // will be name of the actual ref to which "HEAD" points, which is typically // "refs/heads/master". // // Thus, if you are looking for all tags and all branches of repo, it's // recommended to issue two Refs calls limited to "refs/tags" and "refs/heads" // instead of one call for "refs". // // Since Gerrit allows per-ref ACLs, it is possible that some refs matching // refPrefix would not be present in results because current user isn't granted // read permission on them. RefsPath string `protobuf:"bytes,2,opt,name=refs_path,json=refsPath" json:"refs_path,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsRequest) Reset() { *m = RefsRequest{} } func (m *RefsRequest) String() string { return proto.CompactTextString(m) } func (*RefsRequest) ProtoMessage() {} func (*RefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{2} } func (m *RefsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsRequest.Unmarshal(m, b) } func (m *RefsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsRequest.Marshal(b, m, deterministic) } func (dst *RefsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsRequest.Merge(dst, src) } func (m *RefsRequest) XXX_Size() int { return xxx_messageInfo_RefsRequest.Size(m) } func (m *RefsRequest) XXX_DiscardUnknown() { xxx_messageInfo_RefsRequest.DiscardUnknown(m) } var xxx_messageInfo_RefsRequest proto.InternalMessageInfo
} return "" } func (m *RefsRequest) GetRefsPath() string { if m != nil { return m.RefsPath } return "" } // RefsResponse is a response message of Gitiles.Refs RPC. type RefsResponse struct { // revisions maps a ref to a revision. // Git branches have keys start with "refs/heads/". Revisions map[string]string `protobuf:"bytes,2,rep,name=revisions" json:"revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RefsResponse) Reset() { *m = RefsResponse{} } func (m *RefsResponse) String() string { return proto.CompactTextString(m) } func (*RefsResponse) ProtoMessage() {} func (*RefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_gitiles_e833c2c096a9c6f8, []int{3} } func (m *RefsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RefsResponse.Unmarshal(m, b) } func (m *RefsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RefsResponse.Marshal(b, m, deterministic) } func (dst *RefsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RefsResponse.Merge(dst, src) } func (m *RefsResponse) XXX_Size() int { return xxx_messageInfo_RefsResponse.Size(m) } func (m *RefsResponse) XXX_DiscardUnknown() { xxx_messageInfo_RefsResponse.DiscardUnknown(m) } var xxx_messageInfo_RefsResponse proto.InternalMessageInfo func (m *RefsResponse) GetRevisions() map[string]string { if m != nil { return m.Revisions } return nil } func init() { proto.RegisterType((*LogRequest)(nil), "gitiles.LogRequest") proto.RegisterType((*LogResponse)(nil), "gitiles.LogResponse") proto.RegisterType((*RefsRequest)(nil), "gitiles.RefsRequest") proto.RegisterType((*RefsResponse)(nil), "gitiles.RefsResponse") proto.RegisterMapType((map[string]string)(nil), "gitiles.RefsResponse.RevisionsEntry") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // GitilesClient is the client API for Gitiles service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GitilesClient interface { // Log retrieves commit log. Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) // Refs retrieves repo refs. Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) } type gitilesPRPCClient struct { client *prpc.Client } func NewGitilesPRPCClient(client *prpc.Client) GitilesClient { return &gitilesPRPCClient{client} } func (c *gitilesPRPCClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesPRPCClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.client.Call(ctx, "gitiles.Gitiles", "Refs", in, out, opts...) 
if err != nil { return nil, err } return out, nil } type gitilesClient struct { cc *grpc.ClientConn } func NewGitilesClient(cc *grpc.ClientConn) GitilesClient { return &gitilesClient{cc} } func (c *gitilesClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*LogResponse, error) { out := new(LogResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Log", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *gitilesClient) Refs(ctx context.Context, in *RefsRequest, opts ...grpc.CallOption) (*RefsResponse, error) { out := new(RefsResponse) err := c.cc.Invoke(ctx, "/gitiles.Gitiles/Refs", in, out, opts...) if err != nil { return nil, err } return out, nil } // GitilesServer is the server API for Gitiles service. type GitilesServer interface { // Log retrieves commit log. Log(context.Context, *LogRequest) (*LogResponse, error) // Refs retrieves repo refs. Refs(context.Context, *RefsRequest) (*RefsResponse, error) } func RegisterGitilesServer(s prpc.Registrar, srv GitilesServer) { s.RegisterService(&_Gitiles_serviceDesc, srv) } func _Gitiles_Log_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(LogRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GitilesServer).Log(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitiles.Gitiles/Log", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GitilesServer).Log(ctx, req.(*LogRequest)) } return interceptor(ctx, in, info, handler) } func _Gitiles_Refs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RefsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GitilesServer).Refs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitiles.Gitiles/Refs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GitilesServer).Refs(ctx, req.(*RefsRequest)) } return interceptor(ctx, in, info, handler) } var _Gitiles_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitiles.Gitiles", HandlerType: (*GitilesServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Log", Handler: _Gitiles_Log_Handler, }, { MethodName: "Refs", Handler: _Gitiles_Refs_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "go.chromium.org/luci/common/proto/gitiles/gitiles.proto", } func init() { proto.RegisterFile("go.chromium.org/luci/common/proto/gitiles/gitiles.proto", fileDescriptor_gitiles_e833c2c096a9c6f8) } var fileDescriptor_gitiles_e833c2c096a9c6f8 = []byte{ // 410 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6e, 0xd4, 0x30, 0x10, 0x80, 0xeb, 0x4d, 0xcb, 0xee, 0x4e, 0xf8, 0x93, 0x59, 0x24, 0x6b, 0xab, 0x4a, 0xab, 0x08, 0xa1, 0x3d, 0x65, 0xa5, 0x20, 0x04, 0x42, 0x9c, 0xa0, 0x88, 0x4b, 0x0f, 0x95, 0xe9, 0x3d, 0x0a, 0x61, 0xe2, 0x98, 0x26, 0x71, 0xb0, 0x9d, 0x8a, 0xf6, 0x29, 0x78, 0x19, 0xde, 0x0f, 0xd9, 0x4e, 0xfa, 0x83, 0x90, 0xe8, 0x29, 0x99, 0x6f, 0x66, 0x3c, 0xdf, 0xc4, 0x81, 0x37, 0x42, 0xa5, 0x65, 0xad, 0x55, 0x2b, 0x87, 0x36, 0x55, 0x5a, 0xec, 0x9a, 0xa1, 0x94, 0xbb, 0x52, 0xb5, 0xad, 0xea, 0x76, 0xbd, 0x56, 0x56, 0xed, 0x84, 0xb4, 0xb2, 0x41, 0x33, 0x3d, 0x53, 0x4f, 0xe9, 0x7c, 0x0c, 0xd7, 0xd9, 0xbd, 0x4e, 0xf0, 0x40, 0xda, 0xd0, 0x9c, 0xfc, 0x26, 0x00, 0x27, 0x4a, 0x70, 
0xfc, 0x31, 0xa0, 0xb1, 0x94, 0xc1, 0xbc, 0xd7, 0xea, 0x3b, 0x96, 0x96, 0x91, 0x0d, 0xd9, 0x2e, 0xf9, 0x14, 0xba, 0x8c, 0xd5, 0x88, 0xd2, 0xd4, 0x2c, 0x0a, 0x99, 0x31, 0xa4, 0x6b, 0x58, 0x14, 0x5d, 0x89, 0xc6, 0x2a, 0xcd, 0x66, 0x3e, 0x75, 0x1d, 0xd3, 0x43, 0x58, 0xba, 0xb2, 0xfc, 0x9b, 0xac, 0x2a, 0xb6, 0xbf, 0x21, 0xdb, 0x05, 0x5f, 0x38, 0x70, 0x2c, 0xab, 0x8a, 0x1e, 0x01, 0xf4, 0x85, 0xc0, 0xdc, 0xaa, 0x73, 0xec, 0x18, 0xf8, 0xd6, 0xa5, 0x23, 0x67, 0x0e, 0xb8, 0x5e, 0x9f, 0x36, 0xf2, 0x0a, 0x59, 0xbc, 0x21, 0xdb, 0x03, 0xbe, 0x70, 0xe0, 0x8b, 0xbc, 0xc2, 0xe4, 0x0c, 0x62, 0xaf, 0x6d, 0x7a, 0xd5, 0x19, 0xa4, 0x47, 0x10, 0x35, 0x4a, 0x30, 0xb2, 0x89, 0xb6, 0x71, 0x16, 0xa7, 0x42, 0xda, 0xf4, 0xa3, 0x5f, 0x93, 0x3b, 0x4e, 0x5f, 0xc2, 0x93, 0x0e, 0x7f, 0xda, 0xfc, 0xd6, 0xb8, 0x60, 0xfa, 0xc8, 0xe1, 0xd3, 0x69, 0x64, 0x72, 0x0c, 0x31, 0xc7, 0xca, 0xfc, 0xff, 0x6b, 0x1c, 0xc2, 0x52, 0x63, 0x65, 0xf2, 0xbe, 0xb0, 0xf5, 0xb4, 0xb4, 0x03, 0xa7, 0x85, 0xad, 0x93, 0x5f, 0x04, 0x1e, 0x86, 0x63, 0x46, 0xbb, 0x0f, 0xae, 0xfa, 0x42, 0x1a, 0xa9, 0x3a, 0xc3, 0x66, 0xde, 0xf1, 0x45, 0x3a, 0x5d, 0xe2, 0xed, 0xca, 0x94, 0x4f, 0x65, 0x9f, 0x3a, 0xab, 0x2f, 0xf9, 0x4d, 0xdb, 0xfa, 0x3d, 0x3c, 0xbe, 0x9b, 0xa4, 0x4f, 0x21, 0x3a, 0xc7, 0xcb, 0xd1, 0xcc, 0xbd, 0xd2, 0x15, 0x1c, 0x5c, 0x14, 0xcd, 0x80, 0xa3, 0x51, 0x08, 0xde, 0xcd, 0xde, 0x92, 0xcc, 0xc2, 0xfc, 0x73, 0x98, 0x47, 0x33, 0x88, 0x4e, 0x94, 0xa0, 0xcf, 0xae, 0x05, 0x6e, 0xae, 0x7f, 0xbd, 0xba, 0x0b, 0x83, 0x54, 0xb2, 0x47, 0x5f, 0xc3, 0xbe, 0xd3, 0xa4, 0xab, 0xbf, 0xac, 0x43, 0xd7, 0xf3, 0x7f, 0xee, 0x92, 0xec, 0x7d, 0x7d, 0xe0, 0xff, 0xb1, 0x57, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x52, 0xc6, 0x1d, 0xdb, 0x02, 0x00, 0x00, }
func (m *RefsRequest) GetProject() string { if m != nil { return m.Project
random_line_split
lib.rs
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. // BEGIN LINT CONFIG // DO NOT EDIT. Automatically generated by bin/gen-lints. // Have complaints about the noise? See the note in misc/python/materialize/cli/gen-lints.py first. #![allow(clippy::style)] #![allow(clippy::complexity)] #![allow(clippy::large_enum_variant)] #![allow(clippy::mutable_key_type)] #![allow(clippy::stable_sort_primitive)] #![allow(clippy::map_entry)] #![allow(clippy::box_default)] #![warn(clippy::bool_comparison)] #![warn(clippy::clone_on_ref_ptr)] #![warn(clippy::no_effect)] #![warn(clippy::unnecessary_unwrap)] #![warn(clippy::dbg_macro)] #![warn(clippy::todo)] #![warn(clippy::wildcard_dependencies)] #![warn(clippy::zero_prefixed_literal)] #![warn(clippy::borrowed_box)] #![warn(clippy::deref_addrof)] #![warn(clippy::double_must_use)] #![warn(clippy::double_parens)] #![warn(clippy::extra_unused_lifetimes)] #![warn(clippy::needless_borrow)] #![warn(clippy::needless_question_mark)] #![warn(clippy::needless_return)] #![warn(clippy::redundant_pattern)] #![warn(clippy::redundant_slicing)] #![warn(clippy::redundant_static_lifetimes)] #![warn(clippy::single_component_path_imports)] #![warn(clippy::unnecessary_cast)] #![warn(clippy::useless_asref)] #![warn(clippy::useless_conversion)] #![warn(clippy::builtin_type_shadow)] #![warn(clippy::duplicate_underscore_argument)] #![warn(clippy::double_neg)] #![warn(clippy::unnecessary_mut_passed)] #![warn(clippy::wildcard_in_or_patterns)] #![warn(clippy::crosspointer_transmute)] #![warn(clippy::excessive_precision)] #![warn(clippy::overflow_check_conditional)] #![warn(clippy::as_conversions)] #![warn(clippy::match_overlapping_arm)] #![warn(clippy::zero_divided_by_zero)] #![warn(clippy::must_use_unit)] #![warn(clippy::suspicious_assignment_formatting)] #![warn(clippy::suspicious_else_formatting)] #![warn(clippy::suspicious_unary_op_formatting)] #![warn(clippy::mut_mutex_lock)] #![warn(clippy::print_literal)] #![warn(clippy::same_item_push)] #![warn(clippy::useless_format)] #![warn(clippy::write_literal)] #![warn(clippy::redundant_closure)] #![warn(clippy::redundant_closure_call)] #![warn(clippy::unnecessary_lazy_evaluations)] #![warn(clippy::partialeq_ne_impl)] #![warn(clippy::redundant_field_names)] #![warn(clippy::transmutes_expressible_as_ptr_casts)] #![warn(clippy::unused_async)] #![warn(clippy::disallowed_methods)] #![warn(clippy::disallowed_macros)] #![warn(clippy::disallowed_types)] #![warn(clippy::from_over_into)] // END LINT CONFIG //! An API client for [Metabase]. //! //! Only the features presently required are implemented. Documentation is //! sparse to avoid duplicating information in the upstream API documentation. //! See: //! //! * [Using the REST API](https://github.com/metabase/metabase/wiki/Using-the-REST-API) //! * [Auto-generated API documentation](https://github.com/metabase/metabase/blob/master/docs/api-documentation.md) //! //! [Metabase]: https://metabase.com #![warn(missing_debug_implementations)] use std::fmt; use std::time::Duration; use reqwest::{IntoUrl, Url}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; /// A Metabase API client. 
#[derive(Debug)] pub struct Client { inner: reqwest::Client, url: Url, session_id: Option<String>, } impl Client { /// Constructs a new `Client` that will target a Metabase instance at `url`. /// /// `url` must not contain a path nor be a [cannot-be-a-base] URL. /// /// [cannot-be-a-base]: https://url.spec.whatwg.org/#url-cannot-be-a-base-url-flag pub fn new<U>(url: U) -> Result<Self, Error> where U: IntoUrl, { let mut url = url.into_url()?; if url.path() != "/" { return Err(Error::InvalidUrl("base URL cannot have path".into())); } assert!(!url.cannot_be_a_base()); url.path_segments_mut() .expect("cannot-be-a-base checked to be false") .push("api"); Ok(Client { inner: reqwest::Client::new(), url, session_id: None, }) } /// Sets the session ID to include in future requests made by this client. pub fn set_session_id(&mut self, session_id: String) { self.session_id = Some(session_id); } /// Fetches public, global properties. /// /// The underlying API call is `GET /api/session/properties`. pub async fn session_properties(&self) -> Result<SessionPropertiesResponse, reqwest::Error> { let url = self.api_url(&["session", "properties"]); self.send_request(self.inner.get(url)).await } /// Requests a session ID for the username and password named in `request`. /// /// Note that usernames are typically email addresses. To authenticate /// future requests with the returned session ID, call `set_session_id`. /// /// The underlying API call is `POST /api/session`. pub async fn login(&self, request: &LoginRequest) -> Result<LoginResponse, reqwest::Error> { let url = self.api_url(&["session"]); self.send_request(self.inner.post(url).json(request)).await } /// Creates a user and database connection if the Metabase instance has not /// yet been set up. /// /// The request must include the `setup_token` from a /// `SessionPropertiesResponse`. If the setup token returned by /// [`Client::session_properties`] is `None`, the cluster is already set up, /// and this request will fail. /// /// The underlying API call is `POST /api/setup`. pub async fn setup(&self, request: &SetupRequest) -> Result<LoginResponse, reqwest::Error> { let url = self.api_url(&["setup"]); self.send_request(self.inner.post(url).json(request)).await } /// Fetches the list of databases known to Metabase. /// /// The underlying API call is `GET /database`. pub async fn databases(&self) -> Result<Vec<Database>, reqwest::Error> { let url = self.api_url(&["database"]); let res: ListWrapper<_> = self.send_request(self.inner.get(url)).await?; Ok(res.data) } /// Fetches metadata about a particular database. /// /// The underlying API call is `GET /database/:id/metadata`. pub async fn database_metadata(&self, id: usize) -> Result<DatabaseMetadata, reqwest::Error> { let url = self.api_url(&["database", &id.to_string(), "metadata"]); self.send_request(self.inner.get(url)).await } fn api_url(&self, endpoint: &[&str]) -> Url { let mut url = self.url.clone(); url.path_segments_mut() .expect("url validated on construction") .extend(endpoint); url } async fn send_request<T>(&self, mut req: reqwest::RequestBuilder) -> Result<T, reqwest::Error> where T: DeserializeOwned, { req = req.timeout(Duration::from_secs(5)); if let Some(session_id) = &self.session_id { req = req.header("X-Metabase-Session", session_id); } let res = req.send().await?.error_for_status()?; res.json().await } } /// A Metabase error. #[derive(Debug)] pub enum Error { /// The provided URL was invalid. InvalidUrl(String), /// The underlying transport mechanism returned an error. 
Transport(reqwest::Error), } impl From<reqwest::Error> for Error { fn from(e: reqwest::Error) -> Error { Error::Transport(e) } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::InvalidUrl(_) => None, Error::Transport(e) => Some(e), } } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::InvalidUrl(msg) => write!(f, "invalid url: {}", msg), Error::Transport(e) => write!(f, "transport: {}", e), } } } #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] struct ListWrapper<T> { data: Vec<T>, } /// The response to [`Client::session_properties`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] #[serde(rename_all = "kebab-case")] pub struct SessionPropertiesResponse { pub setup_token: Option<String>, } /// The request for [`Client::setup`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupRequest { pub allow_tracking: bool, pub database: SetupDatabase, pub token: String, pub prefs: SetupPrefs, pub user: SetupUser, } /// A database to create as part of a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupDatabase { pub engine: String, pub name: String, pub details: SetupDatabaseDetails, } /// Details for a [`SetupDatabase`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupDatabaseDetails { pub host: String, pub port: usize, pub dbname: String, pub user: String, } /// Preferences for a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupPrefs { pub site_name: String, } /// A user to create as part of a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupUser { pub email: String, pub first_name: String, pub last_name: String, pub password: String, pub site_name: String, } /// The request for [`Client::login`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct LoginRequest { pub username: String, pub password: String, } /// The response to [`Client::login`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct LoginResponse { pub id: String, } /// A database returned by [`Client::databases`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct
{ pub name: String, pub id: usize, } /// The response to [`Client::database_metadata`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct DatabaseMetadata { pub tables: Vec<Table>, } /// A table that is part of [`DatabaseMetadata`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct Table { pub name: String, pub schema: String, pub fields: Vec<TableField>, } /// A field of a [`Table`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct TableField { pub name: String, pub database_type: String, pub base_type: String, pub special_type: Option<String>, }
Database
identifier_name
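The lib.rs record above defines an async Metabase API client. As a rough illustration only, here is a minimal sketch of how that client might be driven end to end; the `metabase` crate path, the instance URL, the credentials, and the use of tokio as the async runtime are all assumptions, not something the source states.

```rust
// Hypothetical usage of the Metabase `Client` shown above. The crate name
// `metabase`, the URL, and the credentials are placeholders, and a tokio
// runtime is assumed because the client's methods are async.
use metabase::{Client, LoginRequest};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Construct a client against a local Metabase instance. Only a bare base
    // URL is accepted: `Client::new` rejects URLs that carry a path.
    let mut client = Client::new("http://127.0.0.1:3000")?;

    // Check whether the instance still needs to be set up.
    let props = client.session_properties().await?;
    println!("setup token: {:?}", props.setup_token);

    // Log in and attach the returned session ID to all future requests.
    let session = client
        .login(&LoginRequest {
            username: "user@example.com".into(), // placeholder credentials
            password: "hunter2".into(),
        })
        .await?;
    client.set_session_id(session.id);
    Ok(())
}
```

Because `Client::new` validates the URL up front, passing anything with a path (for example `http://127.0.0.1:3000/metabase`) would return `Error::InvalidUrl` before any request is made.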
lib.rs
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. // BEGIN LINT CONFIG // DO NOT EDIT. Automatically generated by bin/gen-lints. // Have complaints about the noise? See the note in misc/python/materialize/cli/gen-lints.py first. #![allow(clippy::style)] #![allow(clippy::complexity)] #![allow(clippy::large_enum_variant)] #![allow(clippy::mutable_key_type)] #![allow(clippy::stable_sort_primitive)] #![allow(clippy::map_entry)] #![allow(clippy::box_default)] #![warn(clippy::bool_comparison)] #![warn(clippy::clone_on_ref_ptr)] #![warn(clippy::no_effect)] #![warn(clippy::unnecessary_unwrap)] #![warn(clippy::dbg_macro)] #![warn(clippy::todo)] #![warn(clippy::wildcard_dependencies)] #![warn(clippy::zero_prefixed_literal)] #![warn(clippy::borrowed_box)] #![warn(clippy::deref_addrof)] #![warn(clippy::double_must_use)] #![warn(clippy::double_parens)] #![warn(clippy::extra_unused_lifetimes)] #![warn(clippy::needless_borrow)] #![warn(clippy::needless_question_mark)] #![warn(clippy::needless_return)] #![warn(clippy::redundant_pattern)] #![warn(clippy::redundant_slicing)] #![warn(clippy::redundant_static_lifetimes)] #![warn(clippy::single_component_path_imports)] #![warn(clippy::unnecessary_cast)] #![warn(clippy::useless_asref)] #![warn(clippy::useless_conversion)] #![warn(clippy::builtin_type_shadow)] #![warn(clippy::duplicate_underscore_argument)] #![warn(clippy::double_neg)] #![warn(clippy::unnecessary_mut_passed)] #![warn(clippy::wildcard_in_or_patterns)] #![warn(clippy::crosspointer_transmute)] #![warn(clippy::excessive_precision)] #![warn(clippy::overflow_check_conditional)] #![warn(clippy::as_conversions)] #![warn(clippy::match_overlapping_arm)] #![warn(clippy::zero_divided_by_zero)] #![warn(clippy::must_use_unit)] #![warn(clippy::suspicious_assignment_formatting)] #![warn(clippy::suspicious_else_formatting)] #![warn(clippy::suspicious_unary_op_formatting)] #![warn(clippy::mut_mutex_lock)] #![warn(clippy::print_literal)] #![warn(clippy::same_item_push)] #![warn(clippy::useless_format)] #![warn(clippy::write_literal)] #![warn(clippy::redundant_closure)] #![warn(clippy::redundant_closure_call)] #![warn(clippy::unnecessary_lazy_evaluations)] #![warn(clippy::partialeq_ne_impl)] #![warn(clippy::redundant_field_names)] #![warn(clippy::transmutes_expressible_as_ptr_casts)] #![warn(clippy::unused_async)] #![warn(clippy::disallowed_methods)] #![warn(clippy::disallowed_macros)] #![warn(clippy::disallowed_types)] #![warn(clippy::from_over_into)] // END LINT CONFIG //! An API client for [Metabase]. //! //! Only the features presently required are implemented. Documentation is //! sparse to avoid duplicating information in the upstream API documentation. //! See: //! //! * [Using the REST API](https://github.com/metabase/metabase/wiki/Using-the-REST-API) //! * [Auto-generated API documentation](https://github.com/metabase/metabase/blob/master/docs/api-documentation.md) //! //! [Metabase]: https://metabase.com #![warn(missing_debug_implementations)] use std::fmt; use std::time::Duration; use reqwest::{IntoUrl, Url}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; /// A Metabase API client. 
#[derive(Debug)] pub struct Client { inner: reqwest::Client, url: Url, session_id: Option<String>, } impl Client { /// Constructs a new `Client` that will target a Metabase instance at `url`. /// /// `url` must not contain a path nor be a [cannot-be-a-base] URL. /// /// [cannot-be-a-base]: https://url.spec.whatwg.org/#url-cannot-be-a-base-url-flag pub fn new<U>(url: U) -> Result<Self, Error> where U: IntoUrl, { let mut url = url.into_url()?; if url.path() != "/" { return Err(Error::InvalidUrl("base URL cannot have path".into())); } assert!(!url.cannot_be_a_base()); url.path_segments_mut() .expect("cannot-be-a-base checked to be false") .push("api"); Ok(Client { inner: reqwest::Client::new(), url, session_id: None, }) } /// Sets the session ID to include in future requests made by this client. pub fn set_session_id(&mut self, session_id: String) { self.session_id = Some(session_id); } /// Fetches public, global properties. /// /// The underlying API call is `GET /api/session/properties`. pub async fn session_properties(&self) -> Result<SessionPropertiesResponse, reqwest::Error> { let url = self.api_url(&["session", "properties"]); self.send_request(self.inner.get(url)).await } /// Requests a session ID for the username and password named in `request`. /// /// Note that usernames are typically email addresses. To authenticate /// future requests with the returned session ID, call `set_session_id`. /// /// The underlying API call is `POST /api/session`. pub async fn login(&self, request: &LoginRequest) -> Result<LoginResponse, reqwest::Error> { let url = self.api_url(&["session"]); self.send_request(self.inner.post(url).json(request)).await } /// Creates a user and database connection if the Metabase instance has not /// yet been set up. /// /// The request must include the `setup_token` from a /// `SessionPropertiesResponse`. If the setup token returned by /// [`Client::session_properties`] is `None`, the cluster is already set up, /// and this request will fail. /// /// The underlying API call is `POST /api/setup`. pub async fn setup(&self, request: &SetupRequest) -> Result<LoginResponse, reqwest::Error> { let url = self.api_url(&["setup"]); self.send_request(self.inner.post(url).json(request)).await } /// Fetches the list of databases known to Metabase. /// /// The underlying API call is `GET /database`. pub async fn databases(&self) -> Result<Vec<Database>, reqwest::Error> { let url = self.api_url(&["database"]); let res: ListWrapper<_> = self.send_request(self.inner.get(url)).await?; Ok(res.data) } /// Fetches metadata about a particular database. /// /// The underlying API call is `GET /database/:id/metadata`. pub async fn database_metadata(&self, id: usize) -> Result<DatabaseMetadata, reqwest::Error> { let url = self.api_url(&["database", &id.to_string(), "metadata"]); self.send_request(self.inner.get(url)).await } fn api_url(&self, endpoint: &[&str]) -> Url { let mut url = self.url.clone(); url.path_segments_mut() .expect("url validated on construction") .extend(endpoint); url } async fn send_request<T>(&self, mut req: reqwest::RequestBuilder) -> Result<T, reqwest::Error> where T: DeserializeOwned, { req = req.timeout(Duration::from_secs(5)); if let Some(session_id) = &self.session_id { req = req.header("X-Metabase-Session", session_id); } let res = req.send().await?.error_for_status()?; res.json().await } } /// A Metabase error. #[derive(Debug)] pub enum Error { /// The provided URL was invalid. InvalidUrl(String), /// The underlying transport mechanism returned an error. 
Transport(reqwest::Error), } impl From<reqwest::Error> for Error { fn from(e: reqwest::Error) -> Error { Error::Transport(e) } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::InvalidUrl(_) => None, Error::Transport(e) => Some(e), } } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::InvalidUrl(msg) => write!(f, "invalid url: {}", msg), Error::Transport(e) => write!(f, "transport: {}", e), } } } #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] struct ListWrapper<T> { data: Vec<T>, } /// The response to [`Client::session_properties`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] #[serde(rename_all = "kebab-case")] pub struct SessionPropertiesResponse { pub setup_token: Option<String>, } /// The request for [`Client::setup`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupRequest { pub allow_tracking: bool, pub database: SetupDatabase, pub token: String, pub prefs: SetupPrefs, pub user: SetupUser, } /// A database to create as part of a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupDatabase { pub engine: String, pub name: String, pub details: SetupDatabaseDetails, } /// Details for a [`SetupDatabase`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupDatabaseDetails { pub host: String, pub port: usize, pub dbname: String, pub user: String, } /// Preferences for a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupPrefs { pub site_name: String, } /// A user to create as part of a [`SetupRequest`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct SetupUser { pub email: String, pub first_name: String, pub last_name: String, pub password: String, pub site_name: String, } /// The request for [`Client::login`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct LoginRequest { pub username: String, pub password: String, } /// The response to [`Client::login`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct LoginResponse { pub id: String, } /// A database returned by [`Client::databases`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct Database { pub name: String, pub id: usize, } /// The response to [`Client::database_metadata`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct DatabaseMetadata { pub tables: Vec<Table>, } /// A table that is part of [`DatabaseMetadata`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct Table {
/// A field of a [`Table`]. #[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct TableField { pub name: String, pub database_type: String, pub base_type: String, pub special_type: Option<String>, }
pub name: String, pub schema: String, pub fields: Vec<TableField>, }
random_line_split
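The second lib.rs record covers the same client, including `databases` and `database_metadata`. A small follow-on sketch, under the same assumptions as the previous one, shows how those two calls might be combined to walk every table Metabase knows about; the helper name `list_tables` is hypothetical.

```rust
// Hedged sketch: enumerate the databases a Metabase instance knows about and
// print the schema-qualified tables of each one. Assumes the same `metabase`
// crate path as the previous example and a client with a valid session ID.
use metabase::Client;

async fn list_tables(client: &Client) -> Result<(), reqwest::Error> {
    for db in client.databases().await? {
        // `database_metadata` takes the numeric database id from `databases`.
        let meta = client.database_metadata(db.id).await?;
        for table in meta.tables {
            println!("{}: {}.{}", db.name, table.schema, table.name);
        }
    }
    Ok(())
}
```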
transaction.rs
use std::collections::HashMap; use std::hash::Hash; use std::ops::Range; use rusqlite::Connection; use ::serde::Serialize; use chrono::{Datelike, Duration, Local, NaiveDate, NaiveDateTime}; use crate::db::account_dao::AccountDao; use crate::db::transaction_dao::{Split, Transaction, TransactionDao}; use crate::db::account_dao::Account; use itertools::Itertools; fn parse_nd(s: &str) -> NaiveDate { let with_day = |s: &str| format!("{}-01", s); NaiveDate::parse_from_str(&with_day(&s.replace(" ", "")), "%Y-%m-%d").unwrap() } pub fn expense_splits( conn: &Connection, expense_name: String, month: String ) -> Vec<TranSplit> { let since_nd = parse_nd(&month); let until_nd = ( if (since_nd.month() == 12) { NaiveDate::from_ymd(since_nd.year() + 1, 1, 1) } else { NaiveDate::from_ymd(since_nd.year(), since_nd.month() + 1, 1) } ).pred(); let tran_dao = TransactionDao { conn: &conn }; let trans = tran_dao.list(&since_nd, &until_nd); let account_dao = AccountDao { conn: &conn }; let account = account_dao.list().into_iter().find(|acct| { acct.qualified_name() == expense_name }).unwrap(); // :(:( let mut splits = Vec::new(); for t in trans { for s in t.splits { if (s.account_guid == account.guid) { splits.push( TranSplit { account_guid: s.account_guid, transaction_guid: s.transaction_guid, value_num: s.value_num, memo: s.memo, date: t.post_date, description: t.description.clone(), }); } } } splits } #[derive(Debug)] #[derive(Serialize)] pub struct TranSplit { pub account_guid: String, pub transaction_guid: String, pub value_num: i64, pub memo: String, pub date: NaiveDateTime, pub description: String } pub fn list( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> Vec<Transaction>
#[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotals { summaries: Vec<MonthTotal>, totalSpent: i64, pub acctSums: Vec<MonthlyExpenseGroup> } #[derive(Debug)] #[derive(Serialize)] pub struct AccountSummary { name: String, monthlyTotals: Vec<MonthlyTotal>, } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotal { month: NaiveDate, total: i64 } pub fn monthly_totals<'a>( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> MonthlyTotals { let trans_dao = TransactionDao { conn: &conn }; let account_dao = AccountDao { conn: &conn }; let (since_nd, until_nd) = since_until(since, until, months, year); let trans = trans_dao.list(&since_nd, &until_nd); let mut accounts = account_dao.list(); let mut unfilled_months = expenses_by_month(&trans, &accounts); let mut months = fill_empty_months(&since_nd, &until_nd, &unfilled_months); months.sort_by(|a, b| b.total.cmp(&a.total)); let all_months = months.iter().flat_map(|m| &m.monthlyTotals); let grouped = group_by(all_months.collect::<Vec<_>>(), |m| m.month.clone()); let mut summed = grouped.into_iter().map(|(i, month_summary)| { MonthTotal { month: i, total: month_summary.into_iter().map(|m| m.total).sum() } }).collect::<Vec<_>>(); summed.sort_by(|a, b| parse_nd(&b.month).cmp(&parse_nd(&a.month))); let total_spent = summed.iter().map(|m| m.total).sum(); //let mut acct_sums = months.clone(); MonthlyTotals { summaries: summed, totalSpent: total_spent, acctSums: months.clone() } } // TODO need to understand the type option for 'function overloading' because // the following is not good fn since_until( since_p: Option<String>, until_p: Option<String>, mut months_p: Option<String>, // :(:( year_p: Option<String> ) -> (NaiveDate, NaiveDate) { let until = year_p.as_ref().map(|y| NaiveDate::from_ymd(y.parse::<i32>().unwrap(), 12, 31)) .unwrap_or({ until_p.map(|s| parse_nd(&s)).unwrap_or({ let now = Local::now().naive_local().date(); if (now.month() == 12) { NaiveDate::from_ymd(now.year(), 12, 31) } else { NaiveDate::from_ymd(now.year(), now.month() + 1, 1).pred() } }) }); months_p = year_p.map(|y| "12".to_string()).or(months_p); let since = since_p.map(|s| parse_nd(&s)).unwrap_or({ let months_since = months_p.map(|m| m.parse().unwrap()).unwrap_or(6); // yes I've (sort of) done the following twice, and it's crappy both times let mut curr_year = until.year(); let mut curr_month = until.month(); (0..months_since - 1).for_each(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; }); NaiveDate::from_ymd(curr_year, curr_month, 1) }); (since, until) } fn months_between(since: &NaiveDate, until: &NaiveDate) -> u32 { let mut curr_year = until.year(); let mut curr_month = until.month(); let mut ctr = 0; while curr_year > since.year() || curr_year == since.year() && curr_month > since.month() { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; ctr += 1; } ctr } fn fill_empty_months( since: &NaiveDate, until: &NaiveDate, expenses: &Vec<MonthlyExpenseGroup> ) -> Vec<MonthlyExpenseGroup> { // don't have moment like we do in node:( let mut curr_year = until.year(); let mut curr_month = until.month(); let num_months = months_between(since, until); let mut desired_months = (0..num_months).map(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; NaiveDate::from_ymd(curr_year, curr_month, 1) }).collect::<Vec<_>>(); desired_months.insert(0, NaiveDate::from_ymd(until.year(), until.month(), 1)); let 
mut cloned_expenses = expenses.clone(); (0..cloned_expenses.len()).for_each(|i| { let mut exp = &mut cloned_expenses[i]; (0..num_months+1).for_each(|_j| { let j = _j as usize; let month_str = format_nd(desired_months[j]); let exp_month = exp.monthlyTotals.get(j).map(|mt| mt.clone()); if (exp_month.is_none() || month_str != exp_month.unwrap().month) { exp.monthlyTotals.insert(j, MonthTotal { month: month_str, total: 0 }); } }); }); cloned_expenses } pub struct MonthlyExpense { name: String, date: NaiveDate, amount: i64, memo: String } struct ExpenseSplit { name: String, date: NaiveDateTime, amount: i64, memo: String } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] struct MonthTotal { month: String, total: i64 } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyExpenseGroup { pub name: String, pub total: i64, monthlyTotals: Vec<MonthTotal>, } fn expenses_by_month( transactions: &Vec<Transaction>, accounts: &Vec<Account> ) -> Vec<MonthlyExpenseGroup> { let mut accounts_map = HashMap::new(); for a in accounts { accounts_map.insert(&a.guid, a); } // No need to fold/reduce here like we do in the node version. // That was probably just a mistake there. let mut splits = transactions.iter().flat_map(|tran| { let expenses = tran.splits.iter().filter(|s| accounts_map[&s.account_guid].is_expense()).collect::<Vec<&Split>>(); expenses.iter().map(|e| { ExpenseSplit { name: accounts_map[&e.account_guid].qualified_name(), date: tran.post_date, amount: e.value_num, memo: e.memo.clone() } }).collect::<Vec<_>>() }).collect::<Vec<_>>(); splits.sort_by(|a,b| a.name.cmp(&b.name)); let expense_groups = group_by(splits, |s| s.name.to_string()); let expense_groups_by_month = expense_groups.into_iter().map(|(name, exp_group)| { let mut start = HashMap::<String, Vec<ExpenseSplit>>::new(); let mut exp_splits = group_by(exp_group.into_iter().collect::<Vec<ExpenseSplit>>(), |item| { format_ndt(item.date) }).into_iter().collect::<Vec<_>>(); exp_splits.sort_by(|a,b| b.0.cmp(&a.0)); let monthly_totals = exp_splits.into_iter().map(|(month, splits)| { MonthTotal { month: month, total: splits.iter().map(|s| s.amount).sum() } }).collect::<Vec<_>>(); MonthlyExpenseGroup { name: name.to_string(), total: monthly_totals.iter().map(|mt| mt.total).sum(), monthlyTotals: monthly_totals } }); expense_groups_by_month.collect::<Vec<_>>() } fn format_ndt(d: NaiveDateTime) -> String { format_nd(d.date()) } fn format_nd(d: NaiveDate) -> String { let year = d.year(); let month = d.month(); format!("{}-{:02}", year, month) } pub fn group_by<T, K : Eq + Hash>(items: Vec<T>, to_key: fn(&T) -> K) -> HashMap<K, Vec<T>> { let mut start: HashMap<K, Vec<T>> = HashMap::new(); items.into_iter().for_each(|item| { let key = to_key(&item); let mut result = start.entry(key).or_insert(Vec::new()); result.push(item); }); start } #[cfg(test)] mod tests { use chrono::{Datelike, Local, NaiveDate}; #[test] fn since_until_with_year() { let year_param = Some("2017".to_string()); let (since, until) = super::since_until(None, None, None, year_param); assert_eq!(NaiveDate::from_ymd(2017, 1, 1), since); assert_eq!(NaiveDate::from_ymd(2017, 12, 31), until); } #[test] fn since_until_with_month() { let month_param = Some("1".to_string()); let (since, until) = super::since_until(None, None, month_param, None); let now = Local::now().naive_local().date(); let tup = |d:NaiveDate| (d.year(), d.month()); assert_eq!((tup(now), 0), (tup(since), 0)); assert_eq!(tup(now), tup(since)); // todo verify end day:( } #[test] fn since_until_december() { 
let since_param = Some("2017-12".to_string()); let (since, until) = super::since_until(since_param, None, None, None); assert_eq!(NaiveDate::from_ymd(2017, 12, 1), since); assert_eq!(NaiveDate::from_ymd(2019, 12, 31), until); } }
{ let (since_nd, until_nd) = since_until(since, until, months, year); let dao = TransactionDao { conn: &conn }; dao.list(&since_nd, &until_nd) }
identifier_body
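The transaction.rs sample above leans on a small generic group_by helper to bucket expense splits by account name and by month. Below is a minimal, self-contained sketch of that pattern; the helper mirrors the one in the sample, while the (month, amount) rows and the totals printed are hypothetical example data, not taken from the source.

use std::collections::HashMap;
use std::hash::Hash;

// Bucket items by a key derived from each item, using the entry API so each
// key allocates its Vec only once.
fn group_by<T, K: Eq + Hash>(items: Vec<T>, to_key: fn(&T) -> K) -> HashMap<K, Vec<T>> {
    let mut out: HashMap<K, Vec<T>> = HashMap::new();
    for item in items {
        out.entry(to_key(&item)).or_insert_with(Vec::new).push(item);
    }
    out
}

fn main() {
    // Hypothetical (month, amount) rows standing in for expense splits.
    let rows: Vec<(String, i64)> = vec![
        ("2017-12".to_string(), 100),
        ("2017-12".to_string(), 250),
        ("2018-01".to_string(), 75),
    ];
    let grouped = group_by(rows, |r| r.0.clone());
    for (month, splits) in &grouped {
        let total: i64 = splits.iter().map(|s| s.1).sum();
        println!("{} -> {}", month, total); // e.g. 2017-12 -> 350
    }
}

Taking a plain fn pointer rather than a generic closure keeps the signature simple at the cost of ruling out capturing closures, which is sufficient for the call sites in the sample.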
transaction.rs
use std::collections::HashMap; use std::hash::Hash; use std::ops::Range; use rusqlite::Connection; use ::serde::Serialize; use chrono::{Datelike, Duration, Local, NaiveDate, NaiveDateTime}; use crate::db::account_dao::AccountDao; use crate::db::transaction_dao::{Split, Transaction, TransactionDao}; use crate::db::account_dao::Account; use itertools::Itertools; fn parse_nd(s: &str) -> NaiveDate { let with_day = |s: &str| format!("{}-01", s); NaiveDate::parse_from_str(&with_day(&s.replace(" ", "")), "%Y-%m-%d").unwrap() } pub fn expense_splits( conn: &Connection, expense_name: String, month: String ) -> Vec<TranSplit> { let since_nd = parse_nd(&month); let until_nd = ( if (since_nd.month() == 12) { NaiveDate::from_ymd(since_nd.year() + 1, 1, 1) } else { NaiveDate::from_ymd(since_nd.year(), since_nd.month() + 1, 1) } ).pred(); let tran_dao = TransactionDao { conn: &conn }; let trans = tran_dao.list(&since_nd, &until_nd); let account_dao = AccountDao { conn: &conn }; let account = account_dao.list().into_iter().find(|acct| { acct.qualified_name() == expense_name }).unwrap(); // :(:( let mut splits = Vec::new(); for t in trans { for s in t.splits { if (s.account_guid == account.guid) { splits.push( TranSplit { account_guid: s.account_guid, transaction_guid: s.transaction_guid, value_num: s.value_num, memo: s.memo, date: t.post_date, description: t.description.clone(), }); } } } splits } #[derive(Debug)] #[derive(Serialize)] pub struct TranSplit { pub account_guid: String, pub transaction_guid: String, pub value_num: i64, pub memo: String, pub date: NaiveDateTime, pub description: String } pub fn list( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> Vec<Transaction> { let (since_nd, until_nd) = since_until(since, until, months, year); let dao = TransactionDao { conn: &conn }; dao.list(&since_nd, &until_nd) } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotals { summaries: Vec<MonthTotal>, totalSpent: i64, pub acctSums: Vec<MonthlyExpenseGroup> } #[derive(Debug)] #[derive(Serialize)] pub struct AccountSummary { name: String, monthlyTotals: Vec<MonthlyTotal>, } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotal { month: NaiveDate, total: i64 } pub fn monthly_totals<'a>( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> MonthlyTotals { let trans_dao = TransactionDao { conn: &conn }; let account_dao = AccountDao { conn: &conn }; let (since_nd, until_nd) = since_until(since, until, months, year); let trans = trans_dao.list(&since_nd, &until_nd); let mut accounts = account_dao.list(); let mut unfilled_months = expenses_by_month(&trans, &accounts); let mut months = fill_empty_months(&since_nd, &until_nd, &unfilled_months); months.sort_by(|a, b| b.total.cmp(&a.total)); let all_months = months.iter().flat_map(|m| &m.monthlyTotals); let grouped = group_by(all_months.collect::<Vec<_>>(), |m| m.month.clone()); let mut summed = grouped.into_iter().map(|(i, month_summary)| { MonthTotal { month: i, total: month_summary.into_iter().map(|m| m.total).sum() } }).collect::<Vec<_>>(); summed.sort_by(|a, b| parse_nd(&b.month).cmp(&parse_nd(&a.month))); let total_spent = summed.iter().map(|m| m.total).sum(); //let mut acct_sums = months.clone(); MonthlyTotals { summaries: summed, totalSpent: total_spent, acctSums: months.clone() } } // TODO need to understand the type option for 'function overloading' because // the following is not good fn since_until( since_p: 
Option<String>, until_p: Option<String>, mut months_p: Option<String>, // :(:( year_p: Option<String> ) -> (NaiveDate, NaiveDate) { let until = year_p.as_ref().map(|y| NaiveDate::from_ymd(y.parse::<i32>().unwrap(), 12, 31)) .unwrap_or({ until_p.map(|s| parse_nd(&s)).unwrap_or({ let now = Local::now().naive_local().date(); if (now.month() == 12) { NaiveDate::from_ymd(now.year(), 12, 31) } else { NaiveDate::from_ymd(now.year(), now.month() + 1, 1).pred() } }) }); months_p = year_p.map(|y| "12".to_string()).or(months_p); let since = since_p.map(|s| parse_nd(&s)).unwrap_or({ let months_since = months_p.map(|m| m.parse().unwrap()).unwrap_or(6); // yes I've (sort of) done the following twice, and it's crappy both times let mut curr_year = until.year(); let mut curr_month = until.month(); (0..months_since - 1).for_each(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; }); NaiveDate::from_ymd(curr_year, curr_month, 1) }); (since, until) } fn months_between(since: &NaiveDate, until: &NaiveDate) -> u32 { let mut curr_year = until.year(); let mut curr_month = until.month(); let mut ctr = 0; while curr_year > since.year() || curr_year == since.year() && curr_month > since.month() { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; ctr += 1; } ctr } fn fill_empty_months( since: &NaiveDate, until: &NaiveDate, expenses: &Vec<MonthlyExpenseGroup> ) -> Vec<MonthlyExpenseGroup> { // don't have moment like we do in node:( let mut curr_year = until.year(); let mut curr_month = until.month(); let num_months = months_between(since, until); let mut desired_months = (0..num_months).map(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else
; NaiveDate::from_ymd(curr_year, curr_month, 1) }).collect::<Vec<_>>(); desired_months.insert(0, NaiveDate::from_ymd(until.year(), until.month(), 1)); let mut cloned_expenses = expenses.clone(); (0..cloned_expenses.len()).for_each(|i| { let mut exp = &mut cloned_expenses[i]; (0..num_months+1).for_each(|_j| { let j = _j as usize; let month_str = format_nd(desired_months[j]); let exp_month = exp.monthlyTotals.get(j).map(|mt| mt.clone()); if (exp_month.is_none() || month_str != exp_month.unwrap().month) { exp.monthlyTotals.insert(j, MonthTotal { month: month_str, total: 0 }); } }); }); cloned_expenses } pub struct MonthlyExpense { name: String, date: NaiveDate, amount: i64, memo: String } struct ExpenseSplit { name: String, date: NaiveDateTime, amount: i64, memo: String } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] struct MonthTotal { month: String, total: i64 } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyExpenseGroup { pub name: String, pub total: i64, monthlyTotals: Vec<MonthTotal>, } fn expenses_by_month( transactions: &Vec<Transaction>, accounts: &Vec<Account> ) -> Vec<MonthlyExpenseGroup> { let mut accounts_map = HashMap::new(); for a in accounts { accounts_map.insert(&a.guid, a); } // No need to fold/reduce here like we do in the node version. // That was probably just a mistake there. let mut splits = transactions.iter().flat_map(|tran| { let expenses = tran.splits.iter().filter(|s| accounts_map[&s.account_guid].is_expense()).collect::<Vec<&Split>>(); expenses.iter().map(|e| { ExpenseSplit { name: accounts_map[&e.account_guid].qualified_name(), date: tran.post_date, amount: e.value_num, memo: e.memo.clone() } }).collect::<Vec<_>>() }).collect::<Vec<_>>(); splits.sort_by(|a,b| a.name.cmp(&b.name)); let expense_groups = group_by(splits, |s| s.name.to_string()); let expense_groups_by_month = expense_groups.into_iter().map(|(name, exp_group)| { let mut start = HashMap::<String, Vec<ExpenseSplit>>::new(); let mut exp_splits = group_by(exp_group.into_iter().collect::<Vec<ExpenseSplit>>(), |item| { format_ndt(item.date) }).into_iter().collect::<Vec<_>>(); exp_splits.sort_by(|a,b| b.0.cmp(&a.0)); let monthly_totals = exp_splits.into_iter().map(|(month, splits)| { MonthTotal { month: month, total: splits.iter().map(|s| s.amount).sum() } }).collect::<Vec<_>>(); MonthlyExpenseGroup { name: name.to_string(), total: monthly_totals.iter().map(|mt| mt.total).sum(), monthlyTotals: monthly_totals } }); expense_groups_by_month.collect::<Vec<_>>() } fn format_ndt(d: NaiveDateTime) -> String { format_nd(d.date()) } fn format_nd(d: NaiveDate) -> String { let year = d.year(); let month = d.month(); format!("{}-{:02}", year, month) } pub fn group_by<T, K : Eq + Hash>(items: Vec<T>, to_key: fn(&T) -> K) -> HashMap<K, Vec<T>> { let mut start: HashMap<K, Vec<T>> = HashMap::new(); items.into_iter().for_each(|item| { let key = to_key(&item); let mut result = start.entry(key).or_insert(Vec::new()); result.push(item); }); start } #[cfg(test)] mod tests { use chrono::{Datelike, Local, NaiveDate}; #[test] fn since_until_with_year() { let year_param = Some("2017".to_string()); let (since, until) = super::since_until(None, None, None, year_param); assert_eq!(NaiveDate::from_ymd(2017, 1, 1), since); assert_eq!(NaiveDate::from_ymd(2017, 12, 31), until); } #[test] fn since_until_with_month() { let month_param = Some("1".to_string()); let (since, until) = super::since_until(None, None, month_param, None); let now = Local::now().naive_local().date(); let tup = |d:NaiveDate| (d.year(), 
d.month()); assert_eq!((tup(now), 0), (tup(since), 0)); assert_eq!(tup(now), tup(since)); // todo verify end day:( } #[test] fn since_until_december() { let since_param = Some("2017-12".to_string()); let (since, until) = super::since_until(since_param, None, None, None); assert_eq!(NaiveDate::from_ymd(2017, 12, 1), since); assert_eq!(NaiveDate::from_ymd(2019, 12, 31), until); } }
{ curr_month -= 1; }
conditional_block
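The conditional_block filled in this row is the January rollover inside the month-decrement loops that since_until, months_between and fill_empty_months all repeat. Here is a small sketch of that walk-backwards loop, assuming chrono's NaiveDate as in the sample; previous_month_starts is a hypothetical helper name and the dates are made up.

use chrono::{Datelike, NaiveDate};

// Walk backwards `count` months from `until`, rolling the year over when
// January is reached, and collect the first day of each month visited.
fn previous_month_starts(until: NaiveDate, count: u32) -> Vec<NaiveDate> {
    let mut year = until.year();
    let mut month = until.month();
    let mut out = vec![NaiveDate::from_ymd(year, month, 1)];
    for _ in 0..count {
        if month == 1 {
            year -= 1;
            month = 12;
        } else {
            month -= 1;
        }
        out.push(NaiveDate::from_ymd(year, month, 1));
    }
    out
}

fn main() {
    let months = previous_month_starts(NaiveDate::from_ymd(2018, 2, 14), 3);
    // Prints 2018-02-01, 2018-01-01, 2017-12-01, 2017-11-01
    for m in months {
        println!("{}", m);
    }
}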
transaction.rs
use std::collections::HashMap; use std::hash::Hash; use std::ops::Range; use rusqlite::Connection; use ::serde::Serialize; use chrono::{Datelike, Duration, Local, NaiveDate, NaiveDateTime}; use crate::db::account_dao::AccountDao; use crate::db::transaction_dao::{Split, Transaction, TransactionDao}; use crate::db::account_dao::Account; use itertools::Itertools; fn parse_nd(s: &str) -> NaiveDate { let with_day = |s: &str| format!("{}-01", s); NaiveDate::parse_from_str(&with_day(&s.replace(" ", "")), "%Y-%m-%d").unwrap() } pub fn expense_splits( conn: &Connection, expense_name: String, month: String ) -> Vec<TranSplit> { let since_nd = parse_nd(&month); let until_nd = ( if (since_nd.month() == 12) { NaiveDate::from_ymd(since_nd.year() + 1, 1, 1) } else { NaiveDate::from_ymd(since_nd.year(), since_nd.month() + 1, 1) } ).pred(); let tran_dao = TransactionDao { conn: &conn }; let trans = tran_dao.list(&since_nd, &until_nd); let account_dao = AccountDao { conn: &conn }; let account = account_dao.list().into_iter().find(|acct| { acct.qualified_name() == expense_name }).unwrap(); // :(:( let mut splits = Vec::new(); for t in trans { for s in t.splits { if (s.account_guid == account.guid) { splits.push( TranSplit { account_guid: s.account_guid, transaction_guid: s.transaction_guid, value_num: s.value_num, memo: s.memo, date: t.post_date, description: t.description.clone(), }); } } } splits } #[derive(Debug)] #[derive(Serialize)] pub struct TranSplit { pub account_guid: String, pub transaction_guid: String, pub value_num: i64, pub memo: String, pub date: NaiveDateTime, pub description: String } pub fn list( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> Vec<Transaction> { let (since_nd, until_nd) = since_until(since, until, months, year); let dao = TransactionDao { conn: &conn }; dao.list(&since_nd, &until_nd) } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotals { summaries: Vec<MonthTotal>, totalSpent: i64, pub acctSums: Vec<MonthlyExpenseGroup> } #[derive(Debug)] #[derive(Serialize)] pub struct AccountSummary { name: String, monthlyTotals: Vec<MonthlyTotal>, } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotal { month: NaiveDate, total: i64 } pub fn monthly_totals<'a>( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> MonthlyTotals { let trans_dao = TransactionDao { conn: &conn }; let account_dao = AccountDao { conn: &conn }; let (since_nd, until_nd) = since_until(since, until, months, year); let trans = trans_dao.list(&since_nd, &until_nd); let mut accounts = account_dao.list(); let mut unfilled_months = expenses_by_month(&trans, &accounts); let mut months = fill_empty_months(&since_nd, &until_nd, &unfilled_months); months.sort_by(|a, b| b.total.cmp(&a.total)); let all_months = months.iter().flat_map(|m| &m.monthlyTotals); let grouped = group_by(all_months.collect::<Vec<_>>(), |m| m.month.clone()); let mut summed = grouped.into_iter().map(|(i, month_summary)| { MonthTotal { month: i, total: month_summary.into_iter().map(|m| m.total).sum() } }).collect::<Vec<_>>(); summed.sort_by(|a, b| parse_nd(&b.month).cmp(&parse_nd(&a.month))); let total_spent = summed.iter().map(|m| m.total).sum(); //let mut acct_sums = months.clone(); MonthlyTotals { summaries: summed, totalSpent: total_spent, acctSums: months.clone() } } // TODO need to understand the type option for 'function overloading' because // the following is not good fn since_until( since_p: 
Option<String>, until_p: Option<String>, mut months_p: Option<String>, // :(:( year_p: Option<String> ) -> (NaiveDate, NaiveDate) { let until = year_p.as_ref().map(|y| NaiveDate::from_ymd(y.parse::<i32>().unwrap(), 12, 31)) .unwrap_or({ until_p.map(|s| parse_nd(&s)).unwrap_or({ let now = Local::now().naive_local().date(); if (now.month() == 12) { NaiveDate::from_ymd(now.year(), 12, 31) } else { NaiveDate::from_ymd(now.year(), now.month() + 1, 1).pred() } }) }); months_p = year_p.map(|y| "12".to_string()).or(months_p); let since = since_p.map(|s| parse_nd(&s)).unwrap_or({ let months_since = months_p.map(|m| m.parse().unwrap()).unwrap_or(6); // yes I've (sort of) done the following twice, and it's crappy both times let mut curr_year = until.year(); let mut curr_month = until.month(); (0..months_since - 1).for_each(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; }); NaiveDate::from_ymd(curr_year, curr_month, 1) }); (since, until) } fn months_between(since: &NaiveDate, until: &NaiveDate) -> u32 { let mut curr_year = until.year(); let mut curr_month = until.month(); let mut ctr = 0; while curr_year > since.year() || curr_year == since.year() && curr_month > since.month() { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; ctr += 1; } ctr } fn fill_empty_months( since: &NaiveDate, until: &NaiveDate, expenses: &Vec<MonthlyExpenseGroup> ) -> Vec<MonthlyExpenseGroup> { // don't have moment like we do in node:( let mut curr_year = until.year(); let mut curr_month = until.month(); let num_months = months_between(since, until); let mut desired_months = (0..num_months).map(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; NaiveDate::from_ymd(curr_year, curr_month, 1) }).collect::<Vec<_>>(); desired_months.insert(0, NaiveDate::from_ymd(until.year(), until.month(), 1)); let mut cloned_expenses = expenses.clone(); (0..cloned_expenses.len()).for_each(|i| { let mut exp = &mut cloned_expenses[i]; (0..num_months+1).for_each(|_j| { let j = _j as usize; let month_str = format_nd(desired_months[j]); let exp_month = exp.monthlyTotals.get(j).map(|mt| mt.clone()); if (exp_month.is_none() || month_str != exp_month.unwrap().month) { exp.monthlyTotals.insert(j, MonthTotal { month: month_str, total: 0 }); } }); }); cloned_expenses } pub struct MonthlyExpense { name: String, date: NaiveDate, amount: i64, memo: String } struct ExpenseSplit { name: String, date: NaiveDateTime, amount: i64, memo: String } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] struct MonthTotal { month: String, total: i64 } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] pub struct
{ pub name: String, pub total: i64, monthlyTotals: Vec<MonthTotal>, } fn expenses_by_month( transactions: &Vec<Transaction>, accounts: &Vec<Account> ) -> Vec<MonthlyExpenseGroup> { let mut accounts_map = HashMap::new(); for a in accounts { accounts_map.insert(&a.guid, a); } // No need to fold/reduce here like we do in the node version. // That was probably just a mistake there. let mut splits = transactions.iter().flat_map(|tran| { let expenses = tran.splits.iter().filter(|s| accounts_map[&s.account_guid].is_expense()).collect::<Vec<&Split>>(); expenses.iter().map(|e| { ExpenseSplit { name: accounts_map[&e.account_guid].qualified_name(), date: tran.post_date, amount: e.value_num, memo: e.memo.clone() } }).collect::<Vec<_>>() }).collect::<Vec<_>>(); splits.sort_by(|a,b| a.name.cmp(&b.name)); let expense_groups = group_by(splits, |s| s.name.to_string()); let expense_groups_by_month = expense_groups.into_iter().map(|(name, exp_group)| { let mut start = HashMap::<String, Vec<ExpenseSplit>>::new(); let mut exp_splits = group_by(exp_group.into_iter().collect::<Vec<ExpenseSplit>>(), |item| { format_ndt(item.date) }).into_iter().collect::<Vec<_>>(); exp_splits.sort_by(|a,b| b.0.cmp(&a.0)); let monthly_totals = exp_splits.into_iter().map(|(month, splits)| { MonthTotal { month: month, total: splits.iter().map(|s| s.amount).sum() } }).collect::<Vec<_>>(); MonthlyExpenseGroup { name: name.to_string(), total: monthly_totals.iter().map(|mt| mt.total).sum(), monthlyTotals: monthly_totals } }); expense_groups_by_month.collect::<Vec<_>>() } fn format_ndt(d: NaiveDateTime) -> String { format_nd(d.date()) } fn format_nd(d: NaiveDate) -> String { let year = d.year(); let month = d.month(); format!("{}-{:02}", year, month) } pub fn group_by<T, K : Eq + Hash>(items: Vec<T>, to_key: fn(&T) -> K) -> HashMap<K, Vec<T>> { let mut start: HashMap<K, Vec<T>> = HashMap::new(); items.into_iter().for_each(|item| { let key = to_key(&item); let mut result = start.entry(key).or_insert(Vec::new()); result.push(item); }); start } #[cfg(test)] mod tests { use chrono::{Datelike, Local, NaiveDate}; #[test] fn since_until_with_year() { let year_param = Some("2017".to_string()); let (since, until) = super::since_until(None, None, None, year_param); assert_eq!(NaiveDate::from_ymd(2017, 1, 1), since); assert_eq!(NaiveDate::from_ymd(2017, 12, 31), until); } #[test] fn since_until_with_month() { let month_param = Some("1".to_string()); let (since, until) = super::since_until(None, None, month_param, None); let now = Local::now().naive_local().date(); let tup = |d:NaiveDate| (d.year(), d.month()); assert_eq!((tup(now), 0), (tup(since), 0)); assert_eq!(tup(now), tup(since)); // todo verify end day:( } #[test] fn since_until_december() { let since_param = Some("2017-12".to_string()); let (since, until) = super::since_until(since_param, None, None, None); assert_eq!(NaiveDate::from_ymd(2017, 12, 1), since); assert_eq!(NaiveDate::from_ymd(2019, 12, 31), until); } }
MonthlyExpenseGroup
identifier_name
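Same file, different hole: the identifier_name row above names MonthlyExpenseGroup. A related detail in this code is the December special case used by expense_splits and since_until, which computes a month's last day by stepping back from the first of the following month. A compact sketch of that rule follows; chrono is assumed as in the sample and last_day_of_month is a hypothetical helper name.

use chrono::NaiveDate;

// Last day of a month: first day of the following month, minus one day.
// Month 12 is special-cased so we never construct an invalid month 13.
fn last_day_of_month(year: i32, month: u32) -> NaiveDate {
    if month == 12 {
        NaiveDate::from_ymd(year, 12, 31)
    } else {
        NaiveDate::from_ymd(year, month + 1, 1).pred()
    }
}

fn main() {
    assert_eq!(last_day_of_month(2017, 2), NaiveDate::from_ymd(2017, 2, 28));
    assert_eq!(last_day_of_month(2016, 2), NaiveDate::from_ymd(2016, 2, 29));
    assert_eq!(last_day_of_month(2017, 12), NaiveDate::from_ymd(2017, 12, 31));
    println!("all month ends check out");
}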
transaction.rs
use std::collections::HashMap; use std::hash::Hash; use std::ops::Range; use rusqlite::Connection; use ::serde::Serialize; use chrono::{Datelike, Duration, Local, NaiveDate, NaiveDateTime}; use crate::db::account_dao::AccountDao; use crate::db::transaction_dao::{Split, Transaction, TransactionDao}; use crate::db::account_dao::Account; use itertools::Itertools; fn parse_nd(s: &str) -> NaiveDate { let with_day = |s: &str| format!("{}-01", s); NaiveDate::parse_from_str(&with_day(&s.replace(" ", "")), "%Y-%m-%d").unwrap() } pub fn expense_splits( conn: &Connection, expense_name: String, month: String ) -> Vec<TranSplit> { let since_nd = parse_nd(&month); let until_nd = ( if (since_nd.month() == 12) { NaiveDate::from_ymd(since_nd.year() + 1, 1, 1) } else { NaiveDate::from_ymd(since_nd.year(), since_nd.month() + 1, 1) } ).pred(); let tran_dao = TransactionDao { conn: &conn }; let trans = tran_dao.list(&since_nd, &until_nd); let account_dao = AccountDao { conn: &conn }; let account = account_dao.list().into_iter().find(|acct| { acct.qualified_name() == expense_name }).unwrap(); // :(:( let mut splits = Vec::new(); for t in trans { for s in t.splits { if (s.account_guid == account.guid) { splits.push( TranSplit { account_guid: s.account_guid, transaction_guid: s.transaction_guid, value_num: s.value_num, memo: s.memo, date: t.post_date, description: t.description.clone(), }); } } } splits } #[derive(Debug)] #[derive(Serialize)] pub struct TranSplit { pub account_guid: String, pub transaction_guid: String, pub value_num: i64, pub memo: String, pub date: NaiveDateTime, pub description: String } pub fn list( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> Vec<Transaction> { let (since_nd, until_nd) = since_until(since, until, months, year); let dao = TransactionDao { conn: &conn }; dao.list(&since_nd, &until_nd) } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotals { summaries: Vec<MonthTotal>, totalSpent: i64, pub acctSums: Vec<MonthlyExpenseGroup> } #[derive(Debug)] #[derive(Serialize)] pub struct AccountSummary { name: String, monthlyTotals: Vec<MonthlyTotal>, } #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyTotal { month: NaiveDate, total: i64 } pub fn monthly_totals<'a>( conn: &Connection, since: Option<String>, until: Option<String>, months: Option<String>, year: Option<String> ) -> MonthlyTotals { let trans_dao = TransactionDao { conn: &conn }; let account_dao = AccountDao { conn: &conn }; let (since_nd, until_nd) = since_until(since, until, months, year); let trans = trans_dao.list(&since_nd, &until_nd); let mut accounts = account_dao.list(); let mut unfilled_months = expenses_by_month(&trans, &accounts); let mut months = fill_empty_months(&since_nd, &until_nd, &unfilled_months); months.sort_by(|a, b| b.total.cmp(&a.total)); let all_months = months.iter().flat_map(|m| &m.monthlyTotals); let grouped = group_by(all_months.collect::<Vec<_>>(), |m| m.month.clone()); let mut summed = grouped.into_iter().map(|(i, month_summary)| { MonthTotal {
}).collect::<Vec<_>>(); summed.sort_by(|a, b| parse_nd(&b.month).cmp(&parse_nd(&a.month))); let total_spent = summed.iter().map(|m| m.total).sum(); //let mut acct_sums = months.clone(); MonthlyTotals { summaries: summed, totalSpent: total_spent, acctSums: months.clone() } } // TODO need to understand the type option for 'function overloading' because // the following is not good fn since_until( since_p: Option<String>, until_p: Option<String>, mut months_p: Option<String>, // :(:( year_p: Option<String> ) -> (NaiveDate, NaiveDate) { let until = year_p.as_ref().map(|y| NaiveDate::from_ymd(y.parse::<i32>().unwrap(), 12, 31)) .unwrap_or({ until_p.map(|s| parse_nd(&s)).unwrap_or({ let now = Local::now().naive_local().date(); if (now.month() == 12) { NaiveDate::from_ymd(now.year(), 12, 31) } else { NaiveDate::from_ymd(now.year(), now.month() + 1, 1).pred() } }) }); months_p = year_p.map(|y| "12".to_string()).or(months_p); let since = since_p.map(|s| parse_nd(&s)).unwrap_or({ let months_since = months_p.map(|m| m.parse().unwrap()).unwrap_or(6); // yes I've (sort of) done the following twice, and it's crappy both times let mut curr_year = until.year(); let mut curr_month = until.month(); (0..months_since - 1).for_each(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; }); NaiveDate::from_ymd(curr_year, curr_month, 1) }); (since, until) } fn months_between(since: &NaiveDate, until: &NaiveDate) -> u32 { let mut curr_year = until.year(); let mut curr_month = until.month(); let mut ctr = 0; while curr_year > since.year() || curr_year == since.year() && curr_month > since.month() { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; ctr += 1; } ctr } fn fill_empty_months( since: &NaiveDate, until: &NaiveDate, expenses: &Vec<MonthlyExpenseGroup> ) -> Vec<MonthlyExpenseGroup> { // don't have moment like we do in node:( let mut curr_year = until.year(); let mut curr_month = until.month(); let num_months = months_between(since, until); let mut desired_months = (0..num_months).map(|i| { if curr_month == 1 { curr_year -= 1; curr_month = 12; } else { curr_month -= 1; }; NaiveDate::from_ymd(curr_year, curr_month, 1) }).collect::<Vec<_>>(); desired_months.insert(0, NaiveDate::from_ymd(until.year(), until.month(), 1)); let mut cloned_expenses = expenses.clone(); (0..cloned_expenses.len()).for_each(|i| { let mut exp = &mut cloned_expenses[i]; (0..num_months+1).for_each(|_j| { let j = _j as usize; let month_str = format_nd(desired_months[j]); let exp_month = exp.monthlyTotals.get(j).map(|mt| mt.clone()); if (exp_month.is_none() || month_str != exp_month.unwrap().month) { exp.monthlyTotals.insert(j, MonthTotal { month: month_str, total: 0 }); } }); }); cloned_expenses } pub struct MonthlyExpense { name: String, date: NaiveDate, amount: i64, memo: String } struct ExpenseSplit { name: String, date: NaiveDateTime, amount: i64, memo: String } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] struct MonthTotal { month: String, total: i64 } #[derive(Clone)] #[derive(Debug)] #[derive(Serialize)] pub struct MonthlyExpenseGroup { pub name: String, pub total: i64, monthlyTotals: Vec<MonthTotal>, } fn expenses_by_month( transactions: &Vec<Transaction>, accounts: &Vec<Account> ) -> Vec<MonthlyExpenseGroup> { let mut accounts_map = HashMap::new(); for a in accounts { accounts_map.insert(&a.guid, a); } // No need to fold/reduce here like we do in the node version. // That was probably just a mistake there. 
let mut splits = transactions.iter().flat_map(|tran| { let expenses = tran.splits.iter().filter(|s| accounts_map[&s.account_guid].is_expense()).collect::<Vec<&Split>>(); expenses.iter().map(|e| { ExpenseSplit { name: accounts_map[&e.account_guid].qualified_name(), date: tran.post_date, amount: e.value_num, memo: e.memo.clone() } }).collect::<Vec<_>>() }).collect::<Vec<_>>(); splits.sort_by(|a,b| a.name.cmp(&b.name)); let expense_groups = group_by(splits, |s| s.name.to_string()); let expense_groups_by_month = expense_groups.into_iter().map(|(name, exp_group)| { let mut start = HashMap::<String, Vec<ExpenseSplit>>::new(); let mut exp_splits = group_by(exp_group.into_iter().collect::<Vec<ExpenseSplit>>(), |item| { format_ndt(item.date) }).into_iter().collect::<Vec<_>>(); exp_splits.sort_by(|a,b| b.0.cmp(&a.0)); let monthly_totals = exp_splits.into_iter().map(|(month, splits)| { MonthTotal { month: month, total: splits.iter().map(|s| s.amount).sum() } }).collect::<Vec<_>>(); MonthlyExpenseGroup { name: name.to_string(), total: monthly_totals.iter().map(|mt| mt.total).sum(), monthlyTotals: monthly_totals } }); expense_groups_by_month.collect::<Vec<_>>() } fn format_ndt(d: NaiveDateTime) -> String { format_nd(d.date()) } fn format_nd(d: NaiveDate) -> String { let year = d.year(); let month = d.month(); format!("{}-{:02}", year, month) } pub fn group_by<T, K : Eq + Hash>(items: Vec<T>, to_key: fn(&T) -> K) -> HashMap<K, Vec<T>> { let mut start: HashMap<K, Vec<T>> = HashMap::new(); items.into_iter().for_each(|item| { let key = to_key(&item); let mut result = start.entry(key).or_insert(Vec::new()); result.push(item); }); start } #[cfg(test)] mod tests { use chrono::{Datelike, Local, NaiveDate}; #[test] fn since_until_with_year() { let year_param = Some("2017".to_string()); let (since, until) = super::since_until(None, None, None, year_param); assert_eq!(NaiveDate::from_ymd(2017, 1, 1), since); assert_eq!(NaiveDate::from_ymd(2017, 12, 31), until); } #[test] fn since_until_with_month() { let month_param = Some("1".to_string()); let (since, until) = super::since_until(None, None, month_param, None); let now = Local::now().naive_local().date(); let tup = |d:NaiveDate| (d.year(), d.month()); assert_eq!((tup(now), 0), (tup(since), 0)); assert_eq!(tup(now), tup(since)); // todo verify end day:( } #[test] fn since_until_december() { let since_param = Some("2017-12".to_string()); let (since, until) = super::since_until(since_param, None, None, None); assert_eq!(NaiveDate::from_ymd(2017, 12, 1), since); assert_eq!(NaiveDate::from_ymd(2019, 12, 31), until); } }
month: i, total: month_summary.into_iter().map(|m| m.total).sum() }
random_line_split
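The random_line_split hole here lands inside a MonthTotal literal whose month field is the "YYYY-MM" string produced by format_nd. The sketch below shows that key format and the parse_nd round trip that lets the summaries be re-sorted chronologically; it mirrors the helpers in the sample (chrono assumed), with only the example dates invented.

use chrono::{Datelike, NaiveDate};

// Month keys are "YYYY-MM" strings; parsing appends "-01" so chrono can read
// them back as the first day of that month.
fn format_nd(d: NaiveDate) -> String {
    format!("{}-{:02}", d.year(), d.month())
}

fn parse_nd(s: &str) -> NaiveDate {
    NaiveDate::parse_from_str(&format!("{}-01", s.replace(" ", "")), "%Y-%m-%d").unwrap()
}

fn main() {
    let d = NaiveDate::from_ymd(2017, 3, 15);
    let key = format_nd(d);
    assert_eq!(key, "2017-03");
    assert_eq!(parse_nd(&key), NaiveDate::from_ymd(2017, 3, 1));
    println!("round trip ok");
}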
glyph_brush.rs
mod builder; pub use self::builder::*; use super::*; use full_rusttype::gpu_cache::Cache; use hashbrown::hash_map::Entry; use log::error; use std::{ borrow::Cow, fmt, hash::{BuildHasher, BuildHasherDefault, Hash, Hasher}, i32, }; /// A hash of `Section` data type SectionHash = u64; /// A "practically collision free" `Section` hasher type DefaultSectionHasher = BuildHasherDefault<seahash::SeaHasher>; /// Object allowing glyph drawing, containing cache state. Manages glyph positioning cacheing, /// glyph draw caching & efficient GPU texture cache updating. /// /// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html). /// /// # Caching behaviour /// /// Calls to [`GlyphBrush::queue`](#method.queue), /// [`GlyphBrush::pixel_bounds`](#method.pixel_bounds), [`GlyphBrush::glyphs`](#method.glyphs) /// calculate the positioned glyphs for a section. /// This is cached so future calls to any of the methods for the same section are much /// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be /// used for actual drawing. /// /// The cache for a section will be **cleared** after a /// [`GlyphBrush::process_queued`](#method.process_queued) call when that section has not been used /// since the previous call. pub struct GlyphBrush<'font, H = DefaultSectionHasher> { fonts: Vec<Font<'font>>, texture_cache: Cache<'font>, last_draw: LastDrawInfo, // cache of section-layout hash -> computed glyphs, this avoid repeated glyph computation // for identical layout/sections common to repeated frame rendering calculate_glyph_cache: hashbrown::HashMap<SectionHash, GlyphedSection<'font>>, // buffer of section-layout hashs (that must exist in the calculate_glyph_cache) // to be used on the next `process_queued` call section_buffer: Vec<SectionHash>, // Set of section hashs to keep in the glyph cache this frame even if they haven't been drawn keep_in_cache: hashbrown::HashSet<SectionHash>, // config cache_glyph_positioning: bool, cache_glyph_drawing: bool, section_hasher: H, } impl<H> fmt::Debug for GlyphBrush<'_, H> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "GlyphBrush") } } impl<'font, H: BuildHasher> GlyphCruncher<'font> for GlyphBrush<'font, H> { fn pixel_bounds_custom_layout<'a, S, L>( &mut self, section: S, custom_layout: &L, ) -> Option<Rect<i32>> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].pixel_bounds() } fn glyphs_custom_layout<'a, 'b, S, L>( &'b mut self, section: S, custom_layout: &L, ) -> PositionedGlyphIter<'b, 'font> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].glyphs() } fn fonts(&self) -> &[Font<'font>] { &self.fonts } } impl<'font, H: BuildHasher> GlyphBrush<'font, H> { /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Used to provide custom `GlyphPositioner` logic, if using built-in /// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue) /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). 
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where G: GlyphPositioner, S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } let section_hash = self.cache_glyphs(&section, custom_layout); self.section_buffer.push(section_hash); } /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.queue(Section { /// text: "Hello glyph_brush", /// ..Section::default() /// }); /// ``` pub fn queue<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { profile_scope!("glyph_brush_queue"); let section = section.into(); let layout = section.layout; self.queue_custom_layout(section, &layout) } #[inline] fn hash<T: Hash>(&self, hashable: &T) -> SectionHash { let mut s = self.section_hasher.build_hasher(); hashable.hash(&mut s); s.finish() } /// Returns the calculate_glyph_cache key for this sections glyphs fn cache_glyphs<L>(&mut self, section: &VariedSection<'_>, layout: &L) -> SectionHash where L: GlyphPositioner, { profile_scope!("glyph_brush_cache_glyphs"); let section_hash = self.hash(&(section, layout)); if self.cache_glyph_positioning { if let Entry::Vacant(entry) = self.calculate_glyph_cache.entry(section_hash) { let geometry = SectionGeometry::from(section); entry.insert(GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }); } } else { let geometry = SectionGeometry::from(section); self.calculate_glyph_cache.insert( section_hash, GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }, ); } section_hash } /// Processes all queued sections, calling texture update logic when necessary & /// returning a `BrushAction`. /// See [`queue`](struct.GlyphBrush.html#method.queue). /// /// Two closures are required: /// * `update_texture` is called when new glyph texture data has been drawn for update in the /// actual texture. /// The arguments are the rect position of the data in the texture & the byte data itself /// which is a single `u8` alpha value per pixel. /// * `to_vertex` maps a single glyph's `GlyphVertex` data into a generic vertex type. The /// mapped vertices are returned in an `Ok(BrushAction::Draw(vertices))` result. /// It's recommended to use a single vertex per glyph quad for best performance. /// /// Trims the cache, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # fn main() -> Result<(), BrushError> { /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// # let update_texture = |_, _| {}; /// # let into_vertex = |_| (); /// glyph_brush.process_queued( /// (1024, 768), /// |rect, tex_data| update_texture(rect, tex_data), /// |vertex_data| into_vertex(vertex_data), /// )? 
/// # ; /// # Ok(()) /// # } /// ``` pub fn process_queued<V, F1, F2>( &mut self, (screen_w, screen_h): (u32, u32), update_texture: F1, to_vertex: F2, ) -> Result<BrushAction<V>, BrushError> where F1: FnMut(Rect<u32>, &[u8]), F2: Fn(GlyphVertex) -> V, { profile_scope!("glyph_brush_process_queue"); let current_text_state = self.hash(&(&self.section_buffer, screen_w, screen_h)); let result = if !self.cache_glyph_drawing || self.last_draw.text_state != current_text_state { let mut some_text = false; for section_hash in &self.section_buffer { let GlyphedSection { ref glyphs, .. } = self.calculate_glyph_cache[section_hash]; for &(ref glyph, _, font_id) in glyphs { self.texture_cache.queue_glyph(font_id.0, glyph.clone()); some_text = true; } } if some_text && self.texture_cache.cache_queued(update_texture).is_err() { let (width, height) = self.texture_cache.dimensions(); return Err(BrushError::TextureTooSmall { suggested: (width * 2, height * 2), }); } let verts: Vec<V> = if some_text { let sections: Vec<_> = self .section_buffer .iter() .map(|hash| &self.calculate_glyph_cache[hash]) .collect(); let mut verts = Vec::with_capacity( sections .iter() .map(|section| section.glyphs.len()) .sum::<usize>(), ); for &GlyphedSection { ref glyphs, bounds, z, } in sections { verts.extend(glyphs.iter().filter_map(|(glyph, color, font_id)| { match self.texture_cache.rect_for(font_id.0, glyph) { Err(err) => { error!("Cache miss?: {:?}, {:?}: {}", font_id, glyph, err);
Ok(None) => None, Ok(Some((tex_coords, pixel_coords))) => { if pixel_coords.min.x as f32 > bounds.max.x || pixel_coords.min.y as f32 > bounds.max.y || bounds.min.x > pixel_coords.max.x as f32 || bounds.min.y > pixel_coords.max.y as f32 { // glyph is totally outside the bounds None } else { Some(to_vertex(GlyphVertex { tex_coords, pixel_coords, bounds, screen_dimensions: (screen_w as f32, screen_h as f32), color: *color, z, })) } } } })); } verts } else { vec![] }; self.last_draw.text_state = current_text_state; BrushAction::Draw(verts) } else { BrushAction::ReDraw }; self.clear_section_buffer(); Ok(result) } /// Rebuilds the logical texture cache with new dimensions. Should be avoided if possible. /// /// # Example /// /// ```no_run /// # use glyph_brush::GlyphBrushBuilder; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.resize_texture(512, 512); /// ``` pub fn resize_texture(&mut self, new_width: u32, new_height: u32) { self.texture_cache .to_builder() .dimensions(new_width, new_height) .rebuild(&mut self.texture_cache); self.last_draw = LastDrawInfo::default(); } /// Returns the logical texture cache pixel dimensions `(width, height)`. pub fn texture_dimensions(&self) -> (u32, u32) { self.texture_cache.dimensions() } fn clear_section_buffer(&mut self) { if self.cache_glyph_positioning { // clear section_buffer & trim calculate_glyph_cache to active sections let active: hashbrown::HashSet<_> = self .section_buffer .drain(..) .chain(self.keep_in_cache.drain()) .collect(); self.calculate_glyph_cache .retain(|key, _| active.contains(key)); } else { self.section_buffer.clear(); self.calculate_glyph_cache.clear(); self.keep_in_cache.clear(); } } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. /// /// # Example /// /// ```no_run /// use glyph_brush::{GlyphBrushBuilder, Section}; /// # fn main() { /// /// // dejavu is built as default `FontId(0)` /// let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// /// // some time later, add another font referenced by a new `FontId` /// let open_sans_italic: &[u8] = include_bytes!("../../fonts/OpenSans-Italic.ttf"); /// let open_sans_italic_id = glyph_brush.add_font_bytes(open_sans_italic); /// # } /// ``` pub fn add_font_bytes<'a: 'font, B: Into<SharedBytes<'a>>>(&mut self, font_data: B) -> FontId { self.add_font(Font::from_bytes(font_data.into()).unwrap()) } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. pub fn add_font<'a: 'font>(&mut self, font_data: Font<'a>) -> FontId { self.fonts.push(font_data); FontId(self.fonts.len() - 1) } /// Retains the section in the cache as if it had been used in the last draw-frame. /// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). 
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where S: Into<Cow<'a, VariedSection<'a>>>, G: GlyphPositioner, { if !self.cache_glyph_positioning { return; } let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } self.keep_in_cache .insert(self.hash(&(section, custom_layout))); } /// Retains the section in the cache as if it had been used in the last draw-frame. /// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). pub fn keep_cached<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); let layout = section.layout; self.keep_cached_custom_layout(section, &layout); } } #[derive(Debug, Default)] struct LastDrawInfo { text_state: u64, } // glyph: &PositionedGlyph, // color: Color, // font_id: FontId, // cache: &Cache, // bounds: Rect<f32>, // z: f32, // (screen_width, screen_height): (f32, f32), /// Data used to generate vertex information for a single glyph #[derive(Debug)] pub struct GlyphVertex { pub tex_coords: Rect<f32>, pub pixel_coords: Rect<i32>, pub bounds: Rect<f32>, pub screen_dimensions: (f32, f32), pub color: Color, pub z: f32, } /// Actions that should be taken after processing queue data pub enum BrushAction<V> { /// Draw new/changed vertix data. Draw(Vec<V>), /// Re-draw last frame's vertices unmodified. ReDraw, } #[derive(Debug)] pub enum BrushError { /// Texture is too small to cache queued glyphs /// /// A larger suggested size is included. TextureTooSmall { suggested: (u32, u32) }, } impl fmt::Display for BrushError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", std::error::Error::description(self)) } } impl std::error::Error for BrushError { fn description(&self) -> &str { match self { BrushError::TextureTooSmall { .. } => "Texture is too small to cache queued glyphs", } } }
None }
random_line_split
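Switching files: the glyph_brush.rs sample caches positioned glyphs under a u64 SectionHash computed from any Hash-able section/layout pair. Below is a self-contained sketch of that hashing pattern; hash_of is a hypothetical name, and std's DefaultHasher stands in for the seahash-based DefaultSectionHasher used in the crate so the example needs no extra dependencies.

use std::collections::hash_map::DefaultHasher;
use std::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher};

type SectionHash = u64;

// Run any Hash-able value through a BuildHasher to obtain a u64 cache key.
fn hash_of<T: Hash, H: BuildHasher>(hashable: &T, build: &H) -> SectionHash {
    let mut s = build.build_hasher();
    hashable.hash(&mut s);
    s.finish()
}

fn main() {
    let hasher: BuildHasherDefault<DefaultHasher> = BuildHasherDefault::default();
    let a = hash_of(&("Hello glyph_brush", 24u32), &hasher);
    let b = hash_of(&("Hello glyph_brush", 24u32), &hasher);
    // Identical section data hashes to the same key.
    assert_eq!(a, b);
    println!("{:x}", a);
}

Because equal section data always yields the same key, a repeated frame hits calculate_glyph_cache and skips glyph layout entirely, which is the caching behaviour the doc comments in the sample describe.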
glyph_brush.rs
mod builder; pub use self::builder::*; use super::*; use full_rusttype::gpu_cache::Cache; use hashbrown::hash_map::Entry; use log::error; use std::{ borrow::Cow, fmt, hash::{BuildHasher, BuildHasherDefault, Hash, Hasher}, i32, }; /// A hash of `Section` data type SectionHash = u64; /// A "practically collision free" `Section` hasher type DefaultSectionHasher = BuildHasherDefault<seahash::SeaHasher>; /// Object allowing glyph drawing, containing cache state. Manages glyph positioning cacheing, /// glyph draw caching & efficient GPU texture cache updating. /// /// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html). /// /// # Caching behaviour /// /// Calls to [`GlyphBrush::queue`](#method.queue), /// [`GlyphBrush::pixel_bounds`](#method.pixel_bounds), [`GlyphBrush::glyphs`](#method.glyphs) /// calculate the positioned glyphs for a section. /// This is cached so future calls to any of the methods for the same section are much /// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be /// used for actual drawing. /// /// The cache for a section will be **cleared** after a /// [`GlyphBrush::process_queued`](#method.process_queued) call when that section has not been used /// since the previous call. pub struct GlyphBrush<'font, H = DefaultSectionHasher> { fonts: Vec<Font<'font>>, texture_cache: Cache<'font>, last_draw: LastDrawInfo, // cache of section-layout hash -> computed glyphs, this avoid repeated glyph computation // for identical layout/sections common to repeated frame rendering calculate_glyph_cache: hashbrown::HashMap<SectionHash, GlyphedSection<'font>>, // buffer of section-layout hashs (that must exist in the calculate_glyph_cache) // to be used on the next `process_queued` call section_buffer: Vec<SectionHash>, // Set of section hashs to keep in the glyph cache this frame even if they haven't been drawn keep_in_cache: hashbrown::HashSet<SectionHash>, // config cache_glyph_positioning: bool, cache_glyph_drawing: bool, section_hasher: H, } impl<H> fmt::Debug for GlyphBrush<'_, H> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "GlyphBrush") } } impl<'font, H: BuildHasher> GlyphCruncher<'font> for GlyphBrush<'font, H> { fn pixel_bounds_custom_layout<'a, S, L>( &mut self, section: S, custom_layout: &L, ) -> Option<Rect<i32>> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].pixel_bounds() } fn glyphs_custom_layout<'a, 'b, S, L>( &'b mut self, section: S, custom_layout: &L, ) -> PositionedGlyphIter<'b, 'font> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].glyphs() } fn fonts(&self) -> &[Font<'font>] { &self.fonts } } impl<'font, H: BuildHasher> GlyphBrush<'font, H> { /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Used to provide custom `GlyphPositioner` logic, if using built-in /// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue) /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). 
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where G: GlyphPositioner, S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } let section_hash = self.cache_glyphs(&section, custom_layout); self.section_buffer.push(section_hash); } /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.queue(Section { /// text: "Hello glyph_brush", /// ..Section::default() /// }); /// ``` pub fn queue<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { profile_scope!("glyph_brush_queue"); let section = section.into(); let layout = section.layout; self.queue_custom_layout(section, &layout) } #[inline] fn hash<T: Hash>(&self, hashable: &T) -> SectionHash { let mut s = self.section_hasher.build_hasher(); hashable.hash(&mut s); s.finish() } /// Returns the calculate_glyph_cache key for this sections glyphs fn cache_glyphs<L>(&mut self, section: &VariedSection<'_>, layout: &L) -> SectionHash where L: GlyphPositioner, { profile_scope!("glyph_brush_cache_glyphs"); let section_hash = self.hash(&(section, layout)); if self.cache_glyph_positioning { if let Entry::Vacant(entry) = self.calculate_glyph_cache.entry(section_hash) { let geometry = SectionGeometry::from(section); entry.insert(GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }); } } else { let geometry = SectionGeometry::from(section); self.calculate_glyph_cache.insert( section_hash, GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }, ); } section_hash } /// Processes all queued sections, calling texture update logic when necessary & /// returning a `BrushAction`. /// See [`queue`](struct.GlyphBrush.html#method.queue). /// /// Two closures are required: /// * `update_texture` is called when new glyph texture data has been drawn for update in the /// actual texture. /// The arguments are the rect position of the data in the texture & the byte data itself /// which is a single `u8` alpha value per pixel. /// * `to_vertex` maps a single glyph's `GlyphVertex` data into a generic vertex type. The /// mapped vertices are returned in an `Ok(BrushAction::Draw(vertices))` result. /// It's recommended to use a single vertex per glyph quad for best performance. /// /// Trims the cache, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # fn main() -> Result<(), BrushError> { /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// # let update_texture = |_, _| {}; /// # let into_vertex = |_| (); /// glyph_brush.process_queued( /// (1024, 768), /// |rect, tex_data| update_texture(rect, tex_data), /// |vertex_data| into_vertex(vertex_data), /// )? 
/// # ; /// # Ok(()) /// # } /// ``` pub fn process_queued<V, F1, F2>( &mut self, (screen_w, screen_h): (u32, u32), update_texture: F1, to_vertex: F2, ) -> Result<BrushAction<V>, BrushError> where F1: FnMut(Rect<u32>, &[u8]), F2: Fn(GlyphVertex) -> V, { profile_scope!("glyph_brush_process_queue"); let current_text_state = self.hash(&(&self.section_buffer, screen_w, screen_h)); let result = if !self.cache_glyph_drawing || self.last_draw.text_state != current_text_state { let mut some_text = false; for section_hash in &self.section_buffer { let GlyphedSection { ref glyphs, .. } = self.calculate_glyph_cache[section_hash]; for &(ref glyph, _, font_id) in glyphs { self.texture_cache.queue_glyph(font_id.0, glyph.clone()); some_text = true; } } if some_text && self.texture_cache.cache_queued(update_texture).is_err()
let verts: Vec<V> = if some_text { let sections: Vec<_> = self .section_buffer .iter() .map(|hash| &self.calculate_glyph_cache[hash]) .collect(); let mut verts = Vec::with_capacity( sections .iter() .map(|section| section.glyphs.len()) .sum::<usize>(), ); for &GlyphedSection { ref glyphs, bounds, z, } in sections { verts.extend(glyphs.iter().filter_map(|(glyph, color, font_id)| { match self.texture_cache.rect_for(font_id.0, glyph) { Err(err) => { error!("Cache miss?: {:?}, {:?}: {}", font_id, glyph, err); None } Ok(None) => None, Ok(Some((tex_coords, pixel_coords))) => { if pixel_coords.min.x as f32 > bounds.max.x || pixel_coords.min.y as f32 > bounds.max.y || bounds.min.x > pixel_coords.max.x as f32 || bounds.min.y > pixel_coords.max.y as f32 { // glyph is totally outside the bounds None } else { Some(to_vertex(GlyphVertex { tex_coords, pixel_coords, bounds, screen_dimensions: (screen_w as f32, screen_h as f32), color: *color, z, })) } } } })); } verts } else { vec![] }; self.last_draw.text_state = current_text_state; BrushAction::Draw(verts) } else { BrushAction::ReDraw }; self.clear_section_buffer(); Ok(result) } /// Rebuilds the logical texture cache with new dimensions. Should be avoided if possible. /// /// # Example /// /// ```no_run /// # use glyph_brush::GlyphBrushBuilder; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.resize_texture(512, 512); /// ``` pub fn resize_texture(&mut self, new_width: u32, new_height: u32) { self.texture_cache .to_builder() .dimensions(new_width, new_height) .rebuild(&mut self.texture_cache); self.last_draw = LastDrawInfo::default(); } /// Returns the logical texture cache pixel dimensions `(width, height)`. pub fn texture_dimensions(&self) -> (u32, u32) { self.texture_cache.dimensions() } fn clear_section_buffer(&mut self) { if self.cache_glyph_positioning { // clear section_buffer & trim calculate_glyph_cache to active sections let active: hashbrown::HashSet<_> = self .section_buffer .drain(..) .chain(self.keep_in_cache.drain()) .collect(); self.calculate_glyph_cache .retain(|key, _| active.contains(key)); } else { self.section_buffer.clear(); self.calculate_glyph_cache.clear(); self.keep_in_cache.clear(); } } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. /// /// # Example /// /// ```no_run /// use glyph_brush::{GlyphBrushBuilder, Section}; /// # fn main() { /// /// // dejavu is built as default `FontId(0)` /// let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// /// // some time later, add another font referenced by a new `FontId` /// let open_sans_italic: &[u8] = include_bytes!("../../fonts/OpenSans-Italic.ttf"); /// let open_sans_italic_id = glyph_brush.add_font_bytes(open_sans_italic); /// # } /// ``` pub fn add_font_bytes<'a: 'font, B: Into<SharedBytes<'a>>>(&mut self, font_data: B) -> FontId { self.add_font(Font::from_bytes(font_data.into()).unwrap()) } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. pub fn add_font<'a: 'font>(&mut self, font_data: Font<'a>) -> FontId { self.fonts.push(font_data); FontId(self.fonts.len() - 1) } /// Retains the section in the cache as if it had been used in the last draw-frame. 
/// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where S: Into<Cow<'a, VariedSection<'a>>>, G: GlyphPositioner, { if !self.cache_glyph_positioning { return; } let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } self.keep_in_cache .insert(self.hash(&(section, custom_layout))); } /// Retains the section in the cache as if it had been used in the last draw-frame. /// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). pub fn keep_cached<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); let layout = section.layout; self.keep_cached_custom_layout(section, &layout); } } #[derive(Debug, Default)] struct LastDrawInfo { text_state: u64, } // glyph: &PositionedGlyph, // color: Color, // font_id: FontId, // cache: &Cache, // bounds: Rect<f32>, // z: f32, // (screen_width, screen_height): (f32, f32), /// Data used to generate vertex information for a single glyph #[derive(Debug)] pub struct GlyphVertex { pub tex_coords: Rect<f32>, pub pixel_coords: Rect<i32>, pub bounds: Rect<f32>, pub screen_dimensions: (f32, f32), pub color: Color, pub z: f32, } /// Actions that should be taken after processing queue data pub enum BrushAction<V> { /// Draw new/changed vertix data. Draw(Vec<V>), /// Re-draw last frame's vertices unmodified. ReDraw, } #[derive(Debug)] pub enum BrushError { /// Texture is too small to cache queued glyphs /// /// A larger suggested size is included. TextureTooSmall { suggested: (u32, u32) }, } impl fmt::Display for BrushError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", std::error::Error::description(self)) } } impl std::error::Error for BrushError { fn description(&self) -> &str { match self { BrushError::TextureTooSmall { .. } => "Texture is too small to cache queued glyphs", } } }
{ let (width, height) = self.texture_cache.dimensions(); return Err(BrushError::TextureTooSmall { suggested: (width * 2, height * 2), }); }
conditional_block
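The conditional_block in this last row is the early return of BrushError::TextureTooSmall from process_queued. The sketch below shows how a caller typically reacts, pieced together from the doc comments in the sample: grow the texture to the suggested size, call resize_texture, and retry. The queue/process_queued/resize_texture calls and the font path mirror the sample's own doc examples; recreate_gpu_texture is a hypothetical stand-in for renderer-specific texture allocation.

use glyph_brush::{BrushAction, BrushError, GlyphBrushBuilder, Section};

fn main() {
    let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf");
    let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build();

    glyph_brush.queue(Section {
        text: "resize demo",
        ..Section::default()
    });

    let update_texture = |_rect, _tex_data| { /* upload bytes into the GPU cache texture */ };
    let into_vertex = |_vertex_data| ();

    loop {
        match glyph_brush.process_queued((1024, 768), update_texture, into_vertex) {
            Ok(BrushAction::Draw(_)) | Ok(BrushAction::ReDraw) => break,
            Err(BrushError::TextureTooSmall { suggested: (w, h) }) => {
                // recreate_gpu_texture(w, h); // hypothetical renderer-side call
                glyph_brush.resize_texture(w, h);
            }
        }
    }
}

Retrying without re-queueing works because, as the sample shows, process_queued returns the error before clear_section_buffer runs, so the queued sections are still pending on the next call.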
glyph_brush.rs
mod builder; pub use self::builder::*; use super::*; use full_rusttype::gpu_cache::Cache; use hashbrown::hash_map::Entry; use log::error; use std::{ borrow::Cow, fmt, hash::{BuildHasher, BuildHasherDefault, Hash, Hasher}, i32, }; /// A hash of `Section` data type SectionHash = u64; /// A "practically collision free" `Section` hasher type DefaultSectionHasher = BuildHasherDefault<seahash::SeaHasher>; /// Object allowing glyph drawing, containing cache state. Manages glyph positioning cacheing, /// glyph draw caching & efficient GPU texture cache updating. /// /// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html). /// /// # Caching behaviour /// /// Calls to [`GlyphBrush::queue`](#method.queue), /// [`GlyphBrush::pixel_bounds`](#method.pixel_bounds), [`GlyphBrush::glyphs`](#method.glyphs) /// calculate the positioned glyphs for a section. /// This is cached so future calls to any of the methods for the same section are much /// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be /// used for actual drawing. /// /// The cache for a section will be **cleared** after a /// [`GlyphBrush::process_queued`](#method.process_queued) call when that section has not been used /// since the previous call. pub struct GlyphBrush<'font, H = DefaultSectionHasher> { fonts: Vec<Font<'font>>, texture_cache: Cache<'font>, last_draw: LastDrawInfo, // cache of section-layout hash -> computed glyphs, this avoid repeated glyph computation // for identical layout/sections common to repeated frame rendering calculate_glyph_cache: hashbrown::HashMap<SectionHash, GlyphedSection<'font>>, // buffer of section-layout hashs (that must exist in the calculate_glyph_cache) // to be used on the next `process_queued` call section_buffer: Vec<SectionHash>, // Set of section hashs to keep in the glyph cache this frame even if they haven't been drawn keep_in_cache: hashbrown::HashSet<SectionHash>, // config cache_glyph_positioning: bool, cache_glyph_drawing: bool, section_hasher: H, } impl<H> fmt::Debug for GlyphBrush<'_, H> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "GlyphBrush") } } impl<'font, H: BuildHasher> GlyphCruncher<'font> for GlyphBrush<'font, H> { fn pixel_bounds_custom_layout<'a, S, L>( &mut self, section: S, custom_layout: &L, ) -> Option<Rect<i32>> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].pixel_bounds() } fn glyphs_custom_layout<'a, 'b, S, L>( &'b mut self, section: S, custom_layout: &L, ) -> PositionedGlyphIter<'b, 'font> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].glyphs() } fn fonts(&self) -> &[Font<'font>] { &self.fonts } } impl<'font, H: BuildHasher> GlyphBrush<'font, H> { /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Used to provide custom `GlyphPositioner` logic, if using built-in /// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue) /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). 
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where G: GlyphPositioner, S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } let section_hash = self.cache_glyphs(&section, custom_layout); self.section_buffer.push(section_hash); } /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.queue(Section { /// text: "Hello glyph_brush", /// ..Section::default() /// }); /// ``` pub fn queue<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { profile_scope!("glyph_brush_queue"); let section = section.into(); let layout = section.layout; self.queue_custom_layout(section, &layout) } #[inline] fn hash<T: Hash>(&self, hashable: &T) -> SectionHash { let mut s = self.section_hasher.build_hasher(); hashable.hash(&mut s); s.finish() } /// Returns the calculate_glyph_cache key for this sections glyphs fn cache_glyphs<L>(&mut self, section: &VariedSection<'_>, layout: &L) -> SectionHash where L: GlyphPositioner, { profile_scope!("glyph_brush_cache_glyphs"); let section_hash = self.hash(&(section, layout)); if self.cache_glyph_positioning { if let Entry::Vacant(entry) = self.calculate_glyph_cache.entry(section_hash) { let geometry = SectionGeometry::from(section); entry.insert(GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }); } } else { let geometry = SectionGeometry::from(section); self.calculate_glyph_cache.insert( section_hash, GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }, ); } section_hash } /// Processes all queued sections, calling texture update logic when necessary & /// returning a `BrushAction`. /// See [`queue`](struct.GlyphBrush.html#method.queue). /// /// Two closures are required: /// * `update_texture` is called when new glyph texture data has been drawn for update in the /// actual texture. /// The arguments are the rect position of the data in the texture & the byte data itself /// which is a single `u8` alpha value per pixel. /// * `to_vertex` maps a single glyph's `GlyphVertex` data into a generic vertex type. The /// mapped vertices are returned in an `Ok(BrushAction::Draw(vertices))` result. /// It's recommended to use a single vertex per glyph quad for best performance. /// /// Trims the cache, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # fn main() -> Result<(), BrushError> { /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// # let update_texture = |_, _| {}; /// # let into_vertex = |_| (); /// glyph_brush.process_queued( /// (1024, 768), /// |rect, tex_data| update_texture(rect, tex_data), /// |vertex_data| into_vertex(vertex_data), /// )? 
/// # ; /// # Ok(()) /// # } /// ``` pub fn process_queued<V, F1, F2>( &mut self, (screen_w, screen_h): (u32, u32), update_texture: F1, to_vertex: F2, ) -> Result<BrushAction<V>, BrushError> where F1: FnMut(Rect<u32>, &[u8]), F2: Fn(GlyphVertex) -> V, { profile_scope!("glyph_brush_process_queue"); let current_text_state = self.hash(&(&self.section_buffer, screen_w, screen_h)); let result = if !self.cache_glyph_drawing || self.last_draw.text_state != current_text_state { let mut some_text = false; for section_hash in &self.section_buffer { let GlyphedSection { ref glyphs, .. } = self.calculate_glyph_cache[section_hash]; for &(ref glyph, _, font_id) in glyphs { self.texture_cache.queue_glyph(font_id.0, glyph.clone()); some_text = true; } } if some_text && self.texture_cache.cache_queued(update_texture).is_err() { let (width, height) = self.texture_cache.dimensions(); return Err(BrushError::TextureTooSmall { suggested: (width * 2, height * 2), }); } let verts: Vec<V> = if some_text { let sections: Vec<_> = self .section_buffer .iter() .map(|hash| &self.calculate_glyph_cache[hash]) .collect(); let mut verts = Vec::with_capacity( sections .iter() .map(|section| section.glyphs.len()) .sum::<usize>(), ); for &GlyphedSection { ref glyphs, bounds, z, } in sections { verts.extend(glyphs.iter().filter_map(|(glyph, color, font_id)| { match self.texture_cache.rect_for(font_id.0, glyph) { Err(err) => { error!("Cache miss?: {:?}, {:?}: {}", font_id, glyph, err); None } Ok(None) => None, Ok(Some((tex_coords, pixel_coords))) => { if pixel_coords.min.x as f32 > bounds.max.x || pixel_coords.min.y as f32 > bounds.max.y || bounds.min.x > pixel_coords.max.x as f32 || bounds.min.y > pixel_coords.max.y as f32 { // glyph is totally outside the bounds None } else { Some(to_vertex(GlyphVertex { tex_coords, pixel_coords, bounds, screen_dimensions: (screen_w as f32, screen_h as f32), color: *color, z, })) } } } })); } verts } else { vec![] }; self.last_draw.text_state = current_text_state; BrushAction::Draw(verts) } else { BrushAction::ReDraw }; self.clear_section_buffer(); Ok(result) } /// Rebuilds the logical texture cache with new dimensions. Should be avoided if possible. /// /// # Example /// /// ```no_run /// # use glyph_brush::GlyphBrushBuilder; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.resize_texture(512, 512); /// ``` pub fn resize_texture(&mut self, new_width: u32, new_height: u32) { self.texture_cache .to_builder() .dimensions(new_width, new_height) .rebuild(&mut self.texture_cache); self.last_draw = LastDrawInfo::default(); } /// Returns the logical texture cache pixel dimensions `(width, height)`. pub fn texture_dimensions(&self) -> (u32, u32) { self.texture_cache.dimensions() } fn clear_section_buffer(&mut self) { if self.cache_glyph_positioning { // clear section_buffer & trim calculate_glyph_cache to active sections let active: hashbrown::HashSet<_> = self .section_buffer .drain(..) .chain(self.keep_in_cache.drain()) .collect(); self.calculate_glyph_cache .retain(|key, _| active.contains(key)); } else { self.section_buffer.clear(); self.calculate_glyph_cache.clear(); self.keep_in_cache.clear(); } } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. 
/// /// # Example /// /// ```no_run /// use glyph_brush::{GlyphBrushBuilder, Section}; /// # fn main() { /// /// // dejavu is built as default `FontId(0)` /// let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// /// // some time later, add another font referenced by a new `FontId` /// let open_sans_italic: &[u8] = include_bytes!("../../fonts/OpenSans-Italic.ttf"); /// let open_sans_italic_id = glyph_brush.add_font_bytes(open_sans_italic); /// # } /// ``` pub fn add_font_bytes<'a: 'font, B: Into<SharedBytes<'a>>>(&mut self, font_data: B) -> FontId
/// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. pub fn add_font<'a: 'font>(&mut self, font_data: Font<'a>) -> FontId { self.fonts.push(font_data); FontId(self.fonts.len() - 1) } /// Retains the section in the cache as if it had been used in the last draw-frame. /// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where S: Into<Cow<'a, VariedSection<'a>>>, G: GlyphPositioner, { if !self.cache_glyph_positioning { return; } let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } self.keep_in_cache .insert(self.hash(&(section, custom_layout))); } /// Retains the section in the cache as if it had been used in the last draw-frame. /// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). pub fn keep_cached<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); let layout = section.layout; self.keep_cached_custom_layout(section, &layout); } } #[derive(Debug, Default)] struct LastDrawInfo { text_state: u64, } // glyph: &PositionedGlyph, // color: Color, // font_id: FontId, // cache: &Cache, // bounds: Rect<f32>, // z: f32, // (screen_width, screen_height): (f32, f32), /// Data used to generate vertex information for a single glyph #[derive(Debug)] pub struct GlyphVertex { pub tex_coords: Rect<f32>, pub pixel_coords: Rect<i32>, pub bounds: Rect<f32>, pub screen_dimensions: (f32, f32), pub color: Color, pub z: f32, } /// Actions that should be taken after processing queue data pub enum BrushAction<V> { /// Draw new/changed vertix data. Draw(Vec<V>), /// Re-draw last frame's vertices unmodified. ReDraw, } #[derive(Debug)] pub enum BrushError { /// Texture is too small to cache queued glyphs /// /// A larger suggested size is included. TextureTooSmall { suggested: (u32, u32) }, } impl fmt::Display for BrushError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", std::error::Error::description(self)) } } impl std::error::Error for BrushError { fn description(&self) -> &str { match self { BrushError::TextureTooSmall { .. } => "Texture is too small to cache queued glyphs", } } }
{ self.add_font(Font::from_bytes(font_data.into()).unwrap()) }
identifier_body
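The `identifier_body` middle above fills in `add_font_bytes`, which wraps the bytes in a `Font` and hands back its index as a `FontId`. A short sketch of using that id afterwards follows; the second font path is a placeholder, and the `font_id` field on `Section` is assumed from the crate's API rather than shown in these records.

```rust
use glyph_brush::{GlyphBrushBuilder, Section};

fn main() {
    // FontId(0) is the font the brush was built with.
    let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf");
    let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build();

    // Fonts added later get the next index, so this is FontId(1).
    let open_sans_italic: &[u8] = include_bytes!("../../fonts/OpenSans-Italic.ttf");
    let italic_id = glyph_brush.add_font_bytes(open_sans_italic);

    glyph_brush.queue(Section {
        text: "laid out with the default font",
        ..Section::default()
    });
    glyph_brush.queue(Section {
        text: "laid out with the added font",
        font_id: italic_id, // assumed field; selects which loaded font positions this text
        ..Section::default()
    });
}
```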
glyph_brush.rs
mod builder; pub use self::builder::*; use super::*; use full_rusttype::gpu_cache::Cache; use hashbrown::hash_map::Entry; use log::error; use std::{ borrow::Cow, fmt, hash::{BuildHasher, BuildHasherDefault, Hash, Hasher}, i32, }; /// A hash of `Section` data type SectionHash = u64; /// A "practically collision free" `Section` hasher type DefaultSectionHasher = BuildHasherDefault<seahash::SeaHasher>; /// Object allowing glyph drawing, containing cache state. Manages glyph positioning cacheing, /// glyph draw caching & efficient GPU texture cache updating. /// /// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html). /// /// # Caching behaviour /// /// Calls to [`GlyphBrush::queue`](#method.queue), /// [`GlyphBrush::pixel_bounds`](#method.pixel_bounds), [`GlyphBrush::glyphs`](#method.glyphs) /// calculate the positioned glyphs for a section. /// This is cached so future calls to any of the methods for the same section are much /// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be /// used for actual drawing. /// /// The cache for a section will be **cleared** after a /// [`GlyphBrush::process_queued`](#method.process_queued) call when that section has not been used /// since the previous call. pub struct GlyphBrush<'font, H = DefaultSectionHasher> { fonts: Vec<Font<'font>>, texture_cache: Cache<'font>, last_draw: LastDrawInfo, // cache of section-layout hash -> computed glyphs, this avoid repeated glyph computation // for identical layout/sections common to repeated frame rendering calculate_glyph_cache: hashbrown::HashMap<SectionHash, GlyphedSection<'font>>, // buffer of section-layout hashs (that must exist in the calculate_glyph_cache) // to be used on the next `process_queued` call section_buffer: Vec<SectionHash>, // Set of section hashs to keep in the glyph cache this frame even if they haven't been drawn keep_in_cache: hashbrown::HashSet<SectionHash>, // config cache_glyph_positioning: bool, cache_glyph_drawing: bool, section_hasher: H, } impl<H> fmt::Debug for GlyphBrush<'_, H> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "GlyphBrush") } } impl<'font, H: BuildHasher> GlyphCruncher<'font> for GlyphBrush<'font, H> { fn pixel_bounds_custom_layout<'a, S, L>( &mut self, section: S, custom_layout: &L, ) -> Option<Rect<i32>> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].pixel_bounds() } fn glyphs_custom_layout<'a, 'b, S, L>( &'b mut self, section: S, custom_layout: &L, ) -> PositionedGlyphIter<'b, 'font> where L: GlyphPositioner + Hash, S: Into<Cow<'a, VariedSection<'a>>>, { let section_hash = self.cache_glyphs(&section.into(), custom_layout); self.keep_in_cache.insert(section_hash); self.calculate_glyph_cache[&section_hash].glyphs() } fn fonts(&self) -> &[Font<'font>] { &self.fonts } } impl<'font, H: BuildHasher> GlyphBrush<'font, H> { /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Used to provide custom `GlyphPositioner` logic, if using built-in /// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue) /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). 
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G) where G: GlyphPositioner, S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } let section_hash = self.cache_glyphs(&section, custom_layout); self.section_buffer.push(section_hash); } /// Queues a section/layout to be processed by the next call of /// [`process_queued`](struct.GlyphBrush.html#method.process_queued). Can be called multiple /// times to queue multiple sections for drawing. /// /// Benefits from caching, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.queue(Section { /// text: "Hello glyph_brush", /// ..Section::default() /// }); /// ``` pub fn queue<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { profile_scope!("glyph_brush_queue"); let section = section.into(); let layout = section.layout; self.queue_custom_layout(section, &layout) } #[inline] fn hash<T: Hash>(&self, hashable: &T) -> SectionHash { let mut s = self.section_hasher.build_hasher(); hashable.hash(&mut s); s.finish() } /// Returns the calculate_glyph_cache key for this sections glyphs fn cache_glyphs<L>(&mut self, section: &VariedSection<'_>, layout: &L) -> SectionHash where L: GlyphPositioner, { profile_scope!("glyph_brush_cache_glyphs"); let section_hash = self.hash(&(section, layout)); if self.cache_glyph_positioning { if let Entry::Vacant(entry) = self.calculate_glyph_cache.entry(section_hash) { let geometry = SectionGeometry::from(section); entry.insert(GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }); } } else { let geometry = SectionGeometry::from(section); self.calculate_glyph_cache.insert( section_hash, GlyphedSection { bounds: layout.bounds_rect(&geometry), glyphs: layout.calculate_glyphs(&self.fonts, &geometry, &section.text), z: section.z, }, ); } section_hash } /// Processes all queued sections, calling texture update logic when necessary & /// returning a `BrushAction`. /// See [`queue`](struct.GlyphBrush.html#method.queue). /// /// Two closures are required: /// * `update_texture` is called when new glyph texture data has been drawn for update in the /// actual texture. /// The arguments are the rect position of the data in the texture & the byte data itself /// which is a single `u8` alpha value per pixel. /// * `to_vertex` maps a single glyph's `GlyphVertex` data into a generic vertex type. The /// mapped vertices are returned in an `Ok(BrushAction::Draw(vertices))` result. /// It's recommended to use a single vertex per glyph quad for best performance. /// /// Trims the cache, see [caching behaviour](#caching-behaviour). /// /// ```no_run /// # use glyph_brush::*; /// # fn main() -> Result<(), BrushError> { /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// # let update_texture = |_, _| {}; /// # let into_vertex = |_| (); /// glyph_brush.process_queued( /// (1024, 768), /// |rect, tex_data| update_texture(rect, tex_data), /// |vertex_data| into_vertex(vertex_data), /// )? 
/// # ; /// # Ok(()) /// # } /// ``` pub fn process_queued<V, F1, F2>( &mut self, (screen_w, screen_h): (u32, u32), update_texture: F1, to_vertex: F2, ) -> Result<BrushAction<V>, BrushError> where F1: FnMut(Rect<u32>, &[u8]), F2: Fn(GlyphVertex) -> V, { profile_scope!("glyph_brush_process_queue"); let current_text_state = self.hash(&(&self.section_buffer, screen_w, screen_h)); let result = if !self.cache_glyph_drawing || self.last_draw.text_state != current_text_state { let mut some_text = false; for section_hash in &self.section_buffer { let GlyphedSection { ref glyphs, .. } = self.calculate_glyph_cache[section_hash]; for &(ref glyph, _, font_id) in glyphs { self.texture_cache.queue_glyph(font_id.0, glyph.clone()); some_text = true; } } if some_text && self.texture_cache.cache_queued(update_texture).is_err() { let (width, height) = self.texture_cache.dimensions(); return Err(BrushError::TextureTooSmall { suggested: (width * 2, height * 2), }); } let verts: Vec<V> = if some_text { let sections: Vec<_> = self .section_buffer .iter() .map(|hash| &self.calculate_glyph_cache[hash]) .collect(); let mut verts = Vec::with_capacity( sections .iter() .map(|section| section.glyphs.len()) .sum::<usize>(), ); for &GlyphedSection { ref glyphs, bounds, z, } in sections { verts.extend(glyphs.iter().filter_map(|(glyph, color, font_id)| { match self.texture_cache.rect_for(font_id.0, glyph) { Err(err) => { error!("Cache miss?: {:?}, {:?}: {}", font_id, glyph, err); None } Ok(None) => None, Ok(Some((tex_coords, pixel_coords))) => { if pixel_coords.min.x as f32 > bounds.max.x || pixel_coords.min.y as f32 > bounds.max.y || bounds.min.x > pixel_coords.max.x as f32 || bounds.min.y > pixel_coords.max.y as f32 { // glyph is totally outside the bounds None } else { Some(to_vertex(GlyphVertex { tex_coords, pixel_coords, bounds, screen_dimensions: (screen_w as f32, screen_h as f32), color: *color, z, })) } } } })); } verts } else { vec![] }; self.last_draw.text_state = current_text_state; BrushAction::Draw(verts) } else { BrushAction::ReDraw }; self.clear_section_buffer(); Ok(result) } /// Rebuilds the logical texture cache with new dimensions. Should be avoided if possible. /// /// # Example /// /// ```no_run /// # use glyph_brush::GlyphBrushBuilder; /// # let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// # let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// glyph_brush.resize_texture(512, 512); /// ``` pub fn resize_texture(&mut self, new_width: u32, new_height: u32) { self.texture_cache .to_builder() .dimensions(new_width, new_height) .rebuild(&mut self.texture_cache); self.last_draw = LastDrawInfo::default(); } /// Returns the logical texture cache pixel dimensions `(width, height)`. pub fn texture_dimensions(&self) -> (u32, u32) { self.texture_cache.dimensions() } fn clear_section_buffer(&mut self) { if self.cache_glyph_positioning { // clear section_buffer & trim calculate_glyph_cache to active sections let active: hashbrown::HashSet<_> = self .section_buffer .drain(..) .chain(self.keep_in_cache.drain()) .collect(); self.calculate_glyph_cache .retain(|key, _| active.contains(key)); } else { self.section_buffer.clear(); self.calculate_glyph_cache.clear(); self.keep_in_cache.clear(); } } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. 
/// /// # Example /// /// ```no_run /// use glyph_brush::{GlyphBrushBuilder, Section}; /// # fn main() { /// /// // dejavu is built as default `FontId(0)` /// let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf"); /// let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build(); /// /// // some time later, add another font referenced by a new `FontId` /// let open_sans_italic: &[u8] = include_bytes!("../../fonts/OpenSans-Italic.ttf"); /// let open_sans_italic_id = glyph_brush.add_font_bytes(open_sans_italic); /// # } /// ``` pub fn add_font_bytes<'a: 'font, B: Into<SharedBytes<'a>>>(&mut self, font_data: B) -> FontId { self.add_font(Font::from_bytes(font_data.into()).unwrap()) } /// Adds an additional font to the one(s) initially added on build. /// /// Returns a new [`FontId`](struct.FontId.html) to reference this font. pub fn add_font<'a: 'font>(&mut self, font_data: Font<'a>) -> FontId { self.fonts.push(font_data); FontId(self.fonts.len() - 1) } /// Retains the section in the cache as if it had been used in the last draw-frame. /// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). pub fn
<'a, S, G>(&mut self, section: S, custom_layout: &G) where S: Into<Cow<'a, VariedSection<'a>>>, G: GlyphPositioner, { if !self.cache_glyph_positioning { return; } let section = section.into(); if cfg!(debug_assertions) { for text in &section.text { assert!(self.fonts.len() > text.font_id.0, "Invalid font id"); } } self.keep_in_cache .insert(self.hash(&(section, custom_layout))); } /// Retains the section in the cache as if it had been used in the last draw-frame. /// /// Should not generally be necessary, see [caching behaviour](#caching-behaviour). pub fn keep_cached<'a, S>(&mut self, section: S) where S: Into<Cow<'a, VariedSection<'a>>>, { let section = section.into(); let layout = section.layout; self.keep_cached_custom_layout(section, &layout); } } #[derive(Debug, Default)] struct LastDrawInfo { text_state: u64, } // glyph: &PositionedGlyph, // color: Color, // font_id: FontId, // cache: &Cache, // bounds: Rect<f32>, // z: f32, // (screen_width, screen_height): (f32, f32), /// Data used to generate vertex information for a single glyph #[derive(Debug)] pub struct GlyphVertex { pub tex_coords: Rect<f32>, pub pixel_coords: Rect<i32>, pub bounds: Rect<f32>, pub screen_dimensions: (f32, f32), pub color: Color, pub z: f32, } /// Actions that should be taken after processing queue data pub enum BrushAction<V> { /// Draw new/changed vertix data. Draw(Vec<V>), /// Re-draw last frame's vertices unmodified. ReDraw, } #[derive(Debug)] pub enum BrushError { /// Texture is too small to cache queued glyphs /// /// A larger suggested size is included. TextureTooSmall { suggested: (u32, u32) }, } impl fmt::Display for BrushError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", std::error::Error::description(self)) } } impl std::error::Error for BrushError { fn description(&self) -> &str { match self { BrushError::TextureTooSmall { .. } => "Texture is too small to cache queued glyphs", } } }
keep_cached_custom_layout
identifier_name
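The `identifier_name` middle above is `keep_cached_custom_layout`; per its doc comment, `process_queued` drops a section's cached layout once that section goes a frame without being queued. A hedged sketch of the plain-layout variant `keep_cached` keeping a temporarily hidden section warm; the tooltip text, visibility flag, and no-op closures are illustrative only.

```rust
use glyph_brush::{BrushError, GlyphBrushBuilder, Section};

fn main() -> Result<(), BrushError> {
    let dejavu: &[u8] = include_bytes!("../../fonts/DejaVuSans.ttf");
    let mut glyph_brush = GlyphBrushBuilder::using_font_bytes(dejavu).build();

    let tooltip = Section {
        text: "expensive-to-layout tooltip",
        ..Section::default()
    };

    // Illustrative flag: the tooltip is hidden this frame but will reappear shortly.
    let tooltip_visible = false;

    if tooltip_visible {
        glyph_brush.queue(tooltip); // drawn and cached as usual
    } else {
        // Not drawn this frame, but its positioned glyphs stay in the cache so the
        // next visible frame reuses them instead of recomputing the layout.
        glyph_brush.keep_cached(tooltip);
    }

    glyph_brush.process_queued((1024, 768), |_rect, _tex_data| {}, |_vertex| ())?;
    Ok(())
}
```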
main.go
// Copyright 2018 Goole Inc. // Copyright 2020 Tobias Schwarz // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Binary explorer demo. Displays widgets for insights of blockchain behaviour. // Exist when 'q' or 'esc' is pressed. package main import ( "context" "flag" "fmt" "strconv" "strings" "time" "log" "gopkg.in/resty.v1" "github.com/google/uuid" "github.com/jedib0t/go-pretty/table" ga "github.com/ozgur-soft/google-analytics/src" "github.com/sacOO7/gowebsocket" "github.com/tidwall/gjson" "github.com/mum4k/termdash" "github.com/mum4k/termdash/cell" "github.com/mum4k/termdash/container" "github.com/mum4k/termdash/keyboard" "github.com/mum4k/termdash/linestyle" "github.com/mum4k/termdash/terminal/termbox" "github.com/mum4k/termdash/terminal/terminalapi" "github.com/mum4k/termdash/widgets/gauge" "github.com/mum4k/termdash/widgets/text" ) const ( appRPC = "http://localhost" tendermintRPC = "https://rpc.cosmos.network/" ) var givenPort = flag.String("p", "26657", "port to connect to as a string") func main() { view() connectionSignal := make(chan string) t, err := termbox.New() if err != nil { panic(err) } defer t.Close() flag.Parse() networkInfo := getFromRPC("status") networkStatus := gjson.Parse(networkInfo) if !networkStatus.Exists() { panic("Application not running on localhost:" + fmt.Sprintf("%s", *givenPort)) } ctx, cancel := context.WithCancel(context.Background()) // Blocks parsing widget blocksWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := blocksWidget.Write("Latest block height " + networkStatus.Get("result.sync_info.latest_block_height").String() + "\n"); err != nil { panic(err) } // Transaction parsing widget transactionWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := transactionWidget.Write("Transactions will appear as soon as they are confirmed in a block.\n\n"); err != nil { panic(err) } validatorWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := validatorWidget.Write("List available validators.\n\n"); err != nil { panic(err) } peerWidget, err := text.New() if err != nil { panic(err) } if err := peerWidget.Write("0"); err != nil { panic(err) } healthWidget, err := text.New() if err != nil { panic(err) } if err := healthWidget.Write("🔴 no connection"); err != nil { panic(err) } timeWidget, err := text.New() if err != nil { panic(err) } currentTime := time.Now() if err := timeWidget.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } maxBlocksizeWidget, err := text.New() maxBlockSize := gjson.Get(getFromRPC("consensus_params"), "result.consensus_params.block.max_bytes").Int() if err != nil { panic(err) } if err := maxBlocksizeWidget.Write(fmt.Sprintf("%s", byteCountDecimal(maxBlockSize))); err != nil { panic(err) } // system powered widgets go writeTime(ctx, timeWidget, 1*time.Second) // rpc widgets go writePeers(ctx, peerWidget, 1*time.Second) go writeHealth(ctx, 
healthWidget, 500*time.Millisecond, connectionSignal) // websocket powered widgets go writeValidators(ctx, validatorWidget, connectionSignal) go writeBlocks(ctx, blocksWidget, connectionSignal) go writeTransactions(ctx, transactionWidget, connectionSignal) // blockchain download gauge syncWidget, err := gauge.New( gauge.Height(1), gauge.Color(cell.ColorBlue), gauge.Border(linestyle.Light), gauge.BorderTitle("Blockchain download %"), ) if err != nil { panic(err) } if networkStatus.Get("result.sync_info.catching_up").String() == "false" { if err := syncWidget.Absolute(100, 100); err != nil { panic(err) } } else { if networkStatus.Get("result.node_info.network").String() == "cosmoshub-3" { go syncGauge(ctx, syncWidget, networkStatus.Get("result.sync_info.latest_block_height").Int()) } else { // There is no way to detect maximum height in the network via RPC or websocket yet if err := syncWidget.Absolute(70, 100); err != nil { panic(err) } } } // Draw Dashboard c, err := container.New( t, container.Border(linestyle.Light), container.BorderTitle("PRESS Q or ESC TO QUIT | Network "+networkStatus.Get("result.node_info.network").String()+" Version "+networkStatus.Get("result.node_info.version").String()), container.BorderColor(cell.ColorNumber(2)), container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Health"), container.PlaceWidget(healthWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("System Time"), container.PlaceWidget(timeWidget), ), ), ), container.Right( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Max Block Size"), container.PlaceWidget(maxBlocksizeWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Connected Peers"), container.PlaceWidget(peerWidget), ), ), ), ), ), container.Bottom( container.PlaceWidget(syncWidget), ), ), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Validators"), container.PlaceWidget(validatorWidget), ), ), ), container.Bottom( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Latest Blocks"), container.PlaceWidget(blocksWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Latest Confirmed Transactions"), container.PlaceWidget(transactionWidget), ), ), ), ), ) if err != nil { panic(err) } quitter := func(k *terminalapi.Keyboard) { if k.Key == 'q' || k.Key == 'Q' || k.Key == keyboard.KeyEsc { cancel() } } if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil { panic(err) } } func getFromRPC(endpoint string) string { port := *givenPort resp, _ := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(appRPC + ":" + port + "/" + endpoint) return resp.String()
func getTendermintRPC(endpoint string) string { resp, err := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(tendermintRPC + endpoint) if err != nil { panic(err) } return resp.String() } // writeTime writes the current system time to the timeWidget. // Exits when the context expires. func writeTime(ctx context.Context, t *text.Text, delay time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: currentTime := time.Now() t.Reset() if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } case <-ctx.Done(): return } } } // writeHealth writes the status to the healthWidget. // Exits when the context expires. func writeHealth(ctx context.Context, t *text.Text, delay time.Duration, connectionSignal chan string) { reconnect := false health := gjson.Get(getFromRPC("health"), "result") t.Reset() if health.Exists() { t.Write("🟢 good") } else { t.Write("🔴 no connection") } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: health := gjson.Get(getFromRPC("health"), "result") if health.Exists() { t.Write("🟢 good") if reconnect == true { connectionSignal <- "reconnect" connectionSignal <- "reconnect" connectionSignal <- "reconnect" reconnect = false } } else { t.Write("🔴 no connection") if reconnect == false { connectionSignal <- "no_connection" connectionSignal <- "no_connection" connectionSignal <- "no_connection" reconnect = true } } case <-ctx.Done(): return } } } // writePeers writes the connected Peers to the peerWidget. // Exits when the context expires. func writePeers(ctx context.Context, t *text.Text, delay time.Duration) { peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() t.Reset() if peers != "" { t.Write(peers) } if err := t.Write(peers); err != nil { panic(err) } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: t.Reset() peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() if peers != "" { t.Write(peers) } case <-ctx.Done(): return } } } // writeTransactions writes the latest Transactions to the transactionsWidget. // Exits when the context expires. func writeTransactions(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentTx := gjson.Get(message, "result.data.value.TxResult.result.log") currentTime := time.Now() if currentTx.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02 03:04:05 PM")+"\n"+currentTx.String())); err != nil { panic(err) } } } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='Tx'\"], \"id\": 2 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeTransactions(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // writeBlocks writes the latest Block to the blocksWidget. // Exits when the context expires. 
func writeBlocks(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentBlock := gjson.Get(message, "result.data.value.block.header.height") if currentBlock.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", "Latest block height "+currentBlock.String())); err != nil { panic(err) } } } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='NewBlock'\"], \"id\": 1 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeBlocks(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // writeValidators writes the current validator set to the validatoWidget // Exits when the context expires. func writeValidators(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnConnected = func(socket gowebsocket.Socket) { validators := gjson.Get(getFromRPC("validators"), "result.validators") t.Reset() i := 1 validators.ForEach(func(key, validator gjson.Result) bool { ta := table.NewWriter() ta.AppendRow([]interface{}{fmt.Sprintf("%d", i), validator.Get("address").String(), validator.Get("voting_power").String()}) if err := t.Write(fmt.Sprintf("%s\n", ta.Render())); err != nil { panic(err) } i++ return true // keep iterating }) } socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { validators := gjson.Get(getFromRPC("validators"), "result.validators") t.Reset() i := 1 validators.ForEach(func(key, validator gjson.Result) bool { ta := table.NewWriter() ta.AppendRow([]interface{}{fmt.Sprintf("%d", i), validator.Get("address").String(), validator.Get("voting_power").String()}) if err := t.Write(fmt.Sprintf("%s\n", ta.Render())); err != nil { panic(err) } i++ return true // keep iterating }) } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='ValidatorSetUpdates'\"], \"id\": 3 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeValidators(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // syncGauge displays the syncing status in the syncWidget // Exits when the context expires. 
func syncGauge(ctx context.Context, g *gauge.Gauge, blockHeight int64) { var progress int64 = 0 ticker := time.NewTicker(1000 * time.Millisecond) defer ticker.Stop() for { select { case <-ticker.C: maxHeight := gjson.Get(getTendermintRPC("consensus_state"), "result.round_state.height/round/step").String() maxHeightOnly := strings.Split(maxHeight, "/")[0] n, err := strconv.ParseInt(maxHeightOnly, 10, 64) if err != nil { panic(err) } progress = (blockHeight / n) * 100 if err := g.Absolute(int(progress), 100); err != nil { panic(err) } case <-ctx.Done(): return } } } // byteCountDecimal calculates bytes integer to a human readable decimal number func byteCountDecimal(b int64) string { const unit = 1000 if b < unit { return fmt.Sprintf("%d B", b) } div, exp := int64(unit), 0 for n := b / unit; n >= unit; n /= unit { div *= unit exp++ } return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp]) } func view() { api := new(ga.API) api.ContentType = "application/x-www-form-urlencoded" client := new(ga.Client) client.ProtocolVersion = "1" client.ClientID = uuid.New().String() client.TrackingID = "UA-183957259-1" client.HitType = "event" client.DocumentLocationURL = "https://github.com/cosmos/gex" client.DocumentTitle = "Dashboard" client.DocumentEncoding = "UTF-8" client.EventCategory = "Start" client.EventAction = "Dashboard" client.EventLabel = "start" api.Send(client) }
}
random_line_split
main.go
// Copyright 2018 Goole Inc. // Copyright 2020 Tobias Schwarz // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Binary explorer demo. Displays widgets for insights of blockchain behaviour. // Exist when 'q' or 'esc' is pressed. package main import ( "context" "flag" "fmt" "strconv" "strings" "time" "log" "gopkg.in/resty.v1" "github.com/google/uuid" "github.com/jedib0t/go-pretty/table" ga "github.com/ozgur-soft/google-analytics/src" "github.com/sacOO7/gowebsocket" "github.com/tidwall/gjson" "github.com/mum4k/termdash" "github.com/mum4k/termdash/cell" "github.com/mum4k/termdash/container" "github.com/mum4k/termdash/keyboard" "github.com/mum4k/termdash/linestyle" "github.com/mum4k/termdash/terminal/termbox" "github.com/mum4k/termdash/terminal/terminalapi" "github.com/mum4k/termdash/widgets/gauge" "github.com/mum4k/termdash/widgets/text" ) const ( appRPC = "http://localhost" tendermintRPC = "https://rpc.cosmos.network/" ) var givenPort = flag.String("p", "26657", "port to connect to as a string") func main() { view() connectionSignal := make(chan string) t, err := termbox.New() if err != nil { panic(err) } defer t.Close() flag.Parse() networkInfo := getFromRPC("status") networkStatus := gjson.Parse(networkInfo) if !networkStatus.Exists() { panic("Application not running on localhost:" + fmt.Sprintf("%s", *givenPort)) } ctx, cancel := context.WithCancel(context.Background()) // Blocks parsing widget blocksWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := blocksWidget.Write("Latest block height " + networkStatus.Get("result.sync_info.latest_block_height").String() + "\n"); err != nil { panic(err) } // Transaction parsing widget transactionWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := transactionWidget.Write("Transactions will appear as soon as they are confirmed in a block.\n\n"); err != nil { panic(err) } validatorWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := validatorWidget.Write("List available validators.\n\n"); err != nil { panic(err) } peerWidget, err := text.New() if err != nil { panic(err) } if err := peerWidget.Write("0"); err != nil { panic(err) } healthWidget, err := text.New() if err != nil { panic(err) } if err := healthWidget.Write("🔴 no connection"); err != nil { panic(err) } timeWidget, err := text.New() if err != nil { panic(err) } currentTime := time.Now() if err := timeWidget.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } maxBlocksizeWidget, err := text.New() maxBlockSize := gjson.Get(getFromRPC("consensus_params"), "result.consensus_params.block.max_bytes").Int() if err != nil { panic(err) } if err := maxBlocksizeWidget.Write(fmt.Sprintf("%s", byteCountDecimal(maxBlockSize))); err != nil { panic(err) } // system powered widgets go writeTime(ctx, timeWidget, 1*time.Second) // rpc widgets go writePeers(ctx, peerWidget, 1*time.Second) go writeHealth(ctx, 
healthWidget, 500*time.Millisecond, connectionSignal) // websocket powered widgets go writeValidators(ctx, validatorWidget, connectionSignal) go writeBlocks(ctx, blocksWidget, connectionSignal) go writeTransactions(ctx, transactionWidget, connectionSignal) // blockchain download gauge syncWidget, err := gauge.New( gauge.Height(1), gauge.Color(cell.ColorBlue), gauge.Border(linestyle.Light), gauge.BorderTitle("Blockchain download %"), ) if err != nil { panic(err) } if networkStatus.Get("result.sync_info.catching_up").String() == "false" { if err := syncWidget.Absolute(100, 100); err != nil { panic(err) } } else { if networkStatus.Get("result.node_info.network").String() == "cosmoshub-3" { go syncGauge(ctx, syncWidget, networkStatus.Get("result.sync_info.latest_block_height").Int()) } else { // There is no way to detect maximum height in the network via RPC or websocket yet if err := syncWidget.Absolute(70, 100); err != nil { panic(err) } } } // Draw Dashboard c, err := container.New( t, container.Border(linestyle.Light), container.BorderTitle("PRESS Q or ESC TO QUIT | Network "+networkStatus.Get("result.node_info.network").String()+" Version "+networkStatus.Get("result.node_info.version").String()), container.BorderColor(cell.ColorNumber(2)), container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Health"), container.PlaceWidget(healthWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("System Time"), container.PlaceWidget(timeWidget), ), ), ), container.Right( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Max Block Size"), container.PlaceWidget(maxBlocksizeWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Connected Peers"), container.PlaceWidget(peerWidget), ), ), ), ), ), container.Bottom( container.PlaceWidget(syncWidget), ), ), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Validators"), container.PlaceWidget(validatorWidget), ), ), ), container.Bottom( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Latest Blocks"), container.PlaceWidget(blocksWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Latest Confirmed Transactions"), container.PlaceWidget(transactionWidget), ), ), ), ), ) if err != nil { panic(err) } quitter := func(k *terminalapi.Keyboard) { if k.Key == 'q' || k.Key == 'Q' || k.Key == keyboard.KeyEsc { cancel() } } if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil { panic(err) } } func getFromRPC(endpoint string) string { port := *givenPort resp, _ := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(appRPC + ":" + port + "/" + endpoint) return resp.String() } func getTendermintRPC(endpoint string) string { resp, err := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(tendermintRPC + endpoint) if err != nil { panic(err) } return resp.String() } // writeTime writes the current system time to the timeWidget. // Exits when the context expires. 
func writeTime(ctx context.Context, t *text.Text, delay time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: currentTime := time.Now() t.Reset() if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } case <-ctx.Done(): return } } } // writeHealth writes the status to the healthWidget. // Exits when the context expires. func writeHealth(ctx context.Context, t *text.Text, delay time.Duration, connectionSignal chan string) { reconnect := false health := gjson.Get(getFromRPC("health"), "result") t.Reset() if health.Exists() { t.Write("🟢 good") } else { t.Write("🔴 no connection") } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: health := gjson.Get(getFromRPC("health"), "result") if health.Exists() { t.Write("🟢 good") if reconnect == true { connectionSignal <- "reconnect" connectionSignal <- "reconnect" connectionSignal <- "reconnect" reconnect = false } } else { t.Write("🔴 no connection") if reconnect == false { connectionSignal <- "no_connection" connectionSignal <- "no_connection" connectionSignal <- "no_connection" reconnect = true } } case <-ctx.Done(): return } } } // writePeers writes the connected Peers to the peerWidget. // Exits when the context expires. func writePeers(ctx
ntext, t *text.Text, delay time.Duration) { peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() t.Reset() if peers != "" { t.Write(peers) } if err := t.Write(peers); err != nil { panic(err) } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: t.Reset() peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() if peers != "" { t.Write(peers) } case <-ctx.Done(): return } } } // writeTransactions writes the latest Transactions to the transactionsWidget. // Exits when the context expires. func writeTransactions(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentTx := gjson.Get(message, "result.data.value.TxResult.result.log") currentTime := time.Now() if currentTx.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02 03:04:05 PM")+"\n"+currentTx.String())); err != nil { panic(err) } } } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='Tx'\"], \"id\": 2 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeTransactions(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // writeBlocks writes the latest Block to the blocksWidget. // Exits when the context expires. func writeBlocks(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentBlock := gjson.Get(message, "result.data.value.block.header.height") if currentBlock.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", "Latest block height "+currentBlock.String())); err != nil { panic(err) } } } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='NewBlock'\"], \"id\": 1 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeBlocks(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // writeValidators writes the current validator set to the validatoWidget // Exits when the context expires. 
func writeValidators(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnConnected = func(socket gowebsocket.Socket) { validators := gjson.Get(getFromRPC("validators"), "result.validators") t.Reset() i := 1 validators.ForEach(func(key, validator gjson.Result) bool { ta := table.NewWriter() ta.AppendRow([]interface{}{fmt.Sprintf("%d", i), validator.Get("address").String(), validator.Get("voting_power").String()}) if err := t.Write(fmt.Sprintf("%s\n", ta.Render())); err != nil { panic(err) } i++ return true // keep iterating }) } socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { validators := gjson.Get(getFromRPC("validators"), "result.validators") t.Reset() i := 1 validators.ForEach(func(key, validator gjson.Result) bool { ta := table.NewWriter() ta.AppendRow([]interface{}{fmt.Sprintf("%d", i), validator.Get("address").String(), validator.Get("voting_power").String()}) if err := t.Write(fmt.Sprintf("%s\n", ta.Render())); err != nil { panic(err) } i++ return true // keep iterating }) } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='ValidatorSetUpdates'\"], \"id\": 3 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeValidators(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // syncGauge displays the syncing status in the syncWidget // Exits when the context expires. func syncGauge(ctx context.Context, g *gauge.Gauge, blockHeight int64) { var progress int64 = 0 ticker := time.NewTicker(1000 * time.Millisecond) defer ticker.Stop() for { select { case <-ticker.C: maxHeight := gjson.Get(getTendermintRPC("consensus_state"), "result.round_state.height/round/step").String() maxHeightOnly := strings.Split(maxHeight, "/")[0] n, err := strconv.ParseInt(maxHeightOnly, 10, 64) if err != nil { panic(err) } progress = (blockHeight / n) * 100 if err := g.Absolute(int(progress), 100); err != nil { panic(err) } case <-ctx.Done(): return } } } // byteCountDecimal calculates bytes integer to a human readable decimal number func byteCountDecimal(b int64) string { const unit = 1000 if b < unit { return fmt.Sprintf("%d B", b) } div, exp := int64(unit), 0 for n := b / unit; n >= unit; n /= unit { div *= unit exp++ } return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp]) } func view() { api := new(ga.API) api.ContentType = "application/x-www-form-urlencoded" client := new(ga.Client) client.ProtocolVersion = "1" client.ClientID = uuid.New().String() client.TrackingID = "UA-183957259-1" client.HitType = "event" client.DocumentLocationURL = "https://github.com/cosmos/gex" client.DocumentTitle = "Dashboard" client.DocumentEncoding = "UTF-8" client.EventCategory = "Start" client.EventAction = "Dashboard" client.EventLabel = "start" api.Send(client) }
context.Co
identifier_name
main.go
// Copyright 2018 Goole Inc. // Copyright 2020 Tobias Schwarz // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Binary explorer demo. Displays widgets for insights of blockchain behaviour. // Exist when 'q' or 'esc' is pressed. package main import ( "context" "flag" "fmt" "strconv" "strings" "time" "log" "gopkg.in/resty.v1" "github.com/google/uuid" "github.com/jedib0t/go-pretty/table" ga "github.com/ozgur-soft/google-analytics/src" "github.com/sacOO7/gowebsocket" "github.com/tidwall/gjson" "github.com/mum4k/termdash" "github.com/mum4k/termdash/cell" "github.com/mum4k/termdash/container" "github.com/mum4k/termdash/keyboard" "github.com/mum4k/termdash/linestyle" "github.com/mum4k/termdash/terminal/termbox" "github.com/mum4k/termdash/terminal/terminalapi" "github.com/mum4k/termdash/widgets/gauge" "github.com/mum4k/termdash/widgets/text" ) const ( appRPC = "http://localhost" tendermintRPC = "https://rpc.cosmos.network/" ) var givenPort = flag.String("p", "26657", "port to connect to as a string") func main()
unc getFromRPC(endpoint string) string { port := *givenPort resp, _ := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(appRPC + ":" + port + "/" + endpoint) return resp.String() } func getTendermintRPC(endpoint string) string { resp, err := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(tendermintRPC + endpoint) if err != nil { panic(err) } return resp.String() } // writeTime writes the current system time to the timeWidget. // Exits when the context expires. func writeTime(ctx context.Context, t *text.Text, delay time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: currentTime := time.Now() t.Reset() if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } case <-ctx.Done(): return } } } // writeHealth writes the status to the healthWidget. // Exits when the context expires. func writeHealth(ctx context.Context, t *text.Text, delay time.Duration, connectionSignal chan string) { reconnect := false health := gjson.Get(getFromRPC("health"), "result") t.Reset() if health.Exists() { t.Write("🟢 good") } else { t.Write("🔴 no connection") } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: health := gjson.Get(getFromRPC("health"), "result") if health.Exists() { t.Write("🟢 good") if reconnect == true { connectionSignal <- "reconnect" connectionSignal <- "reconnect" connectionSignal <- "reconnect" reconnect = false } } else { t.Write("🔴 no connection") if reconnect == false { connectionSignal <- "no_connection" connectionSignal <- "no_connection" connectionSignal <- "no_connection" reconnect = true } } case <-ctx.Done(): return } } } // writePeers writes the connected Peers to the peerWidget. // Exits when the context expires. func writePeers(ctx context.Context, t *text.Text, delay time.Duration) { peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() t.Reset() if peers != "" { t.Write(peers) } if err := t.Write(peers); err != nil { panic(err) } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: t.Reset() peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() if peers != "" { t.Write(peers) } case <-ctx.Done(): return } } } // writeTransactions writes the latest Transactions to the transactionsWidget. // Exits when the context expires. func writeTransactions(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentTx := gjson.Get(message, "result.data.value.TxResult.result.log") currentTime := time.Now() if currentTx.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02 03:04:05 PM")+"\n"+currentTx.String())); err != nil { panic(err) } } } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='Tx'\"], \"id\": 2 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeTransactions(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // writeBlocks writes the latest Block to the blocksWidget. // Exits when the context expires. 
func writeBlocks(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentBlock := gjson.Get(message, "result.data.value.block.header.height") if currentBlock.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", "Latest block height "+currentBlock.String())); err != nil { panic(err) } } } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='NewBlock'\"], \"id\": 1 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeBlocks(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // writeValidators writes the current validator set to the validatoWidget // Exits when the context expires. func writeValidators(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnConnected = func(socket gowebsocket.Socket) { validators := gjson.Get(getFromRPC("validators"), "result.validators") t.Reset() i := 1 validators.ForEach(func(key, validator gjson.Result) bool { ta := table.NewWriter() ta.AppendRow([]interface{}{fmt.Sprintf("%d", i), validator.Get("address").String(), validator.Get("voting_power").String()}) if err := t.Write(fmt.Sprintf("%s\n", ta.Render())); err != nil { panic(err) } i++ return true // keep iterating }) } socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { validators := gjson.Get(getFromRPC("validators"), "result.validators") t.Reset() i := 1 validators.ForEach(func(key, validator gjson.Result) bool { ta := table.NewWriter() ta.AppendRow([]interface{}{fmt.Sprintf("%d", i), validator.Get("address").String(), validator.Get("voting_power").String()}) if err := t.Write(fmt.Sprintf("%s\n", ta.Render())); err != nil { panic(err) } i++ return true // keep iterating }) } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='ValidatorSetUpdates'\"], \"id\": 3 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeValidators(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // syncGauge displays the syncing status in the syncWidget // Exits when the context expires. 
func syncGauge(ctx context.Context, g *gauge.Gauge, blockHeight int64) { var progress int64 = 0 ticker := time.NewTicker(1000 * time.Millisecond) defer ticker.Stop() for { select { case <-ticker.C: maxHeight := gjson.Get(getTendermintRPC("consensus_state"), "result.round_state.height/round/step").String() maxHeightOnly := strings.Split(maxHeight, "/")[0] n, err := strconv.ParseInt(maxHeightOnly, 10, 64) if err != nil { panic(err) } progress = (blockHeight / n) * 100 if err := g.Absolute(int(progress), 100); err != nil { panic(err) } case <-ctx.Done(): return } } } // byteCountDecimal calculates bytes integer to a human readable decimal number func byteCountDecimal(b int64) string { const unit = 1000 if b < unit { return fmt.Sprintf("%d B", b) } div, exp := int64(unit), 0 for n := b / unit; n >= unit; n /= unit { div *= unit exp++ } return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp]) } func view() { api := new(ga.API) api.ContentType = "application/x-www-form-urlencoded" client := new(ga.Client) client.ProtocolVersion = "1" client.ClientID = uuid.New().String() client.TrackingID = "UA-183957259-1" client.HitType = "event" client.DocumentLocationURL = "https://github.com/cosmos/gex" client.DocumentTitle = "Dashboard" client.DocumentEncoding = "UTF-8" client.EventCategory = "Start" client.EventAction = "Dashboard" client.EventLabel = "start" api.Send(client) }
{ view() connectionSignal := make(chan string) t, err := termbox.New() if err != nil { panic(err) } defer t.Close() flag.Parse() networkInfo := getFromRPC("status") networkStatus := gjson.Parse(networkInfo) if !networkStatus.Exists() { panic("Application not running on localhost:" + fmt.Sprintf("%s", *givenPort)) } ctx, cancel := context.WithCancel(context.Background()) // Blocks parsing widget blocksWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := blocksWidget.Write("Latest block height " + networkStatus.Get("result.sync_info.latest_block_height").String() + "\n"); err != nil { panic(err) } // Transaction parsing widget transactionWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := transactionWidget.Write("Transactions will appear as soon as they are confirmed in a block.\n\n"); err != nil { panic(err) } validatorWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := validatorWidget.Write("List available validators.\n\n"); err != nil { panic(err) } peerWidget, err := text.New() if err != nil { panic(err) } if err := peerWidget.Write("0"); err != nil { panic(err) } healthWidget, err := text.New() if err != nil { panic(err) } if err := healthWidget.Write("🔴 no connection"); err != nil { panic(err) } timeWidget, err := text.New() if err != nil { panic(err) } currentTime := time.Now() if err := timeWidget.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } maxBlocksizeWidget, err := text.New() maxBlockSize := gjson.Get(getFromRPC("consensus_params"), "result.consensus_params.block.max_bytes").Int() if err != nil { panic(err) } if err := maxBlocksizeWidget.Write(fmt.Sprintf("%s", byteCountDecimal(maxBlockSize))); err != nil { panic(err) } // system powered widgets go writeTime(ctx, timeWidget, 1*time.Second) // rpc widgets go writePeers(ctx, peerWidget, 1*time.Second) go writeHealth(ctx, healthWidget, 500*time.Millisecond, connectionSignal) // websocket powered widgets go writeValidators(ctx, validatorWidget, connectionSignal) go writeBlocks(ctx, blocksWidget, connectionSignal) go writeTransactions(ctx, transactionWidget, connectionSignal) // blockchain download gauge syncWidget, err := gauge.New( gauge.Height(1), gauge.Color(cell.ColorBlue), gauge.Border(linestyle.Light), gauge.BorderTitle("Blockchain download %"), ) if err != nil { panic(err) } if networkStatus.Get("result.sync_info.catching_up").String() == "false" { if err := syncWidget.Absolute(100, 100); err != nil { panic(err) } } else { if networkStatus.Get("result.node_info.network").String() == "cosmoshub-3" { go syncGauge(ctx, syncWidget, networkStatus.Get("result.sync_info.latest_block_height").Int()) } else { // There is no way to detect maximum height in the network via RPC or websocket yet if err := syncWidget.Absolute(70, 100); err != nil { panic(err) } } } // Draw Dashboard c, err := container.New( t, container.Border(linestyle.Light), container.BorderTitle("PRESS Q or ESC TO QUIT | Network "+networkStatus.Get("result.node_info.network").String()+" Version "+networkStatus.Get("result.node_info.version").String()), container.BorderColor(cell.ColorNumber(2)), container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Health"), 
container.PlaceWidget(healthWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("System Time"), container.PlaceWidget(timeWidget), ), ), ), container.Right( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Max Block Size"), container.PlaceWidget(maxBlocksizeWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Connected Peers"), container.PlaceWidget(peerWidget), ), ), ), ), ), container.Bottom( container.PlaceWidget(syncWidget), ), ), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Validators"), container.PlaceWidget(validatorWidget), ), ), ), container.Bottom( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Latest Blocks"), container.PlaceWidget(blocksWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Latest Confirmed Transactions"), container.PlaceWidget(transactionWidget), ), ), ), ), ) if err != nil { panic(err) } quitter := func(k *terminalapi.Keyboard) { if k.Key == 'q' || k.Key == 'Q' || k.Key == keyboard.KeyEsc { cancel() } } if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil { panic(err) } } f
identifier_body
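The record above ends with syncGauge, which computes the download percentage as (blockHeight / n) * 100 on int64 values; dividing first floors the ratio to zero whenever the local height is below the network height. A minimal sketch of the same arithmetic with the multiplication done first (the original is Go; this Rust restatement, its function name, and the clamping are mine, not part of the source):

// Illustrative restatement of the percentage maths used by syncGauge above.
fn sync_progress(local_height: i64, network_height: i64) -> i64 {
    if network_height <= 0 {
        return 0; // unknown or bogus remote height: show an empty gauge
    }
    // Multiply before dividing so integer division does not floor the ratio to 0,
    // then clamp to the 0..=100 range the gauge expects.
    (local_height.saturating_mul(100) / network_height).clamp(0, 100)
}

// e.g. sync_progress(1_500_000, 3_000_000) == 50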
main.go
// Copyright 2018 Goole Inc. // Copyright 2020 Tobias Schwarz // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Binary explorer demo. Displays widgets for insights of blockchain behaviour. // Exist when 'q' or 'esc' is pressed. package main import ( "context" "flag" "fmt" "strconv" "strings" "time" "log" "gopkg.in/resty.v1" "github.com/google/uuid" "github.com/jedib0t/go-pretty/table" ga "github.com/ozgur-soft/google-analytics/src" "github.com/sacOO7/gowebsocket" "github.com/tidwall/gjson" "github.com/mum4k/termdash" "github.com/mum4k/termdash/cell" "github.com/mum4k/termdash/container" "github.com/mum4k/termdash/keyboard" "github.com/mum4k/termdash/linestyle" "github.com/mum4k/termdash/terminal/termbox" "github.com/mum4k/termdash/terminal/terminalapi" "github.com/mum4k/termdash/widgets/gauge" "github.com/mum4k/termdash/widgets/text" ) const ( appRPC = "http://localhost" tendermintRPC = "https://rpc.cosmos.network/" ) var givenPort = flag.String("p", "26657", "port to connect to as a string") func main() { view() connectionSignal := make(chan string) t, err := termbox.New() if err != nil { panic(err) } defer t.Close() flag.Parse() networkInfo := getFromRPC("status") networkStatus := gjson.Parse(networkInfo) if !networkStatus.Exists() { panic("Application not running on localhost:" + fmt.Sprintf("%s", *givenPort)) } ctx, cancel := context.WithCancel(context.Background()) // Blocks parsing widget blocksWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := blocksWidget.Write("Latest block height " + networkStatus.Get("result.sync_info.latest_block_height").String() + "\n"); err != nil { panic(err) } // Transaction parsing widget transactionWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := transactionWidget.Write("Transactions will appear as soon as they are confirmed in a block.\n\n"); err != nil { panic(err) } validatorWidget, err := text.New(text.RollContent(), text.WrapAtWords()) if err != nil { panic(err) } if err := validatorWidget.Write("List available validators.\n\n"); err != nil { panic(err) } peerWidget, err := text.New() if err != nil { panic(err) } if err := peerWidget.Write("0"); err != nil { panic(err) } healthWidget, err := text.New() if err != nil { panic(err) } if err := healthWidget.Write("🔴 no connection"); err != nil { panic(err) } timeWidget, err := text.New() if err != nil { panic(err) } currentTime := time.Now() if err := timeWidget.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } maxBlocksizeWidget, err := text.New() maxBlockSize := gjson.Get(getFromRPC("consensus_params"), "result.consensus_params.block.max_bytes").Int() if err != nil { panic(err) } if err := maxBlocksizeWidget.Write(fmt.Sprintf("%s", byteCountDecimal(maxBlockSize))); err != nil { panic(err) } // system powered widgets go writeTime(ctx, timeWidget, 1*time.Second) // rpc widgets go writePeers(ctx, peerWidget, 1*time.Second) go writeHealth(ctx, 
healthWidget, 500*time.Millisecond, connectionSignal) // websocket powered widgets go writeValidators(ctx, validatorWidget, connectionSignal) go writeBlocks(ctx, blocksWidget, connectionSignal) go writeTransactions(ctx, transactionWidget, connectionSignal) // blockchain download gauge syncWidget, err := gauge.New( gauge.Height(1), gauge.Color(cell.ColorBlue), gauge.Border(linestyle.Light), gauge.BorderTitle("Blockchain download %"), ) if err != nil { panic(err) } if networkStatus.Get("result.sync_info.catching_up").String() == "false" { if err := syncWidget.Absolute(100, 100); err != nil { panic(err) } } else { if networkStatus.Get("result.node_info.network").String() == "cosmoshub-3" { go syncGauge(ctx, syncWidget, networkStatus.Get("result.sync_info.latest_block_height").Int()) } else { // There is no way to detect maximum height in the network via RPC or websocket yet if err := syncWidget.Absolute(70, 100); err != nil { panic(err) } } } // Draw Dashboard c, err := container.New( t, container.Border(linestyle.Light), container.BorderTitle("PRESS Q or ESC TO QUIT | Network "+networkStatus.Get("result.node_info.network").String()+" Version "+networkStatus.Get("result.node_info.version").String()), container.BorderColor(cell.ColorNumber(2)), container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitHorizontal( container.Top( container.SplitVertical( container.Left( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Health"), container.PlaceWidget(healthWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("System Time"), container.PlaceWidget(timeWidget), ), ), ), container.Right( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Max Block Size"), container.PlaceWidget(maxBlocksizeWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Connected Peers"), container.PlaceWidget(peerWidget), ), ), ), ), ), container.Bottom( container.PlaceWidget(syncWidget), ), ), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Validators"), container.PlaceWidget(validatorWidget), ), ), ), container.Bottom( container.SplitVertical( container.Left( container.Border(linestyle.Light), container.BorderTitle("Latest Blocks"), container.PlaceWidget(blocksWidget), ), container.Right( container.Border(linestyle.Light), container.BorderTitle("Latest Confirmed Transactions"), container.PlaceWidget(transactionWidget), ), ), ), ), ) if err != nil { panic(err) } quitter := func(k *terminalapi.Keyboard) { if k.Key == 'q' || k.Key == 'Q' || k.Key == keyboard.KeyEsc { cancel() } } if err := termdash.Run(ctx, t, c, termdash.KeyboardSubscriber(quitter)); err != nil { panic(err) } } func getFromRPC(endpoint string) string { port := *givenPort resp, _ := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(appRPC + ":" + port + "/" + endpoint) return resp.String() } func getTendermintRPC(endpoint string) string { resp, err := resty.R(). SetHeader("Cache-Control", "no-cache"). SetHeader("Content-Type", "application/json"). Get(tendermintRPC + endpoint) if err != nil {
return resp.String() } // writeTime writes the current system time to the timeWidget. // Exits when the context expires. func writeTime(ctx context.Context, t *text.Text, delay time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: currentTime := time.Now() t.Reset() if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02\n03:04:05 PM"))); err != nil { panic(err) } case <-ctx.Done(): return } } } // writeHealth writes the status to the healthWidget. // Exits when the context expires. func writeHealth(ctx context.Context, t *text.Text, delay time.Duration, connectionSignal chan string) { reconnect := false health := gjson.Get(getFromRPC("health"), "result") t.Reset() if health.Exists() { t.Write("🟢 good") } else { t.Write("🔴 no connection") } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: health := gjson.Get(getFromRPC("health"), "result") if health.Exists() { t.Write("🟢 good") if reconnect == true { connectionSignal <- "reconnect" connectionSignal <- "reconnect" connectionSignal <- "reconnect" reconnect = false } } else { t.Write("🔴 no connection") if reconnect == false { connectionSignal <- "no_connection" connectionSignal <- "no_connection" connectionSignal <- "no_connection" reconnect = true } } case <-ctx.Done(): return } } } // writePeers writes the connected Peers to the peerWidget. // Exits when the context expires. func writePeers(ctx context.Context, t *text.Text, delay time.Duration) { peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() t.Reset() if peers != "" { t.Write(peers) } if err := t.Write(peers); err != nil { panic(err) } ticker := time.NewTicker(delay) defer ticker.Stop() for { select { case <-ticker.C: t.Reset() peers := gjson.Get(getFromRPC("net_info"), "result.n_peers").String() if peers != "" { t.Write(peers) } case <-ctx.Done(): return } } } // writeTransactions writes the latest Transactions to the transactionsWidget. // Exits when the context expires. func writeTransactions(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentTx := gjson.Get(message, "result.data.value.TxResult.result.log") currentTime := time.Now() if currentTx.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", currentTime.Format("2006-01-02 03:04:05 PM")+"\n"+currentTx.String())); err != nil { panic(err) } } } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='Tx'\"], \"id\": 2 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeTransactions(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // writeBlocks writes the latest Block to the blocksWidget. // Exits when the context expires. 
func writeBlocks(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { currentBlock := gjson.Get(message, "result.data.value.block.header.height") if currentBlock.String() != "" { if err := t.Write(fmt.Sprintf("%s\n", "Latest block height "+currentBlock.String())); err != nil { panic(err) } } } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='NewBlock'\"], \"id\": 1 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeBlocks(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // writeValidators writes the current validator set to the validatoWidget // Exits when the context expires. func writeValidators(ctx context.Context, t *text.Text, connectionSignal <-chan string) { port := *givenPort socket := gowebsocket.New("ws://localhost:" + port + "/websocket") socket.OnConnected = func(socket gowebsocket.Socket) { validators := gjson.Get(getFromRPC("validators"), "result.validators") t.Reset() i := 1 validators.ForEach(func(key, validator gjson.Result) bool { ta := table.NewWriter() ta.AppendRow([]interface{}{fmt.Sprintf("%d", i), validator.Get("address").String(), validator.Get("voting_power").String()}) if err := t.Write(fmt.Sprintf("%s\n", ta.Render())); err != nil { panic(err) } i++ return true // keep iterating }) } socket.OnTextMessage = func(message string, socket gowebsocket.Socket) { validators := gjson.Get(getFromRPC("validators"), "result.validators") t.Reset() i := 1 validators.ForEach(func(key, validator gjson.Result) bool { ta := table.NewWriter() ta.AppendRow([]interface{}{fmt.Sprintf("%d", i), validator.Get("address").String(), validator.Get("voting_power").String()}) if err := t.Write(fmt.Sprintf("%s\n", ta.Render())); err != nil { panic(err) } i++ return true // keep iterating }) } socket.Connect() socket.SendText("{ \"jsonrpc\": \"2.0\", \"method\": \"subscribe\", \"params\": [\"tm.event='ValidatorSetUpdates'\"], \"id\": 3 }") for { select { case s := <-connectionSignal: if s == "no_connection" { socket.Close() } if s == "reconnect" { writeValidators(ctx, t, connectionSignal) } case <-ctx.Done(): log.Println("interrupt") socket.Close() return } } } // syncGauge displays the syncing status in the syncWidget // Exits when the context expires. 
func syncGauge(ctx context.Context, g *gauge.Gauge, blockHeight int64) { var progress int64 = 0 ticker := time.NewTicker(1000 * time.Millisecond) defer ticker.Stop() for { select { case <-ticker.C: maxHeight := gjson.Get(getTendermintRPC("consensus_state"), "result.round_state.height/round/step").String() maxHeightOnly := strings.Split(maxHeight, "/")[0] n, err := strconv.ParseInt(maxHeightOnly, 10, 64) if err != nil { panic(err) } progress = (blockHeight / n) * 100 if err := g.Absolute(int(progress), 100); err != nil { panic(err) } case <-ctx.Done(): return } } } // byteCountDecimal calculates bytes integer to a human readable decimal number func byteCountDecimal(b int64) string { const unit = 1000 if b < unit { return fmt.Sprintf("%d B", b) } div, exp := int64(unit), 0 for n := b / unit; n >= unit; n /= unit { div *= unit exp++ } return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp]) } func view() { api := new(ga.API) api.ContentType = "application/x-www-form-urlencoded" client := new(ga.Client) client.ProtocolVersion = "1" client.ClientID = uuid.New().String() client.TrackingID = "UA-183957259-1" client.HitType = "event" client.DocumentLocationURL = "https://github.com/cosmos/gex" client.DocumentTitle = "Dashboard" client.DocumentEncoding = "UTF-8" client.EventCategory = "Start" client.EventAction = "Dashboard" client.EventLabel = "start" api.Send(client) }
panic(err) }
conditional_block
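byteCountDecimal in the record above steps the value down by factors of 1000 and picks a unit letter from "kMGTPE". A Rust restatement of that helper, intended to behave the same way; the name byte_count_decimal and the example value are mine, not from the source:

// Format a byte count with decimal (SI) units, mirroring the Go byteCountDecimal helper.
fn byte_count_decimal(bytes: i64) -> String {
    const UNIT: i64 = 1000;
    if bytes < UNIT {
        return format!("{} B", bytes);
    }
    let (mut div, mut exp) = (UNIT, 0usize);
    let mut n = bytes / UNIT;
    while n >= UNIT {
        div *= UNIT;
        exp += 1;
        n /= UNIT;
    }
    // 'k', 'M', 'G', 'T', 'P', 'E': kilo through exa
    format!("{:.1} {}B", bytes as f64 / div as f64, ['k', 'M', 'G', 'T', 'P', 'E'][exp])
}

// e.g. byte_count_decimal(22_020_096) == "22.0 MB"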
shared.rs
/* Copyright 2016 Torbjørn Birch Moltu * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use Nbstr; use nbstr::{MAX_LENGTH,MAX_STACK,LITERAL,BOX}; extern crate std; use std::cmp::Ordering; use std::ops::Deref; use std::str as Str; use std::{mem,slice,ptr, fmt,hash}; use std::borrow::{Borrow,Cow}; /// Protected methods used by the impls below. pub trait Protected { /// create new of this variant with possibly uninitialized data fn new(u8) -> Self; /// store this str, which is either &'static or boxed fn with_pointer(u8, &str) -> Self; fn variant(&self) -> u8; /// get the area of self where (length,pointer)|inline is. fn data(&mut self) -> &mut [u8]; /// the root of AsRef,Borrow and Deref. fn get_slice(&self) -> &[u8]; } //////////////////// // public methods // //////////////////// impl Nbstr { #[cfg(feature="unstable")] /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. pub const MAX_LENGTH: usize = MAX_LENGTH; /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. /// /// This method will get deprecated once associated consts become stable. pub fn max_length() -> usize { MAX_LENGTH } // keeping all public methods under one impl gives cleaner rustdoc /// Create a Nbstr from a borrowed str with a limited lifetime. /// If the str is short enough it will be stored the inside struct itself and not boxed. pub fn from_str(s: &str) -> Self { Self::try_stack(s).unwrap_or_else(|| s.to_owned().into() ) } } //////////////// //Constructors// //////////////// impl Default for Nbstr { fn default() -> Self { let s = unsafe{ slice::from_raw_parts(1 as *const u8, 0) };//pointer must be nonzero Self::with_pointer(LITERAL, unsafe{ mem::transmute(s) }) } } impl From<&'static str> for Nbstr { fn from(s: &'static str) -> Self { Self::with_pointer(LITERAL, s) } } impl Nbstr { fn try_stack(s: &str) -> Option<Self> {match s.len() as u8 { // Cannot have stack str with length 0, as variant might be NonZero 0 => Some(Self::default()), 1...MAX_STACK => { let mut z = Self::new(s.len() as u8); for (d, s) in z.data().iter_mut().zip(s.bytes()) { *d = s; } Some(z) }, _ => None, }} } impl From<Box<str>> for Nbstr { fn from(s: Box<str>) -> Self { // Don't try stack; users might turn it back into a box later let z = if s.is_empty() {Self::default()}// Make it clear we don't own any memory. 
else {Self::with_pointer(BOX, &s)}; mem::forget(s); return z; } } impl From<String> for Nbstr { fn from(s: String) -> Self { if s.capacity() != s.len() {// into_boxed will reallocate if let Some(inline) = Self::try_stack(&s) { return inline;// and drop s } } return Self::from(s.into_boxed_str()); } } impl From<Cow<'static, str>> for Nbstr { fn from(cow: Cow<'static, str>) -> Self {match cow { Cow::Owned(owned) => Self::from(owned), Cow::Borrowed(borrowed) => Self::from(borrowed), }} } impl Clone for Nbstr { fn clone(&self) -> Self { if self.variant() == BOX {// try stack Nbstr::from_str(self.deref()) } else {// copy the un-copyable unsafe{ ptr::read(self) } } } fn clone_from(&mut self, from: &Self) { // keep existing box if possible if self.variant() == BOX && self.len() == from.len() { unsafe{ ptr::copy_nonoverlapping( from.as_ptr(), mem::transmute(self.as_ptr()), self.len() )}; } else { *self = from.clone(); } } } /////////// //Getters// /////////// impl AsRef<[u8]> for Nbstr { fn as_ref(&self) -> &[u8] { self.get_slice() } } impl AsRef<str> for Nbstr { fn as_ref(&self) -> &str { let bytes: &[u8] = self.as_ref(); unsafe{ Str::from_utf8_unchecked( bytes )} } } impl Deref for Nbstr { type Target = str; fn deref(&self) -> &Self::Target { self.as_ref() } } impl Borrow<[u8]> for Nbstr { fn borrow(&self) -> &[u8] { self.as_ref() } } impl Borrow<str> for Nbstr { fn borrow(&self) -> &str { self.as_ref() } } ///////////////// //Common traits// ///////////////// impl hash::Hash for Nbstr { fn hash<H:hash::Hasher>(&self, h: &mut H) { self.deref().hash(h); } } impl fmt::Display for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self.deref(), fmtr) } } impl PartialOrd for Nbstr { fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { self.deref().partial_cmp(rhs.deref()) } } impl Ord for Nbstr { fn cmp(&self, rhs: &Self) -> Ordering { self.deref().cmp(rhs.deref()) } } impl PartialEq for Nbstr { fn eq(&self, rhs: &Self) -> bool { self.deref() == rhs.deref() } } impl Eq for Nbstr {} /// Displays how the string is stored by prepending "stack: ", "literal: " or "boxed: ". impl fmt::Debug for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { write!(fmtr, "{}: {}", match self.variant() { 1...MAX_STACK => "stack", LITERAL => "literal", BOX => "boxed", _ => unreachable!("Unknown variant of Nbstr: {}", self.variant()) }, self.deref()) } } /////////////// //Destructors// /////////////// /// Returns Some if z contains a Box pub fn take_box(z: &mut Nbstr) -> Option<Box<str>> { if z.variant() == BOX { // I asked on #rust, and transmuting from & to mut is apparently undefined behaviour. // Is it really in this case? let s: *mut str = unsafe{ mem::transmute(z.get_slice()) }; // Cannot just assign default; then rust tries to drop the previous value! // .. which then calls this function. 
mem::forget(mem::replace(z, Nbstr::default())); Some(unsafe{ Box::from_raw(s) }) } else { None } } impl From<Nbstr> for Box<str> { fn from(mut z: Nbstr) -> Box<str> { take_box(&mut z).unwrap_or_else(|| z.deref().to_owned().into_boxed_str() ) } } impl From<Nbstr> for String { fn from(mut z: Nbstr) -> String { take_box(&mut z) .map(|b| b.into_string() ) .unwrap_or_else(|| z.deref().to_owned() ) } } impl From<Nbstr> for Cow<'static, str> { fn from(mut z: Nbstr) -> Cow<'static, str> { take_box(&mut z) .map(|b| Cow::from(b.into_string()) ) .unwrap_or_else(|| if z.variant() == LITERAL { let s: &'static str = unsafe{ mem::transmute(z.deref()) }; Cow::from(s) } else { Cow::from(z.deref().to_owned()) } ) } } #[cfg(not(test))]// Bugs in drop might cause stack overflow in suprising places. // The tests below should catch said bugs. impl Drop for Nbstr { fn drop(&mut self) { let _ = take_box(self); } } ////////////////////////////////////////////////////////////////////// // Tests that need private access or tests code that use cfg!(test) // ////////////////////////////////////////////////////////////////////// #[cfg(test)] mod tests { use nbstr::{Nbstr, MAX_STACK}; use super::*; use std::ops::Deref; use std::str as Str; use std::{mem,slice}; const STR: &'static str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; #[test] fn literal() { let mut z = Nbstr::from(STR); assert_eq!(z.deref().len(), STR.len()); assert_eq!(z.deref().as_ptr(), STR.as_ptr()); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), None); } #[test] fn stack() { let s = "abc"; let mut z = Nbstr::from_str(s); assert_eq!(z.deref().len(), s.len()); assert_eq!(z.deref().as_ptr() as usize, z.data().as_ptr() as usize); assert_eq!(z.deref(), s); assert_eq!(take_box(&mut z), None); } #[test]
let b2 = b.clone(); let mut z = Nbstr::from(b); assert_eq!(z.deref().len(), len); assert_eq!(z.deref().as_ptr(), ptr); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), Some(b2.clone())); assert_eq!(take_box(&mut Nbstr::from_str(STR)), Some(b2.clone())); } #[test] fn nuls() {// Is here because MAX_STACK let zeros_bytes = [0; MAX_STACK as usize]; let zeros_str = Str::from_utf8(&zeros_bytes).unwrap(); let zeros = Nbstr::from_str(zeros_str); assert_eq!(zeros.deref(), zeros_str); assert!(Some(zeros).is_some()); } #[test] #[cfg_attr(debug_assertions, should_panic)]// from arithmetic overflow or explicit panic fn too_long() {// Is here because no_giants has a custom panic message for tests, // because the normal one would segfault on the invalid test str. let b: &[u8] = unsafe{slice::from_raw_parts(1 as *const u8, 1+Nbstr::max_length() )}; let s: &'static str = unsafe{ mem::transmute(b) }; mem::forget(Nbstr::from(s)); } }
fn boxed() { let b: Box<str> = STR.to_string().into_boxed_str(); let len = b.len(); let ptr = b.as_ptr();
random_line_split
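shared.rs keeps short strings inline, keeps &'static str borrows as zero-copy literals, and boxes everything else. A small usage sketch of the constructors shown above; the crate path follows the file's own tests, and whether a given string stays inline depends on MAX_STACK, so the comments describe the intended variant rather than a guarantee:

use nbstr::Nbstr; // path as used in the crate's own tests above

fn storage_variants() {
    // &'static str: kept as a borrowed "literal", nothing is copied or allocated.
    let lit = Nbstr::from("hello, world");

    // A short borrowed str is copied inline into the struct (the "stack" variant),
    // provided it fits within MAX_STACK bytes.
    let small = Nbstr::from_str("abc");

    // Owned data that does not fit inline ends up as a boxed str.
    let boxed = Nbstr::from(String::from("a string long enough to need the heap"));

    // The Debug impl above prefixes the storage kind, e.g. "literal: hello, world".
    println!("{:?}\n{:?}\n{:?}", lit, small, boxed);
}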
shared.rs
/* Copyright 2016 Torbjørn Birch Moltu * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use Nbstr; use nbstr::{MAX_LENGTH,MAX_STACK,LITERAL,BOX}; extern crate std; use std::cmp::Ordering; use std::ops::Deref; use std::str as Str; use std::{mem,slice,ptr, fmt,hash}; use std::borrow::{Borrow,Cow}; /// Protected methods used by the impls below. pub trait Protected { /// create new of this variant with possibly uninitialized data fn new(u8) -> Self; /// store this str, which is either &'static or boxed fn with_pointer(u8, &str) -> Self; fn variant(&self) -> u8; /// get the area of self where (length,pointer)|inline is. fn data(&mut self) -> &mut [u8]; /// the root of AsRef,Borrow and Deref. fn get_slice(&self) -> &[u8]; } //////////////////// // public methods // //////////////////// impl Nbstr { #[cfg(feature="unstable")] /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. pub const MAX_LENGTH: usize = MAX_LENGTH; /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. /// /// This method will get deprecated once associated consts become stable. pub fn max_length() -> usize { MAX_LENGTH } // keeping all public methods under one impl gives cleaner rustdoc /// Create a Nbstr from a borrowed str with a limited lifetime. /// If the str is short enough it will be stored the inside struct itself and not boxed. pub fn from_str(s: &str) -> Self { Self::try_stack(s).unwrap_or_else(|| s.to_owned().into() ) } } //////////////// //Constructors// //////////////// impl Default for Nbstr { fn default() -> Self { let s = unsafe{ slice::from_raw_parts(1 as *const u8, 0) };//pointer must be nonzero Self::with_pointer(LITERAL, unsafe{ mem::transmute(s) }) } } impl From<&'static str> for Nbstr { fn from(s: &'static str) -> Self { Self::with_pointer(LITERAL, s) } } impl Nbstr { fn try_stack(s: &str) -> Option<Self> {match s.len() as u8 { // Cannot have stack str with length 0, as variant might be NonZero 0 => Some(Self::default()), 1...MAX_STACK => { let mut z = Self::new(s.len() as u8); for (d, s) in z.data().iter_mut().zip(s.bytes()) { *d = s; } Some(z) }, _ => None, }} } impl From<Box<str>> for Nbstr { fn from(s: Box<str>) -> Self { // Don't try stack; users might turn it back into a box later let z = if s.is_empty() {Self::default()}// Make it clear we don't own any memory. 
else {Self::with_pointer(BOX, &s)}; mem::forget(s); return z; } } impl From<String> for Nbstr { fn from(s: String) -> Self { if s.capacity() != s.len() {// into_boxed will reallocate if let Some(inline) = Self::try_stack(&s) { return inline;// and drop s } } return Self::from(s.into_boxed_str()); } } impl From<Cow<'static, str>> for Nbstr { fn from(cow: Cow<'static, str>) -> Self {match cow { Cow::Owned(owned) => Self::from(owned), Cow::Borrowed(borrowed) => Self::from(borrowed), }} } impl Clone for Nbstr { fn clone(&self) -> Self { if self.variant() == BOX {// try stack Nbstr::from_str(self.deref()) } else {// copy the un-copyable unsafe{ ptr::read(self) } } } fn clone_from(&mut self, from: &Self) { // keep existing box if possible if self.variant() == BOX && self.len() == from.len() { unsafe{ ptr::copy_nonoverlapping( from.as_ptr(), mem::transmute(self.as_ptr()), self.len() )}; } else { *self = from.clone(); } } } /////////// //Getters// /////////// impl AsRef<[u8]> for Nbstr { fn as_ref(&self) -> &[u8] { self.get_slice() } } impl AsRef<str> for Nbstr { fn as_ref(&self) -> &str { let bytes: &[u8] = self.as_ref(); unsafe{ Str::from_utf8_unchecked( bytes )} } } impl Deref for Nbstr { type Target = str; fn deref(&self) -> &Self::Target { self.as_ref() } } impl Borrow<[u8]> for Nbstr { fn borrow(&self) -> &[u8] { self.as_ref() } } impl Borrow<str> for Nbstr { fn borrow(&self) -> &str { self.as_ref() } } ///////////////// //Common traits// ///////////////// impl hash::Hash for Nbstr { fn hash<H:hash::Hasher>(&self, h: &mut H) { self.deref().hash(h); } } impl fmt::Display for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self.deref(), fmtr) } } impl PartialOrd for Nbstr { fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { self.deref().partial_cmp(rhs.deref()) } } impl Ord for Nbstr { fn cmp(&self, rhs: &Self) -> Ordering { self.deref().cmp(rhs.deref()) } } impl PartialEq for Nbstr { fn eq(&self, rhs: &Self) -> bool { self.deref() == rhs.deref() } } impl Eq for Nbstr {} /// Displays how the string is stored by prepending "stack: ", "literal: " or "boxed: ". impl fmt::Debug for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { write!(fmtr, "{}: {}", match self.variant() { 1...MAX_STACK => "stack", LITERAL => "literal", BOX => "boxed", _ => unreachable!("Unknown variant of Nbstr: {}", self.variant()) }, self.deref()) } } /////////////// //Destructors// /////////////// /// Returns Some if z contains a Box pub fn take_box(z: &mut Nbstr) -> Option<Box<str>> {
impl From<Nbstr> for Box<str> { fn from(mut z: Nbstr) -> Box<str> { take_box(&mut z).unwrap_or_else(|| z.deref().to_owned().into_boxed_str() ) } } impl From<Nbstr> for String { fn from(mut z: Nbstr) -> String { take_box(&mut z) .map(|b| b.into_string() ) .unwrap_or_else(|| z.deref().to_owned() ) } } impl From<Nbstr> for Cow<'static, str> { fn from(mut z: Nbstr) -> Cow<'static, str> { take_box(&mut z) .map(|b| Cow::from(b.into_string()) ) .unwrap_or_else(|| if z.variant() == LITERAL { let s: &'static str = unsafe{ mem::transmute(z.deref()) }; Cow::from(s) } else { Cow::from(z.deref().to_owned()) } ) } } #[cfg(not(test))]// Bugs in drop might cause stack overflow in suprising places. // The tests below should catch said bugs. impl Drop for Nbstr { fn drop(&mut self) { let _ = take_box(self); } } ////////////////////////////////////////////////////////////////////// // Tests that need private access or tests code that use cfg!(test) // ////////////////////////////////////////////////////////////////////// #[cfg(test)] mod tests { use nbstr::{Nbstr, MAX_STACK}; use super::*; use std::ops::Deref; use std::str as Str; use std::{mem,slice}; const STR: &'static str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; #[test] fn literal() { let mut z = Nbstr::from(STR); assert_eq!(z.deref().len(), STR.len()); assert_eq!(z.deref().as_ptr(), STR.as_ptr()); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), None); } #[test] fn stack() { let s = "abc"; let mut z = Nbstr::from_str(s); assert_eq!(z.deref().len(), s.len()); assert_eq!(z.deref().as_ptr() as usize, z.data().as_ptr() as usize); assert_eq!(z.deref(), s); assert_eq!(take_box(&mut z), None); } #[test] fn boxed() { let b: Box<str> = STR.to_string().into_boxed_str(); let len = b.len(); let ptr = b.as_ptr(); let b2 = b.clone(); let mut z = Nbstr::from(b); assert_eq!(z.deref().len(), len); assert_eq!(z.deref().as_ptr(), ptr); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), Some(b2.clone())); assert_eq!(take_box(&mut Nbstr::from_str(STR)), Some(b2.clone())); } #[test] fn nuls() {// Is here because MAX_STACK let zeros_bytes = [0; MAX_STACK as usize]; let zeros_str = Str::from_utf8(&zeros_bytes).unwrap(); let zeros = Nbstr::from_str(zeros_str); assert_eq!(zeros.deref(), zeros_str); assert!(Some(zeros).is_some()); } #[test] #[cfg_attr(debug_assertions, should_panic)]// from arithmetic overflow or explicit panic fn too_long() {// Is here because no_giants has a custom panic message for tests, // because the normal one would segfault on the invalid test str. let b: &[u8] = unsafe{slice::from_raw_parts(1 as *const u8, 1+Nbstr::max_length() )}; let s: &'static str = unsafe{ mem::transmute(b) }; mem::forget(Nbstr::from(s)); } }
if z.variant() == BOX { // I asked on #rust, and transmuting from & to mut is apparently undefined behaviour. // Is it really in this case? let s: *mut str = unsafe{ mem::transmute(z.get_slice()) }; // Cannot just assign default; then rust tries to drop the previous value! // .. which then calls this function. mem::forget(mem::replace(z, Nbstr::default())); Some(unsafe{ Box::from_raw(s) }) } else { None } }
identifier_body
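take_box above is what makes the conversions back out of Nbstr cheap: a boxed value hands its allocation over instead of copying, while literals and inline strings fall back to an ordinary copy. A usage sketch under the same crate-path assumption as before:

use nbstr::Nbstr;

fn back_to_owned() {
    // Boxed contents: From<Nbstr> for Box<str> goes through take_box, so the
    // existing allocation is handed back rather than copied.
    let z = Nbstr::from("text that lives on the heap".to_string().into_boxed_str());
    let reclaimed: Box<str> = z.into();
    assert_eq!(&*reclaimed, "text that lives on the heap");

    // Literal contents: there is no box to take, so the String conversion falls
    // back to copying the borrowed bytes.
    let lit = Nbstr::from("borrowed forever");
    let copied: String = lit.into();
    assert_eq!(copied, "borrowed forever");
}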
shared.rs
/* Copyright 2016 Torbjørn Birch Moltu * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use Nbstr; use nbstr::{MAX_LENGTH,MAX_STACK,LITERAL,BOX}; extern crate std; use std::cmp::Ordering; use std::ops::Deref; use std::str as Str; use std::{mem,slice,ptr, fmt,hash}; use std::borrow::{Borrow,Cow}; /// Protected methods used by the impls below. pub trait Protected { /// create new of this variant with possibly uninitialized data fn new(u8) -> Self; /// store this str, which is either &'static or boxed fn with_pointer(u8, &str) -> Self; fn variant(&self) -> u8; /// get the area of self where (length,pointer)|inline is. fn data(&mut self) -> &mut [u8]; /// the root of AsRef,Borrow and Deref. fn get_slice(&self) -> &[u8]; } //////////////////// // public methods // //////////////////// impl Nbstr { #[cfg(feature="unstable")] /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. pub const MAX_LENGTH: usize = MAX_LENGTH; /// Get the max length a Nbstr can store, /// as some compile-time features might limit it. /// /// This method will get deprecated once associated consts become stable. pub fn max_length() -> usize { MAX_LENGTH } // keeping all public methods under one impl gives cleaner rustdoc /// Create a Nbstr from a borrowed str with a limited lifetime. /// If the str is short enough it will be stored the inside struct itself and not boxed. pub fn from_str(s: &str) -> Self { Self::try_stack(s).unwrap_or_else(|| s.to_owned().into() ) } } //////////////// //Constructors// //////////////// impl Default for Nbstr { fn default() -> Self { let s = unsafe{ slice::from_raw_parts(1 as *const u8, 0) };//pointer must be nonzero Self::with_pointer(LITERAL, unsafe{ mem::transmute(s) }) } } impl From<&'static str> for Nbstr { fn f
s: &'static str) -> Self { Self::with_pointer(LITERAL, s) } } impl Nbstr { fn try_stack(s: &str) -> Option<Self> {match s.len() as u8 { // Cannot have stack str with length 0, as variant might be NonZero 0 => Some(Self::default()), 1...MAX_STACK => { let mut z = Self::new(s.len() as u8); for (d, s) in z.data().iter_mut().zip(s.bytes()) { *d = s; } Some(z) }, _ => None, }} } impl From<Box<str>> for Nbstr { fn from(s: Box<str>) -> Self { // Don't try stack; users might turn it back into a box later let z = if s.is_empty() {Self::default()}// Make it clear we don't own any memory. else {Self::with_pointer(BOX, &s)}; mem::forget(s); return z; } } impl From<String> for Nbstr { fn from(s: String) -> Self { if s.capacity() != s.len() {// into_boxed will reallocate if let Some(inline) = Self::try_stack(&s) { return inline;// and drop s } } return Self::from(s.into_boxed_str()); } } impl From<Cow<'static, str>> for Nbstr { fn from(cow: Cow<'static, str>) -> Self {match cow { Cow::Owned(owned) => Self::from(owned), Cow::Borrowed(borrowed) => Self::from(borrowed), }} } impl Clone for Nbstr { fn clone(&self) -> Self { if self.variant() == BOX {// try stack Nbstr::from_str(self.deref()) } else {// copy the un-copyable unsafe{ ptr::read(self) } } } fn clone_from(&mut self, from: &Self) { // keep existing box if possible if self.variant() == BOX && self.len() == from.len() { unsafe{ ptr::copy_nonoverlapping( from.as_ptr(), mem::transmute(self.as_ptr()), self.len() )}; } else { *self = from.clone(); } } } /////////// //Getters// /////////// impl AsRef<[u8]> for Nbstr { fn as_ref(&self) -> &[u8] { self.get_slice() } } impl AsRef<str> for Nbstr { fn as_ref(&self) -> &str { let bytes: &[u8] = self.as_ref(); unsafe{ Str::from_utf8_unchecked( bytes )} } } impl Deref for Nbstr { type Target = str; fn deref(&self) -> &Self::Target { self.as_ref() } } impl Borrow<[u8]> for Nbstr { fn borrow(&self) -> &[u8] { self.as_ref() } } impl Borrow<str> for Nbstr { fn borrow(&self) -> &str { self.as_ref() } } ///////////////// //Common traits// ///////////////// impl hash::Hash for Nbstr { fn hash<H:hash::Hasher>(&self, h: &mut H) { self.deref().hash(h); } } impl fmt::Display for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(self.deref(), fmtr) } } impl PartialOrd for Nbstr { fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { self.deref().partial_cmp(rhs.deref()) } } impl Ord for Nbstr { fn cmp(&self, rhs: &Self) -> Ordering { self.deref().cmp(rhs.deref()) } } impl PartialEq for Nbstr { fn eq(&self, rhs: &Self) -> bool { self.deref() == rhs.deref() } } impl Eq for Nbstr {} /// Displays how the string is stored by prepending "stack: ", "literal: " or "boxed: ". impl fmt::Debug for Nbstr { fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result { write!(fmtr, "{}: {}", match self.variant() { 1...MAX_STACK => "stack", LITERAL => "literal", BOX => "boxed", _ => unreachable!("Unknown variant of Nbstr: {}", self.variant()) }, self.deref()) } } /////////////// //Destructors// /////////////// /// Returns Some if z contains a Box pub fn take_box(z: &mut Nbstr) -> Option<Box<str>> { if z.variant() == BOX { // I asked on #rust, and transmuting from & to mut is apparently undefined behaviour. // Is it really in this case? let s: *mut str = unsafe{ mem::transmute(z.get_slice()) }; // Cannot just assign default; then rust tries to drop the previous value! // .. which then calls this function. 
mem::forget(mem::replace(z, Nbstr::default())); Some(unsafe{ Box::from_raw(s) }) } else { None } } impl From<Nbstr> for Box<str> { fn from(mut z: Nbstr) -> Box<str> { take_box(&mut z).unwrap_or_else(|| z.deref().to_owned().into_boxed_str() ) } } impl From<Nbstr> for String { fn from(mut z: Nbstr) -> String { take_box(&mut z) .map(|b| b.into_string() ) .unwrap_or_else(|| z.deref().to_owned() ) } } impl From<Nbstr> for Cow<'static, str> { fn from(mut z: Nbstr) -> Cow<'static, str> { take_box(&mut z) .map(|b| Cow::from(b.into_string()) ) .unwrap_or_else(|| if z.variant() == LITERAL { let s: &'static str = unsafe{ mem::transmute(z.deref()) }; Cow::from(s) } else { Cow::from(z.deref().to_owned()) } ) } } #[cfg(not(test))]// Bugs in drop might cause stack overflow in suprising places. // The tests below should catch said bugs. impl Drop for Nbstr { fn drop(&mut self) { let _ = take_box(self); } } ////////////////////////////////////////////////////////////////////// // Tests that need private access or tests code that use cfg!(test) // ////////////////////////////////////////////////////////////////////// #[cfg(test)] mod tests { use nbstr::{Nbstr, MAX_STACK}; use super::*; use std::ops::Deref; use std::str as Str; use std::{mem,slice}; const STR: &'static str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; #[test] fn literal() { let mut z = Nbstr::from(STR); assert_eq!(z.deref().len(), STR.len()); assert_eq!(z.deref().as_ptr(), STR.as_ptr()); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), None); } #[test] fn stack() { let s = "abc"; let mut z = Nbstr::from_str(s); assert_eq!(z.deref().len(), s.len()); assert_eq!(z.deref().as_ptr() as usize, z.data().as_ptr() as usize); assert_eq!(z.deref(), s); assert_eq!(take_box(&mut z), None); } #[test] fn boxed() { let b: Box<str> = STR.to_string().into_boxed_str(); let len = b.len(); let ptr = b.as_ptr(); let b2 = b.clone(); let mut z = Nbstr::from(b); assert_eq!(z.deref().len(), len); assert_eq!(z.deref().as_ptr(), ptr); assert_eq!(z.deref(), STR); assert_eq!(take_box(&mut z), Some(b2.clone())); assert_eq!(take_box(&mut Nbstr::from_str(STR)), Some(b2.clone())); } #[test] fn nuls() {// Is here because MAX_STACK let zeros_bytes = [0; MAX_STACK as usize]; let zeros_str = Str::from_utf8(&zeros_bytes).unwrap(); let zeros = Nbstr::from_str(zeros_str); assert_eq!(zeros.deref(), zeros_str); assert!(Some(zeros).is_some()); } #[test] #[cfg_attr(debug_assertions, should_panic)]// from arithmetic overflow or explicit panic fn too_long() {// Is here because no_giants has a custom panic message for tests, // because the normal one would segfault on the invalid test str. let b: &[u8] = unsafe{slice::from_raw_parts(1 as *const u8, 1+Nbstr::max_length() )}; let s: &'static str = unsafe{ mem::transmute(b) }; mem::forget(Nbstr::from(s)); } }
rom(
identifier_name
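The same impls also round-trip Cow<'static, str>: a borrowed Cow is stored as a literal and comes back out as Cow::Borrowed without allocating. A short sketch, again assuming the nbstr crate path used in the tests:

use std::borrow::Cow;
use nbstr::Nbstr;

fn cow_round_trip() {
    // A borrowed Cow becomes a zero-copy literal...
    let z = Nbstr::from(Cow::Borrowed("static data"));
    // ...and the literal converts back to Cow::Borrowed, still without allocating.
    match Cow::<'static, str>::from(z) {
        Cow::Borrowed(s) => assert_eq!(s, "static data"),
        Cow::Owned(_) => panic!("a literal is expected to come back as Cow::Borrowed"),
    }
}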
packfile.rs
use bytes::{BufMut, BytesMut}; use flate2::{write::ZlibEncoder, Compression}; use sha1::{ digest::{generic_array::GenericArray, FixedOutputDirty}, Digest, Sha1, }; use std::{convert::TryInto, fmt::Write, io::Write as IoWrite}; // The packfile itself is a very simple format. There is a header, a // series of packed objects (each with it's own header and body) and // then a checksum trailer. The first four bytes is the string 'PACK', // which is sort of used to make sure you're getting the start of the // packfile correctly. This is followed by a 4-byte packfile version // number and then a 4-byte number of entries in that file. pub struct PackFile<'a> { entries: Vec<PackFileEntry<'a>>, } impl<'a> PackFile<'a> { #[must_use] pub fn new(entries: Vec<PackFileEntry<'a>>) -> Self { Self { entries } } #[must_use] pub const fn header_size() -> usize { "PACK".len() + std::mem::size_of::<u32>() + std::mem::size_of::<u32>() } #[must_use] pub const fn footer_size() -> usize { 20 } pub fn encode_to(&self, original_buf: &mut BytesMut) -> Result<(), anyhow::Error> { let mut buf = original_buf.split_off(original_buf.len()); buf.reserve(Self::header_size() + Self::footer_size()); // header buf.extend_from_slice(b"PACK"); // magic header buf.put_u32(2); // version buf.put_u32(self.entries.len().try_into()?); // number of entries in the packfile // body for entry in &self.entries { entry.encode_to(&mut buf)?; } // footer buf.extend_from_slice(&sha1::Sha1::digest(&buf[..])); original_buf.unsplit(buf); Ok(()) } } #[derive(Debug)] pub struct Commit<'a> { pub tree: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20], but sha-1 returns a GenericArray // pub parent: [u8; 20], pub author: CommitUserInfo<'a>, pub committer: CommitUserInfo<'a>, // pub gpgsig: &str, pub message: &'a str, } impl Commit<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { let mut tree_hex = [0_u8; 20 * 2]; hex::encode_to_slice(self.tree, &mut tree_hex)?; out.write_str("tree ")?; out.extend_from_slice(&tree_hex); out.write_char('\n')?; writeln!(out, "author {}", self.author.encode())?; writeln!(out, "committer {}", self.committer.encode())?; write!(out, "\n{}", self.message)?; Ok(()) } #[must_use] pub fn size(&self) -> usize { let mut len = 0; len += "tree ".len() + (self.tree.len() * 2) + "\n".len(); len += "author ".len() + self.author.size() + "\n".len(); len += "committer ".len() + self.committer.size() + "\n".len(); len += "\n".len() + self.message.len(); len } } #[derive(Copy, Clone, Debug)] pub struct CommitUserInfo<'a> { pub name: &'a str, pub email: &'a str, pub time: chrono::DateTime<chrono::Utc>, } impl CommitUserInfo<'_> { fn encode(&self) -> String { // TODO: remove `format!`, `format_args!`? format!( "{} <{}> {} +0000", self.name, self.email, self.time.timestamp() ) } #[must_use] pub fn size(&self) -> usize { let timestamp_len = itoa::Buffer::new().format(self.time.timestamp()).len(); self.name.len() + "< ".len() + self.email.len() + "> ".len() + timestamp_len
} #[derive(Debug)] pub enum TreeItemKind { File, Directory, } impl TreeItemKind { #[must_use] pub const fn mode(&self) -> &'static str { match self { Self::File => "100644", Self::Directory => "40000", } } } #[derive(Debug)] pub struct TreeItem<'a> { pub kind: TreeItemKind, pub name: &'a str, pub hash: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20] - but we have to deal with GenericArrays } // `[mode] [name]\0[hash]` impl TreeItem<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { out.write_str(self.kind.mode())?; write!(out, " {}\0", self.name)?; out.extend_from_slice(&self.hash); Ok(()) } #[must_use] pub fn size(&self) -> usize { self.kind.mode().len() + " ".len() + self.name.len() + "\0".len() + self.hash.len() } } #[derive(Debug)] pub enum PackFileEntry<'a> { // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3 | gzip -dc // commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c // parent c2a862612a14346ae95234f26efae1ee69b5b7a9 // author Jordan Doyle <[email protected]> 1630244577 +0100 // committer Jordan Doyle <[email protected]> 1630244577 +0100 // gpgsig -----BEGIN PGP SIGNATURE----- // // iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt // xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2 // tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ // tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6 // omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX // fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E // UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO // FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G // hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG // QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE // Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf // bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y= // =fXoH // -----END PGP SIGNATURE----- // // test Commit(Commit<'a>), // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc // tree 20940000 .cargo���CYy��Ve�������100644 .gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/�� // kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�!�S&N�100644 test�G2Y�gN�b9vj?��Ut� Tree(Vec<TreeItem<'a>>), // jordan@Jordans-MacBook-Pro-2 objects % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3| gzip -dc // blob 23try and find me in .git Blob(&'a [u8]), // Tag, // OfsDelta, // RefDelta, } impl PackFileEntry<'_> { fn write_header(&self, buf: &mut BytesMut) { let mut size = self.uncompressed_size(); // write header { let mut val = 0b1000_0000_u8; val |= match self { Self::Commit(_) => 0b001, Self::Tree(_) => 0b010, Self::Blob(_) => 0b011, // Self::Tag => 0b100, // Self::OfsDelta => 0b110, // Self::RefDelta => 0b111, } << 4; // pack the 4 LSBs of the size into the header #[allow(clippy::cast_possible_truncation)] // value is masked { val |= (size & 0b1111) as u8; } size >>= 4; buf.put_u8(val); } // write size bytes while size != 0 { // read 7 LSBs from the `size` and push them off for the next iteration #[allow(clippy::cast_possible_truncation)] // value is masked let mut val = (size & 0b111_1111) as u8; size >>= 7; if size != 0 { // MSB set to 1 implies there's more size bytes to come, otherwise // the data 
starts after this byte val |= 1 << 7; } buf.put_u8(val); } } pub fn encode_to(&self, original_out: &mut BytesMut) -> Result<(), anyhow::Error> { self.write_header(original_out); // TODO: this needs space reserving for it // todo is there a way to stream through the zlibencoder so we don't have to // have this intermediate bytesmut and vec? let mut out = BytesMut::new(); let size = self.uncompressed_size(); original_out.reserve(size); // the data ends up getting compressed but we'll need at least this many bytes out.reserve(size); match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => { for item in items { item.encode_to(&mut out)?; } } Self::Blob(data) => { out.extend_from_slice(data); } } debug_assert_eq!(out.len(), size); let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(&out)?; let compressed_data = e.finish()?; original_out.extend_from_slice(&compressed_data); Ok(()) } #[must_use] pub fn uncompressed_size(&self) -> usize { match self { Self::Commit(commit) => commit.size(), Self::Tree(items) => items.iter().map(TreeItem::size).sum(), Self::Blob(data) => data.len(), } } // wen const generics for RustCrypto? :-( pub fn hash( &self, ) -> Result<GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, anyhow::Error> { let size = self.uncompressed_size(); let file_prefix = match self { Self::Commit(_) => "commit", Self::Tree(_) => "tree", Self::Blob(_) => "blob", }; let size_len = itoa::Buffer::new().format(size).len(); let mut out = BytesMut::with_capacity(file_prefix.len() + " ".len() + size_len + "\n".len() + size); write!(out, "{} {}\0", file_prefix, size)?; match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => { for item in items { item.encode_to(&mut out)?; } } Self::Blob(blob) => { out.extend_from_slice(blob); } } Ok(sha1::Sha1::digest(&out)) } }
+ " +0000".len() }
random_line_split
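write_header above packs the object type into bits 4 to 6 of the entry's first byte, the low four bits of the uncompressed size into bits 0 to 3, and then writes the remaining size seven bits per byte with the high bit flagging that another byte follows. A standalone sketch of that encoding against a plain Vec<u8>; it mirrors the code above, including the unconditionally set high bit on the first byte, and the function name is mine:

// Mirror of write_header above: 3 type bits plus a variable-length size.
fn encode_entry_header(type_bits: u8, mut size: usize) -> Vec<u8> {
    let mut out = Vec::new();

    // First byte: high bit set (as in the original), object type in bits 4-6
    // (commit = 0b001, tree = 0b010, blob = 0b011), low 4 bits of the size.
    let first = 0b1000_0000u8 | (type_bits << 4) | (size & 0b1111) as u8;
    size >>= 4;
    out.push(first);

    // Remaining size, 7 bits per byte; the high bit means another byte follows.
    while size != 0 {
        let mut val = (size & 0b111_1111) as u8;
        size >>= 7;
        if size != 0 {
            val |= 1 << 7;
        }
        out.push(val);
    }
    out
}

// e.g. a 23-byte blob (type 0b011) encodes to [0b1011_0111, 0b0000_0001]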
packfile.rs
use bytes::{BufMut, BytesMut}; use flate2::{write::ZlibEncoder, Compression}; use sha1::{ digest::{generic_array::GenericArray, FixedOutputDirty}, Digest, Sha1, }; use std::{convert::TryInto, fmt::Write, io::Write as IoWrite}; // The packfile itself is a very simple format. There is a header, a // series of packed objects (each with it's own header and body) and // then a checksum trailer. The first four bytes is the string 'PACK', // which is sort of used to make sure you're getting the start of the // packfile correctly. This is followed by a 4-byte packfile version // number and then a 4-byte number of entries in that file. pub struct PackFile<'a> { entries: Vec<PackFileEntry<'a>>, } impl<'a> PackFile<'a> { #[must_use] pub fn new(entries: Vec<PackFileEntry<'a>>) -> Self { Self { entries } } #[must_use] pub const fn header_size() -> usize { "PACK".len() + std::mem::size_of::<u32>() + std::mem::size_of::<u32>() } #[must_use] pub const fn footer_size() -> usize { 20 } pub fn encode_to(&self, original_buf: &mut BytesMut) -> Result<(), anyhow::Error> { let mut buf = original_buf.split_off(original_buf.len()); buf.reserve(Self::header_size() + Self::footer_size()); // header buf.extend_from_slice(b"PACK"); // magic header buf.put_u32(2); // version buf.put_u32(self.entries.len().try_into()?); // number of entries in the packfile // body for entry in &self.entries { entry.encode_to(&mut buf)?; } // footer buf.extend_from_slice(&sha1::Sha1::digest(&buf[..])); original_buf.unsplit(buf); Ok(()) } } #[derive(Debug)] pub struct Commit<'a> { pub tree: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20], but sha-1 returns a GenericArray // pub parent: [u8; 20], pub author: CommitUserInfo<'a>, pub committer: CommitUserInfo<'a>, // pub gpgsig: &str, pub message: &'a str, } impl Commit<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { let mut tree_hex = [0_u8; 20 * 2]; hex::encode_to_slice(self.tree, &mut tree_hex)?; out.write_str("tree ")?; out.extend_from_slice(&tree_hex); out.write_char('\n')?; writeln!(out, "author {}", self.author.encode())?; writeln!(out, "committer {}", self.committer.encode())?; write!(out, "\n{}", self.message)?; Ok(()) } #[must_use] pub fn size(&self) -> usize { let mut len = 0; len += "tree ".len() + (self.tree.len() * 2) + "\n".len(); len += "author ".len() + self.author.size() + "\n".len(); len += "committer ".len() + self.committer.size() + "\n".len(); len += "\n".len() + self.message.len(); len } } #[derive(Copy, Clone, Debug)] pub struct CommitUserInfo<'a> { pub name: &'a str, pub email: &'a str, pub time: chrono::DateTime<chrono::Utc>, } impl CommitUserInfo<'_> { fn encode(&self) -> String { // TODO: remove `format!`, `format_args!`? 
format!( "{} <{}> {} +0000", self.name, self.email, self.time.timestamp() ) } #[must_use] pub fn size(&self) -> usize { let timestamp_len = itoa::Buffer::new().format(self.time.timestamp()).len(); self.name.len() + "< ".len() + self.email.len() + "> ".len() + timestamp_len + " +0000".len() } } #[derive(Debug)] pub enum TreeItemKind { File, Directory, } impl TreeItemKind { #[must_use] pub const fn mode(&self) -> &'static str { match self { Self::File => "100644", Self::Directory => "40000", } } } #[derive(Debug)] pub struct TreeItem<'a> { pub kind: TreeItemKind, pub name: &'a str, pub hash: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20] - but we have to deal with GenericArrays } // `[mode] [name]\0[hash]` impl TreeItem<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { out.write_str(self.kind.mode())?; write!(out, " {}\0", self.name)?; out.extend_from_slice(&self.hash); Ok(()) } #[must_use] pub fn size(&self) -> usize { self.kind.mode().len() + " ".len() + self.name.len() + "\0".len() + self.hash.len() } } #[derive(Debug)] pub enum PackFileEntry<'a> { // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3 | gzip -dc // commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c // parent c2a862612a14346ae95234f26efae1ee69b5b7a9 // author Jordan Doyle <[email protected]> 1630244577 +0100 // committer Jordan Doyle <[email protected]> 1630244577 +0100 // gpgsig -----BEGIN PGP SIGNATURE----- // // iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt // xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2 // tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ // tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6 // omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX // fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E // UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO // FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G // hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG // QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE // Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf // bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y= // =fXoH // -----END PGP SIGNATURE----- // // test Commit(Commit<'a>), // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc // tree 20940000 .cargo���CYy��Ve�������100644 .gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/�� // kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�!�S&N�100644 test�G2Y�gN�b9vj?��Ut� Tree(Vec<TreeItem<'a>>), // jordan@Jordans-MacBook-Pro-2 objects % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3| gzip -dc // blob 23try and find me in .git Blob(&'a [u8]), // Tag, // OfsDelta, // RefDelta, } impl PackFileEntry<'_> { fn write_header(&self, buf: &mut BytesMut) { let mut size = self.uncompressed_size(); // write header { let mut val = 0b1000_0000_u8; val |= match self { Self::Commit(_) => 0b001, Self::Tree(_) => 0b010, Self::Blob(_) => 0b011, // Self::Tag => 0b100, // Self::OfsDelta => 0b110, // Self::RefDelta => 0b111, } << 4; // pack the 4 LSBs of the size into the header #[allow(clippy::cast_possible_truncation)] // value is masked { val |= (size & 0b1111) as u8; } size >>= 4; buf.put_u8(val); } // write size bytes while size 
!= 0 { // read 7 LSBs from the `size` and push them off for the next iteration #[allow(clippy::cast_possible_truncation)] // value is masked let mut val = (size & 0b111_1111) as u8; size >>= 7; if size != 0 { // MSB set to 1 implies there's more size bytes to come, otherwise // the data starts after this byte val |= 1 << 7; } buf.put_u8(val); } } pub fn encode_to(&self, original_out: &mut BytesMut) -> Result<(), anyhow::Error> { self.write_header(original_out); // TODO: this needs space reserving for it // todo is there a way to stream through the zlibencoder so we don't have to // have this intermediate bytesmut and vec? let mut out = BytesMut::new(); let size = self.uncompressed_size(); original_out.reserve(size); // the data ends up getting compressed but we'll need at least this many bytes out.reserve(size); match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => { for item in items { item.encode_to(&mut out)?; } } Self::Blob(data) => { out.extend_from_slice(data); } } debug_assert_eq!(out.len(), size); let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(&out)?; let compressed_data = e.finish()?; original_out.extend_from_slice(&compressed_data); Ok(()) } #[must_use] pub fn uncompressed_size(&self) -> usize { match self { Self::Commit(commit) => commit.size(), Self::Tree(items) => items.iter().map(TreeItem::size).sum(), Self::Blob(data) => data.len(), } } // wen const generics for RustCrypto? :-( pub fn hash( &self, ) -> Result<GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, anyhow::Error> { let size = self.uncompressed_size(); let file_prefix = match self { Self::Commit(_) => "commit", Self::Tree(_) => "tree", Self::Blob(_) => "blob", }; let size_len = itoa::Buffer::new().format(size).len(); let mut out = BytesMut::with_capacity(file_prefix.len() + " ".len() + size_len + "\n".len() + size); write!(out, "{} {}\0", file_prefix, size)?; match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => {
e_to(&mut out)?; } } Self::Blob(blob) => { out.extend_from_slice(blob); } } Ok(sha1::Sha1::digest(&out)) } }
for item in items { item.encod
conditional_block
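The write_header logic in this row packs the object type into the upper bits of the first byte and emits the size as a little-endian base-128 varint. A sketch of that encoding under the conventional git rule that the continuation bit is set only while more size bytes follow (entry_header is an illustrative name, not the row's own function):

// First byte: 3-bit object type in bits 4..6 plus the low 4 bits of the size;
// following bytes each carry 7 more size bits, MSB set while more bytes follow.
fn entry_header(object_type: u8, mut size: usize) -> Vec<u8> {
    let mut out = Vec::new();
    let mut first = ((object_type & 0b111) << 4) | ((size & 0b1111) as u8);
    size >>= 4;
    if size != 0 {
        first |= 0b1000_0000; // continuation bit: more size bytes to come
    }
    out.push(first);
    while size != 0 {
        let mut val = (size & 0b111_1111) as u8;
        size >>= 7;
        if size != 0 {
            val |= 0b1000_0000;
        }
        out.push(val);
    }
    out
}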
packfile.rs
use bytes::{BufMut, BytesMut}; use flate2::{write::ZlibEncoder, Compression}; use sha1::{ digest::{generic_array::GenericArray, FixedOutputDirty}, Digest, Sha1, }; use std::{convert::TryInto, fmt::Write, io::Write as IoWrite}; // The packfile itself is a very simple format. There is a header, a // series of packed objects (each with it's own header and body) and // then a checksum trailer. The first four bytes is the string 'PACK', // which is sort of used to make sure you're getting the start of the // packfile correctly. This is followed by a 4-byte packfile version // number and then a 4-byte number of entries in that file. pub struct PackFile<'a> { entries: Vec<PackFileEntry<'a>>, } impl<'a> PackFile<'a> { #[must_use] pub fn new(entries: Vec<PackFileEntry<'a>>) -> Self { Self { entries } } #[must_use] pub const fn header_size() -> usize { "PACK".len() + std::mem::size_of::<u32>() + std::mem::size_of::<u32>() } #[must_use] pub const fn footer_size() -> usize
pub fn encode_to(&self, original_buf: &mut BytesMut) -> Result<(), anyhow::Error> { let mut buf = original_buf.split_off(original_buf.len()); buf.reserve(Self::header_size() + Self::footer_size()); // header buf.extend_from_slice(b"PACK"); // magic header buf.put_u32(2); // version buf.put_u32(self.entries.len().try_into()?); // number of entries in the packfile // body for entry in &self.entries { entry.encode_to(&mut buf)?; } // footer buf.extend_from_slice(&sha1::Sha1::digest(&buf[..])); original_buf.unsplit(buf); Ok(()) } } #[derive(Debug)] pub struct Commit<'a> { pub tree: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20], but sha-1 returns a GenericArray // pub parent: [u8; 20], pub author: CommitUserInfo<'a>, pub committer: CommitUserInfo<'a>, // pub gpgsig: &str, pub message: &'a str, } impl Commit<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { let mut tree_hex = [0_u8; 20 * 2]; hex::encode_to_slice(self.tree, &mut tree_hex)?; out.write_str("tree ")?; out.extend_from_slice(&tree_hex); out.write_char('\n')?; writeln!(out, "author {}", self.author.encode())?; writeln!(out, "committer {}", self.committer.encode())?; write!(out, "\n{}", self.message)?; Ok(()) } #[must_use] pub fn size(&self) -> usize { let mut len = 0; len += "tree ".len() + (self.tree.len() * 2) + "\n".len(); len += "author ".len() + self.author.size() + "\n".len(); len += "committer ".len() + self.committer.size() + "\n".len(); len += "\n".len() + self.message.len(); len } } #[derive(Copy, Clone, Debug)] pub struct CommitUserInfo<'a> { pub name: &'a str, pub email: &'a str, pub time: chrono::DateTime<chrono::Utc>, } impl CommitUserInfo<'_> { fn encode(&self) -> String { // TODO: remove `format!`, `format_args!`? format!( "{} <{}> {} +0000", self.name, self.email, self.time.timestamp() ) } #[must_use] pub fn size(&self) -> usize { let timestamp_len = itoa::Buffer::new().format(self.time.timestamp()).len(); self.name.len() + "< ".len() + self.email.len() + "> ".len() + timestamp_len + " +0000".len() } } #[derive(Debug)] pub enum TreeItemKind { File, Directory, } impl TreeItemKind { #[must_use] pub const fn mode(&self) -> &'static str { match self { Self::File => "100644", Self::Directory => "40000", } } } #[derive(Debug)] pub struct TreeItem<'a> { pub kind: TreeItemKind, pub name: &'a str, pub hash: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20] - but we have to deal with GenericArrays } // `[mode] [name]\0[hash]` impl TreeItem<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { out.write_str(self.kind.mode())?; write!(out, " {}\0", self.name)?; out.extend_from_slice(&self.hash); Ok(()) } #[must_use] pub fn size(&self) -> usize { self.kind.mode().len() + " ".len() + self.name.len() + "\0".len() + self.hash.len() } } #[derive(Debug)] pub enum PackFileEntry<'a> { // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3 | gzip -dc // commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c // parent c2a862612a14346ae95234f26efae1ee69b5b7a9 // author Jordan Doyle <[email protected]> 1630244577 +0100 // committer Jordan Doyle <[email protected]> 1630244577 +0100 // gpgsig -----BEGIN PGP SIGNATURE----- // // iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt // xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2 // tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ // 
tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6 // omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX // fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E // UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO // FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G // hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG // QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE // Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf // bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y= // =fXoH // -----END PGP SIGNATURE----- // // test Commit(Commit<'a>), // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc // tree 20940000 .cargo���CYy��Ve�������100644 .gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/�� // kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�!�S&N�100644 test�G2Y�gN�b9vj?��Ut� Tree(Vec<TreeItem<'a>>), // jordan@Jordans-MacBook-Pro-2 objects % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3| gzip -dc // blob 23try and find me in .git Blob(&'a [u8]), // Tag, // OfsDelta, // RefDelta, } impl PackFileEntry<'_> { fn write_header(&self, buf: &mut BytesMut) { let mut size = self.uncompressed_size(); // write header { let mut val = 0b1000_0000_u8; val |= match self { Self::Commit(_) => 0b001, Self::Tree(_) => 0b010, Self::Blob(_) => 0b011, // Self::Tag => 0b100, // Self::OfsDelta => 0b110, // Self::RefDelta => 0b111, } << 4; // pack the 4 LSBs of the size into the header #[allow(clippy::cast_possible_truncation)] // value is masked { val |= (size & 0b1111) as u8; } size >>= 4; buf.put_u8(val); } // write size bytes while size != 0 { // read 7 LSBs from the `size` and push them off for the next iteration #[allow(clippy::cast_possible_truncation)] // value is masked let mut val = (size & 0b111_1111) as u8; size >>= 7; if size != 0 { // MSB set to 1 implies there's more size bytes to come, otherwise // the data starts after this byte val |= 1 << 7; } buf.put_u8(val); } } pub fn encode_to(&self, original_out: &mut BytesMut) -> Result<(), anyhow::Error> { self.write_header(original_out); // TODO: this needs space reserving for it // todo is there a way to stream through the zlibencoder so we don't have to // have this intermediate bytesmut and vec? let mut out = BytesMut::new(); let size = self.uncompressed_size(); original_out.reserve(size); // the data ends up getting compressed but we'll need at least this many bytes out.reserve(size); match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => { for item in items { item.encode_to(&mut out)?; } } Self::Blob(data) => { out.extend_from_slice(data); } } debug_assert_eq!(out.len(), size); let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(&out)?; let compressed_data = e.finish()?; original_out.extend_from_slice(&compressed_data); Ok(()) } #[must_use] pub fn uncompressed_size(&self) -> usize { match self { Self::Commit(commit) => commit.size(), Self::Tree(items) => items.iter().map(TreeItem::size).sum(), Self::Blob(data) => data.len(), } } // wen const generics for RustCrypto? 
:-( pub fn hash( &self, ) -> Result<GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, anyhow::Error> { let size = self.uncompressed_size(); let file_prefix = match self { Self::Commit(_) => "commit", Self::Tree(_) => "tree", Self::Blob(_) => "blob", }; let size_len = itoa::Buffer::new().format(size).len(); let mut out = BytesMut::with_capacity(file_prefix.len() + " ".len() + size_len + "\n".len() + size); write!(out, "{} {}\0", file_prefix, size)?; match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => { for item in items { item.encode_to(&mut out)?; } } Self::Blob(blob) => { out.extend_from_slice(blob); } } Ok(sha1::Sha1::digest(&out)) } }
{ 20 }
identifier_body
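This row's PackFile::encode_to shows the overall framing: the "PACK" magic, a 4-byte version, a 4-byte entry count, the encoded entries, then a 20-byte SHA-1 trailer over everything written so far (which is why footer_size() is 20). A minimal sketch of that framing over already-encoded entry bytes (frame_packfile is an illustrative name):

use sha1::{Digest, Sha1};

// Wrap pre-encoded entry bytes in the packfile header and SHA-1 trailer.
fn frame_packfile(entry_bytes: &[u8], entry_count: u32) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend_from_slice(b"PACK");                    // magic header
    out.extend_from_slice(&2u32.to_be_bytes());        // version
    out.extend_from_slice(&entry_count.to_be_bytes()); // number of entries
    out.extend_from_slice(entry_bytes);                // body
    let trailer = Sha1::digest(&out);                  // checksum of everything above
    out.extend_from_slice(&trailer);
    out
}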
packfile.rs
use bytes::{BufMut, BytesMut}; use flate2::{write::ZlibEncoder, Compression}; use sha1::{ digest::{generic_array::GenericArray, FixedOutputDirty}, Digest, Sha1, }; use std::{convert::TryInto, fmt::Write, io::Write as IoWrite}; // The packfile itself is a very simple format. There is a header, a // series of packed objects (each with it's own header and body) and // then a checksum trailer. The first four bytes is the string 'PACK', // which is sort of used to make sure you're getting the start of the // packfile correctly. This is followed by a 4-byte packfile version // number and then a 4-byte number of entries in that file. pub struct PackFile<'a> { entries: Vec<PackFileEntry<'a>>, } impl<'a> PackFile<'a> { #[must_use] pub fn new(entries: Vec<PackFileEntry<'a>>) -> Self { Self { entries } } #[must_use] pub const fn header_size() -> usize { "PACK".len() + std::mem::size_of::<u32>() + std::mem::size_of::<u32>() } #[must_use] pub const fn footer_size() -> usize { 20 } pub fn encode_to(&self, original_buf: &mut BytesMut) -> Result<(), anyhow::Error> { let mut buf = original_buf.split_off(original_buf.len()); buf.reserve(Self::header_size() + Self::footer_size()); // header buf.extend_from_slice(b"PACK"); // magic header buf.put_u32(2); // version buf.put_u32(self.entries.len().try_into()?); // number of entries in the packfile // body for entry in &self.entries { entry.encode_to(&mut buf)?; } // footer buf.extend_from_slice(&sha1::Sha1::digest(&buf[..])); original_buf.unsplit(buf); Ok(()) } } #[derive(Debug)] pub struct Commit<'a> { pub tree: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20], but sha-1 returns a GenericArray // pub parent: [u8; 20], pub author: CommitUserInfo<'a>, pub committer: CommitUserInfo<'a>, // pub gpgsig: &str, pub message: &'a str, } impl Commit<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { let mut tree_hex = [0_u8; 20 * 2]; hex::encode_to_slice(self.tree, &mut tree_hex)?; out.write_str("tree ")?; out.extend_from_slice(&tree_hex); out.write_char('\n')?; writeln!(out, "author {}", self.author.encode())?; writeln!(out, "committer {}", self.committer.encode())?; write!(out, "\n{}", self.message)?; Ok(()) } #[must_use] pub fn size(&self) -> usize { let mut len = 0; len += "tree ".len() + (self.tree.len() * 2) + "\n".len(); len += "author ".len() + self.author.size() + "\n".len(); len += "committer ".len() + self.committer.size() + "\n".len(); len += "\n".len() + self.message.len(); len } } #[derive(Copy, Clone, Debug)] pub struct CommitUserInfo<'a> { pub name: &'a str, pub email: &'a str, pub time: chrono::DateTime<chrono::Utc>, } impl CommitUserInfo<'_> { fn encode(&self) -> String { // TODO: remove `format!`, `format_args!`? 
format!( "{} <{}> {} +0000", self.name, self.email, self.time.timestamp() ) } #[must_use] pub fn size(&self) -> usize { let timestamp_len = itoa::Buffer::new().format(self.time.timestamp()).len(); self.name.len() + "< ".len() + self.email.len() + "> ".len() + timestamp_len + " +0000".len() } } #[derive(Debug)] pub enum TreeItemKind { File, Directory, } impl TreeItemKind { #[must_use] pub const fn mode(&self) -> &'static str { match self { Self::File => "100644", Self::Directory => "40000", } } } #[derive(Debug)] pub struct TreeItem<'a> { pub kind: TreeItemKind, pub name: &'a str, pub hash: GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, // [u8; 20] - but we have to deal with GenericArrays } // `[mode] [name]\0[hash]` impl TreeItem<'_> { fn encode_to(&self, out: &mut BytesMut) -> Result<(), anyhow::Error> { out.write_str(self.kind.mode())?; write!(out, " {}\0", self.name)?; out.extend_from_slice(&self.hash); Ok(()) } #[must_use] pub fn
(&self) -> usize { self.kind.mode().len() + " ".len() + self.name.len() + "\0".len() + self.hash.len() } } #[derive(Debug)] pub enum PackFileEntry<'a> { // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3 | gzip -dc // commit 1068tree 0d586b48bc42e8591773d3d8a7223551c39d453c // parent c2a862612a14346ae95234f26efae1ee69b5b7a9 // author Jordan Doyle <[email protected]> 1630244577 +0100 // committer Jordan Doyle <[email protected]> 1630244577 +0100 // gpgsig -----BEGIN PGP SIGNATURE----- // // iQIzBAABCAAdFiEEMn1zof7yzaURQBGDHqa65vZtxJoFAmErjuEACgkQHqa65vZt // xJqhvhAAieKXnGRjT926qzozcvarC8D3TlA+Z1wVXueTAWqfusNIP0zCun/crOb2 // tOULO+/DXVBmwu5eInAf+t/wvlnIsrzJonhVr1ZT0f0vDX6fs2vflWg4UCVEuTsZ // tg+aTjcibwnmViIM9XVOzhU8Au2OIqMQLyQOMWSt8NhY0W2WhBCdQvhktvK1V8W6 // omPs04SrR39xWBDQaxsXYxq/1ZKUYXDwudvEfv14EvrxG1vWumpUVJd7Ib5w4gXX // fYa95DxYL720ZaiWPIYEG8FMBzSOpo6lUzY9g2/o/wKwSQZJNvpaMGCuouy8Fb+E // UaqC0XPxqpKG9duXPgCldUr+P7++48CF5zc358RBGz5OCNeTREsIQQo5PUO1k+wO // FnGOQTT8vvNOrxBgb3QgKu67RVwWDc6JnQCNpUrhUJrXMDWnYLBqo4Y+CdKGSQ4G // hW8V/hVTOlJZNi8bbU4v53cxh4nXiMM6NKUblUKs65ar3/2dkojwunz7r7GVZ6mG // QUpr9+ybG61XDqd1ad1A/B/i3WdWixTmJS3K/4uXjFjFX1f3RAk7O0gHc9I8HYOE // Vd8UsHzLOWAUHeaqbsd6xx3GCXF4D5D++kh9OY9Ov7CXlqbYbHd6Atg+PQ7VnqNf // bDqWN0Q2qcKX3k4ggtucmkkA6gP+K3+F5ANQj3AsGMQeddowC0Y= // =fXoH // -----END PGP SIGNATURE----- // // test Commit(Commit<'a>), // jordan@Jordans-MacBook-Pro-2 0d % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - 0d/586b48bc42e8591773d3d8a7223551c39d453c | gzip -dc // tree 20940000 .cargo���CYy��Ve�������100644 .gitignore�K��_ow�]����4�n�ݺ100644 Cargo.lock�7�3-�?/�� // kt��c0C�100644 Cargo.toml�6�&(��]\8@�SHA�]f40000 src0QW��ƅ���b[�!�S&N�100644 test�G2Y�gN�b9vj?��Ut� Tree(Vec<TreeItem<'a>>), // jordan@Jordans-MacBook-Pro-2 objects % printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" | cat - f5/473259d9674ed66239766a013f96a3550374e3| gzip -dc // blob 23try and find me in .git Blob(&'a [u8]), // Tag, // OfsDelta, // RefDelta, } impl PackFileEntry<'_> { fn write_header(&self, buf: &mut BytesMut) { let mut size = self.uncompressed_size(); // write header { let mut val = 0b1000_0000_u8; val |= match self { Self::Commit(_) => 0b001, Self::Tree(_) => 0b010, Self::Blob(_) => 0b011, // Self::Tag => 0b100, // Self::OfsDelta => 0b110, // Self::RefDelta => 0b111, } << 4; // pack the 4 LSBs of the size into the header #[allow(clippy::cast_possible_truncation)] // value is masked { val |= (size & 0b1111) as u8; } size >>= 4; buf.put_u8(val); } // write size bytes while size != 0 { // read 7 LSBs from the `size` and push them off for the next iteration #[allow(clippy::cast_possible_truncation)] // value is masked let mut val = (size & 0b111_1111) as u8; size >>= 7; if size != 0 { // MSB set to 1 implies there's more size bytes to come, otherwise // the data starts after this byte val |= 1 << 7; } buf.put_u8(val); } } pub fn encode_to(&self, original_out: &mut BytesMut) -> Result<(), anyhow::Error> { self.write_header(original_out); // TODO: this needs space reserving for it // todo is there a way to stream through the zlibencoder so we don't have to // have this intermediate bytesmut and vec? 
let mut out = BytesMut::new(); let size = self.uncompressed_size(); original_out.reserve(size); // the data ends up getting compressed but we'll need at least this many bytes out.reserve(size); match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => { for item in items { item.encode_to(&mut out)?; } } Self::Blob(data) => { out.extend_from_slice(data); } } debug_assert_eq!(out.len(), size); let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(&out)?; let compressed_data = e.finish()?; original_out.extend_from_slice(&compressed_data); Ok(()) } #[must_use] pub fn uncompressed_size(&self) -> usize { match self { Self::Commit(commit) => commit.size(), Self::Tree(items) => items.iter().map(TreeItem::size).sum(), Self::Blob(data) => data.len(), } } // wen const generics for RustCrypto? :-( pub fn hash( &self, ) -> Result<GenericArray<u8, <Sha1 as FixedOutputDirty>::OutputSize>, anyhow::Error> { let size = self.uncompressed_size(); let file_prefix = match self { Self::Commit(_) => "commit", Self::Tree(_) => "tree", Self::Blob(_) => "blob", }; let size_len = itoa::Buffer::new().format(size).len(); let mut out = BytesMut::with_capacity(file_prefix.len() + " ".len() + size_len + "\n".len() + size); write!(out, "{} {}\0", file_prefix, size)?; match self { Self::Commit(commit) => { commit.encode_to(&mut out)?; } Self::Tree(items) => { for item in items { item.encode_to(&mut out)?; } } Self::Blob(blob) => { out.extend_from_slice(blob); } } Ok(sha1::Sha1::digest(&out)) } }
size
identifier_name
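The TreeItem code in this row serializes each entry as `[mode] [name]\0[hash]`, with the hash kept as raw bytes rather than hex. A small sketch of that layout, assuming a 20-byte SHA-1 (tree_entry is an illustrative name, not part of the row):

// Encode one tree entry: ASCII mode, space, name, NUL, then the raw 20-byte hash.
fn tree_entry(mode: &str, name: &str, hash: &[u8; 20]) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend_from_slice(mode.as_bytes()); // "100644" for files, "40000" for directories
    out.push(b' ');
    out.extend_from_slice(name.as_bytes());
    out.push(0); // NUL separates the name from the raw hash
    out.extend_from_slice(hash);
    out
}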
poll.rs
extern crate nix; use std::os::unix::io::AsRawFd; use std::os::unix::io::RawFd; use std::time; use std::io; use self::nix::sys::epoll; /// Polls for readiness events on all registered file descriptors. /// /// `Poll` allows a program to monitor a large number of file descriptors, waiting until one or /// more become "ready" for some class of operations; e.g. reading and writing. A file descriptor /// is considered ready if it is possible to immediately perform a corresponding operation; e.g. /// [`read`]. /// /// These `Poll` instances are optimized for a worker pool use-case, and so they are all /// oneshot, edge-triggered, and only support "ready to read". /// /// To use `Poll`, a file descriptor must first be registered with the `Poll` instance using the /// [`register`] method. A `Token` is also passed to the [`register`] function, and that same /// `Token` is returned when the given file descriptor is ready. /// /// [`read`]: tcp/struct.TcpStream.html#method.read /// [`register`]: #method.register /// [`reregister`]: #method.reregister /// /// # Examples /// /// A basic example -- establishing a `TcpStream` connection. /// /// ```no_run /// # extern crate mio; /// # extern crate mio_pool; /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Events, Poll, Token}; /// use mio::net::TcpStream; /// /// use std::net::{TcpListener, SocketAddr}; /// /// // Bind a server socket to connect to. /// let addr: SocketAddr = "127.0.0.1:0".parse()?; /// let server = TcpListener::bind(&addr)?; /// /// // Construct a new `Poll` handle as well as the `Events` we'll store into /// let poll = Poll::new()?; /// let mut events = Events::with_capacity(1024); /// /// // Connect the stream /// let stream = TcpStream::connect(&server.local_addr()?)?; /// /// // Register the stream with `Poll` /// poll.register(&stream, Token(0))?; /// /// // Wait for the socket to become ready. This has to happens in a loop to /// // handle spurious wakeups. /// loop { /// poll.poll(&mut events, None)?; /// /// for Token(t) in &events { /// if t == 0 { /// // The socket connected (probably; it could be a spurious wakeup) /// return Ok(()); /// } /// } /// } /// # Ok(()) /// # } /// #
/// # } /// ``` /// /// # Exclusive access /// /// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file /// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued /// for a given descriptor, not more events will be issued for that descriptor until it has been /// re-registered using [`reregister`]. pub struct Poll(RawFd); /// Associates an event with a file descriptor. /// /// `Token` is a wrapper around `usize`, and is used as an argument to /// [`Poll::register`] and [`Poll::reregister`]. /// /// See [`Poll`] for more documentation on polling. You will likely want to use something like /// [`slab`] for creating and managing these. /// /// [`Poll`]: struct.Poll.html /// [`Poll::register`]: struct.Poll.html#method.register /// [`Poll::reregister`]: struct.Poll.html#method.reregister /// [`slab`]: https://crates.io/crates/slab pub struct Token(pub usize); /// A collection of readiness events. /// /// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received /// since the last poll. Usually, a single `Events` instance is created at the same time as a /// [`Poll`] and reused on each call to [`Poll::poll`]. /// /// See [`Poll`] for more documentation on polling. /// /// [`Poll::poll`]: struct.Poll.html#method.poll /// [`Poll`]: struct.Poll.html pub struct Events { all: Vec<epoll::EpollEvent>, /// How many of the events in `.all` are filled with responses to the last `poll()`? current: usize, } impl Events { /// Return a new `Events` capable of holding up to `capacity` events. pub fn with_capacity(capacity: usize) -> Events { let mut events = Vec::new(); events.resize(capacity, epoll::EpollEvent::empty()); Events { all: events, current: 0, } } } fn nix_to_io_err(e: nix::Error) -> io::Error { match e { nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32), nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e), } } impl Poll { /// Return a new `Poll` handle. /// /// This function will make a syscall to the operating system to create the system selector. If /// this syscall fails, `Poll::new` will return with the error. /// /// See [struct] level docs for more details. /// /// [struct]: struct.Poll.html /// /// # Examples /// /// ``` /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Poll, Events}; /// use std::time::Duration; /// /// let poll = match Poll::new() { /// Ok(poll) => poll, /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), /// }; /// /// // Create a structure to receive polled events /// let mut events = Events::with_capacity(1024); /// /// // Wait for events, but none will be received because no `Evented` /// // handles have been registered with this `Poll` instance. 
/// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?; /// assert_eq!(n, 0); /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` pub fn new() -> io::Result<Self> { epoll::epoll_create1(epoll::EpollCreateFlags::empty()) .map(Poll) .map_err(nix_to_io_err) } fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> { let mut event = epoll::EpollEvent::new( epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT, t.0 as u64, ); epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err) } /// Register a file descriptor with this `Poll` instance. /// /// Once registered, the `Poll` instance monitors the given descriptor for readiness state /// changes. When it notices a state change, it will return a readiness event for the handle /// the next time [`poll`] is called. /// /// See the [`struct`] docs for a high level overview. /// /// `token` is user-defined value that is associated with the given `file`. When [`poll`] /// returns an event for `file`, this token is included. This allows the caller to map the /// event back to its descriptor. The token associated with a file descriptor can be changed at /// any time by calling [`reregister`]. pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlAdd) } /// Re-register a file descriptor with this `Poll` instance. /// /// When you re-register a file descriptor, you can change the details of the registration. /// Specifically, you can update the `token` specified in previous `register` and `reregister` /// calls. /// /// See the [`register`] documentation for details about the function /// arguments and see the [`struct`] docs for a high level overview of /// polling. /// /// [`struct`]: # /// [`register`]: #method.register pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlMod) } /// Deregister a file descriptor from this `Poll` instance. /// /// When you deregister a file descriptor, it will no longer be modified for readiness events, /// and it will no longer produce events from `poll`. pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> { epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None) .map_err(nix_to_io_err) } /// Wait for events on file descriptors associated with this `Poll` instance. /// /// Blocks the current thread and waits for events for any of the file descriptors that are /// registered with this `Poll` instance. The function blocks until either at least one /// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means /// that `poll` blocks until a readiness event has been received. /// /// The supplied `events` will be cleared and newly received readiness events will be pushed /// onto the end. At most `events.capacity()` events will be returned. If there are further /// pending readiness events, they are returned on the next call to `poll`. /// /// Note that once an event has been issued for a given `token` (or rather, for the token's /// file descriptor), no further events will be issued for that descriptor until it has been /// re-registered. Note also that the `timeout` is rounded up to the system clock granularity /// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun /// by a small amount. 
/// /// `poll` returns the number of events that have been pushed into `events`, or `Err` when an /// error has been encountered with the system selector. /// /// See the [struct] level documentation for a higher level discussion of polling. /// /// [struct]: # pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> { let timeout = match timeout { None => -1, Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize, }; events.current = epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?; Ok(events.current) } } /// [`Events`] iterator. /// /// This struct is created by the `into_iter` method on [`Events`]. /// /// [`Events`]: struct.Events.html pub struct EventsIterator<'a> { events: &'a Events, at: usize, } impl<'a> IntoIterator for &'a Events { type IntoIter = EventsIterator<'a>; type Item = Token; fn into_iter(self) -> Self::IntoIter { EventsIterator { events: self, at: 0, } } } impl<'a> Iterator for EventsIterator<'a> { type Item = Token; fn next(&mut self) -> Option<Self::Item> { let at = &mut self.at; if *at >= self.events.current { // events beyond .1 are old return None; } self.events.all.get(*at).map(|e| { *at += 1; Token(e.data() as usize) }) } }
/// # fn main() { /// # try_main().unwrap();
random_line_split
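The poll() method appearing in this row converts an Option<Duration> into the integer millisecond timeout that epoll_wait expects, with -1 meaning "block indefinitely". A stand-alone sketch of just that conversion (epoll_timeout_ms is an illustrative name):

use std::time::Duration;

// None => block forever; Some(d) => whole milliseconds, as epoll_wait expects.
fn epoll_timeout_ms(timeout: Option<Duration>) -> isize {
    match timeout {
        None => -1,
        Some(d) => (d.as_secs() * 1000 + u64::from(d.subsec_millis())) as isize,
    }
}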
poll.rs
extern crate nix; use std::os::unix::io::AsRawFd; use std::os::unix::io::RawFd; use std::time; use std::io; use self::nix::sys::epoll; /// Polls for readiness events on all registered file descriptors. /// /// `Poll` allows a program to monitor a large number of file descriptors, waiting until one or /// more become "ready" for some class of operations; e.g. reading and writing. A file descriptor /// is considered ready if it is possible to immediately perform a corresponding operation; e.g. /// [`read`]. /// /// These `Poll` instances are optimized for a worker pool use-case, and so they are all /// oneshot, edge-triggered, and only support "ready to read". /// /// To use `Poll`, a file descriptor must first be registered with the `Poll` instance using the /// [`register`] method. A `Token` is also passed to the [`register`] function, and that same /// `Token` is returned when the given file descriptor is ready. /// /// [`read`]: tcp/struct.TcpStream.html#method.read /// [`register`]: #method.register /// [`reregister`]: #method.reregister /// /// # Examples /// /// A basic example -- establishing a `TcpStream` connection. /// /// ```no_run /// # extern crate mio; /// # extern crate mio_pool; /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Events, Poll, Token}; /// use mio::net::TcpStream; /// /// use std::net::{TcpListener, SocketAddr}; /// /// // Bind a server socket to connect to. /// let addr: SocketAddr = "127.0.0.1:0".parse()?; /// let server = TcpListener::bind(&addr)?; /// /// // Construct a new `Poll` handle as well as the `Events` we'll store into /// let poll = Poll::new()?; /// let mut events = Events::with_capacity(1024); /// /// // Connect the stream /// let stream = TcpStream::connect(&server.local_addr()?)?; /// /// // Register the stream with `Poll` /// poll.register(&stream, Token(0))?; /// /// // Wait for the socket to become ready. This has to happens in a loop to /// // handle spurious wakeups. /// loop { /// poll.poll(&mut events, None)?; /// /// for Token(t) in &events { /// if t == 0 { /// // The socket connected (probably; it could be a spurious wakeup) /// return Ok(()); /// } /// } /// } /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` /// /// # Exclusive access /// /// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file /// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued /// for a given descriptor, not more events will be issued for that descriptor until it has been /// re-registered using [`reregister`]. pub struct Poll(RawFd); /// Associates an event with a file descriptor. /// /// `Token` is a wrapper around `usize`, and is used as an argument to /// [`Poll::register`] and [`Poll::reregister`]. /// /// See [`Poll`] for more documentation on polling. You will likely want to use something like /// [`slab`] for creating and managing these. /// /// [`Poll`]: struct.Poll.html /// [`Poll::register`]: struct.Poll.html#method.register /// [`Poll::reregister`]: struct.Poll.html#method.reregister /// [`slab`]: https://crates.io/crates/slab pub struct Token(pub usize); /// A collection of readiness events. /// /// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received /// since the last poll. Usually, a single `Events` instance is created at the same time as a /// [`Poll`] and reused on each call to [`Poll::poll`]. 
/// /// See [`Poll`] for more documentation on polling. /// /// [`Poll::poll`]: struct.Poll.html#method.poll /// [`Poll`]: struct.Poll.html pub struct Events { all: Vec<epoll::EpollEvent>, /// How many of the events in `.all` are filled with responses to the last `poll()`? current: usize, } impl Events { /// Return a new `Events` capable of holding up to `capacity` events. pub fn with_capacity(capacity: usize) -> Events { let mut events = Vec::new(); events.resize(capacity, epoll::EpollEvent::empty()); Events { all: events, current: 0, } } } fn nix_to_io_err(e: nix::Error) -> io::Error { match e { nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32), nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e), } } impl Poll { /// Return a new `Poll` handle. /// /// This function will make a syscall to the operating system to create the system selector. If /// this syscall fails, `Poll::new` will return with the error. /// /// See [struct] level docs for more details. /// /// [struct]: struct.Poll.html /// /// # Examples /// /// ``` /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Poll, Events}; /// use std::time::Duration; /// /// let poll = match Poll::new() { /// Ok(poll) => poll, /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), /// }; /// /// // Create a structure to receive polled events /// let mut events = Events::with_capacity(1024); /// /// // Wait for events, but none will be received because no `Evented` /// // handles have been registered with this `Poll` instance. /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?; /// assert_eq!(n, 0); /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` pub fn new() -> io::Result<Self> { epoll::epoll_create1(epoll::EpollCreateFlags::empty()) .map(Poll) .map_err(nix_to_io_err) } fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> { let mut event = epoll::EpollEvent::new( epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT, t.0 as u64, ); epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err) } /// Register a file descriptor with this `Poll` instance. /// /// Once registered, the `Poll` instance monitors the given descriptor for readiness state /// changes. When it notices a state change, it will return a readiness event for the handle /// the next time [`poll`] is called. /// /// See the [`struct`] docs for a high level overview. /// /// `token` is user-defined value that is associated with the given `file`. When [`poll`] /// returns an event for `file`, this token is included. This allows the caller to map the /// event back to its descriptor. The token associated with a file descriptor can be changed at /// any time by calling [`reregister`]. pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlAdd) } /// Re-register a file descriptor with this `Poll` instance. /// /// When you re-register a file descriptor, you can change the details of the registration. /// Specifically, you can update the `token` specified in previous `register` and `reregister` /// calls. /// /// See the [`register`] documentation for details about the function /// arguments and see the [`struct`] docs for a high level overview of /// polling. 
/// /// [`struct`]: # /// [`register`]: #method.register pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlMod) } /// Deregister a file descriptor from this `Poll` instance. /// /// When you deregister a file descriptor, it will no longer be modified for readiness events, /// and it will no longer produce events from `poll`. pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> { epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None) .map_err(nix_to_io_err) } /// Wait for events on file descriptors associated with this `Poll` instance. /// /// Blocks the current thread and waits for events for any of the file descriptors that are /// registered with this `Poll` instance. The function blocks until either at least one /// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means /// that `poll` blocks until a readiness event has been received. /// /// The supplied `events` will be cleared and newly received readiness events will be pushed /// onto the end. At most `events.capacity()` events will be returned. If there are further /// pending readiness events, they are returned on the next call to `poll`. /// /// Note that once an event has been issued for a given `token` (or rather, for the token's /// file descriptor), no further events will be issued for that descriptor until it has been /// re-registered. Note also that the `timeout` is rounded up to the system clock granularity /// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun /// by a small amount. /// /// `poll` returns the number of events that have been pushed into `events`, or `Err` when an /// error has been encountered with the system selector. /// /// See the [struct] level documentation for a higher level discussion of polling. /// /// [struct]: # pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> { let timeout = match timeout { None => -1, Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize, }; events.current = epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?; Ok(events.current) } } /// [`Events`] iterator. /// /// This struct is created by the `into_iter` method on [`Events`]. /// /// [`Events`]: struct.Events.html pub struct EventsIterator<'a> { events: &'a Events, at: usize, } impl<'a> IntoIterator for &'a Events { type IntoIter = EventsIterator<'a>; type Item = Token; fn into_iter(self) -> Self::IntoIter { EventsIterator { events: self, at: 0, } } } impl<'a> Iterator for EventsIterator<'a> { type Item = Token; fn next(&mut self) -> Option<Self::Item> { let at = &mut self.at; if *at >= self.events.current
self.events.all.get(*at).map(|e| { *at += 1; Token(e.data() as usize) }) } }
{ // events beyond .1 are old return None; }
conditional_block
poll.rs
extern crate nix; use std::os::unix::io::AsRawFd; use std::os::unix::io::RawFd; use std::time; use std::io; use self::nix::sys::epoll; /// Polls for readiness events on all registered file descriptors. /// /// `Poll` allows a program to monitor a large number of file descriptors, waiting until one or /// more become "ready" for some class of operations; e.g. reading and writing. A file descriptor /// is considered ready if it is possible to immediately perform a corresponding operation; e.g. /// [`read`]. /// /// These `Poll` instances are optimized for a worker pool use-case, and so they are all /// oneshot, edge-triggered, and only support "ready to read". /// /// To use `Poll`, a file descriptor must first be registered with the `Poll` instance using the /// [`register`] method. A `Token` is also passed to the [`register`] function, and that same /// `Token` is returned when the given file descriptor is ready. /// /// [`read`]: tcp/struct.TcpStream.html#method.read /// [`register`]: #method.register /// [`reregister`]: #method.reregister /// /// # Examples /// /// A basic example -- establishing a `TcpStream` connection. /// /// ```no_run /// # extern crate mio; /// # extern crate mio_pool; /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Events, Poll, Token}; /// use mio::net::TcpStream; /// /// use std::net::{TcpListener, SocketAddr}; /// /// // Bind a server socket to connect to. /// let addr: SocketAddr = "127.0.0.1:0".parse()?; /// let server = TcpListener::bind(&addr)?; /// /// // Construct a new `Poll` handle as well as the `Events` we'll store into /// let poll = Poll::new()?; /// let mut events = Events::with_capacity(1024); /// /// // Connect the stream /// let stream = TcpStream::connect(&server.local_addr()?)?; /// /// // Register the stream with `Poll` /// poll.register(&stream, Token(0))?; /// /// // Wait for the socket to become ready. This has to happens in a loop to /// // handle spurious wakeups. /// loop { /// poll.poll(&mut events, None)?; /// /// for Token(t) in &events { /// if t == 0 { /// // The socket connected (probably; it could be a spurious wakeup) /// return Ok(()); /// } /// } /// } /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` /// /// # Exclusive access /// /// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file /// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued /// for a given descriptor, not more events will be issued for that descriptor until it has been /// re-registered using [`reregister`]. pub struct Poll(RawFd); /// Associates an event with a file descriptor. /// /// `Token` is a wrapper around `usize`, and is used as an argument to /// [`Poll::register`] and [`Poll::reregister`]. /// /// See [`Poll`] for more documentation on polling. You will likely want to use something like /// [`slab`] for creating and managing these. /// /// [`Poll`]: struct.Poll.html /// [`Poll::register`]: struct.Poll.html#method.register /// [`Poll::reregister`]: struct.Poll.html#method.reregister /// [`slab`]: https://crates.io/crates/slab pub struct Token(pub usize); /// A collection of readiness events. /// /// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received /// since the last poll. Usually, a single `Events` instance is created at the same time as a /// [`Poll`] and reused on each call to [`Poll::poll`]. 
/// /// See [`Poll`] for more documentation on polling. /// /// [`Poll::poll`]: struct.Poll.html#method.poll /// [`Poll`]: struct.Poll.html pub struct Events { all: Vec<epoll::EpollEvent>, /// How many of the events in `.all` are filled with responses to the last `poll()`? current: usize, } impl Events { /// Return a new `Events` capable of holding up to `capacity` events. pub fn with_capacity(capacity: usize) -> Events { let mut events = Vec::new(); events.resize(capacity, epoll::EpollEvent::empty()); Events { all: events, current: 0, } } } fn nix_to_io_err(e: nix::Error) -> io::Error { match e { nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32), nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e), } } impl Poll { /// Return a new `Poll` handle. /// /// This function will make a syscall to the operating system to create the system selector. If /// this syscall fails, `Poll::new` will return with the error. /// /// See [struct] level docs for more details. /// /// [struct]: struct.Poll.html /// /// # Examples /// /// ``` /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Poll, Events}; /// use std::time::Duration; /// /// let poll = match Poll::new() { /// Ok(poll) => poll, /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), /// }; /// /// // Create a structure to receive polled events /// let mut events = Events::with_capacity(1024); /// /// // Wait for events, but none will be received because no `Evented` /// // handles have been registered with this `Poll` instance. /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?; /// assert_eq!(n, 0); /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` pub fn new() -> io::Result<Self> { epoll::epoll_create1(epoll::EpollCreateFlags::empty()) .map(Poll) .map_err(nix_to_io_err) } fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> { let mut event = epoll::EpollEvent::new( epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT, t.0 as u64, ); epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err) } /// Register a file descriptor with this `Poll` instance. /// /// Once registered, the `Poll` instance monitors the given descriptor for readiness state /// changes. When it notices a state change, it will return a readiness event for the handle /// the next time [`poll`] is called. /// /// See the [`struct`] docs for a high level overview. /// /// `token` is user-defined value that is associated with the given `file`. When [`poll`] /// returns an event for `file`, this token is included. This allows the caller to map the /// event back to its descriptor. The token associated with a file descriptor can be changed at /// any time by calling [`reregister`]. pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlAdd) } /// Re-register a file descriptor with this `Poll` instance. /// /// When you re-register a file descriptor, you can change the details of the registration. /// Specifically, you can update the `token` specified in previous `register` and `reregister` /// calls. /// /// See the [`register`] documentation for details about the function /// arguments and see the [`struct`] docs for a high level overview of /// polling. 
/// /// [`struct`]: # /// [`register`]: #method.register pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlMod) } /// Deregister a file descriptor from this `Poll` instance. /// /// When you deregister a file descriptor, it will no longer be modified for readiness events, /// and it will no longer produce events from `poll`. pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> { epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None) .map_err(nix_to_io_err) } /// Wait for events on file descriptors associated with this `Poll` instance. /// /// Blocks the current thread and waits for events for any of the file descriptors that are /// registered with this `Poll` instance. The function blocks until either at least one /// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means /// that `poll` blocks until a readiness event has been received. /// /// The supplied `events` will be cleared and newly received readiness events will be pushed /// onto the end. At most `events.capacity()` events will be returned. If there are further /// pending readiness events, they are returned on the next call to `poll`. /// /// Note that once an event has been issued for a given `token` (or rather, for the token's /// file descriptor), no further events will be issued for that descriptor until it has been /// re-registered. Note also that the `timeout` is rounded up to the system clock granularity /// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun /// by a small amount. /// /// `poll` returns the number of events that have been pushed into `events`, or `Err` when an /// error has been encountered with the system selector. /// /// See the [struct] level documentation for a higher level discussion of polling. /// /// [struct]: # pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> { let timeout = match timeout { None => -1, Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize, }; events.current = epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?; Ok(events.current) } } /// [`Events`] iterator. /// /// This struct is created by the `into_iter` method on [`Events`]. /// /// [`Events`]: struct.Events.html pub struct
<'a> { events: &'a Events, at: usize, } impl<'a> IntoIterator for &'a Events { type IntoIter = EventsIterator<'a>; type Item = Token; fn into_iter(self) -> Self::IntoIter { EventsIterator { events: self, at: 0, } } } impl<'a> Iterator for EventsIterator<'a> { type Item = Token; fn next(&mut self) -> Option<Self::Item> { let at = &mut self.at; if *at >= self.events.current { // events beyond .1 are old return None; } self.events.all.get(*at).map(|e| { *at += 1; Token(e.data() as usize) }) } }
EventsIterator
identifier_name
poll.rs
extern crate nix; use std::os::unix::io::AsRawFd; use std::os::unix::io::RawFd; use std::time; use std::io; use self::nix::sys::epoll; /// Polls for readiness events on all registered file descriptors. /// /// `Poll` allows a program to monitor a large number of file descriptors, waiting until one or /// more become "ready" for some class of operations; e.g. reading and writing. A file descriptor /// is considered ready if it is possible to immediately perform a corresponding operation; e.g. /// [`read`]. /// /// These `Poll` instances are optimized for a worker pool use-case, and so they are all /// oneshot, edge-triggered, and only support "ready to read". /// /// To use `Poll`, a file descriptor must first be registered with the `Poll` instance using the /// [`register`] method. A `Token` is also passed to the [`register`] function, and that same /// `Token` is returned when the given file descriptor is ready. /// /// [`read`]: tcp/struct.TcpStream.html#method.read /// [`register`]: #method.register /// [`reregister`]: #method.reregister /// /// # Examples /// /// A basic example -- establishing a `TcpStream` connection. /// /// ```no_run /// # extern crate mio; /// # extern crate mio_pool; /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Events, Poll, Token}; /// use mio::net::TcpStream; /// /// use std::net::{TcpListener, SocketAddr}; /// /// // Bind a server socket to connect to. /// let addr: SocketAddr = "127.0.0.1:0".parse()?; /// let server = TcpListener::bind(&addr)?; /// /// // Construct a new `Poll` handle as well as the `Events` we'll store into /// let poll = Poll::new()?; /// let mut events = Events::with_capacity(1024); /// /// // Connect the stream /// let stream = TcpStream::connect(&server.local_addr()?)?; /// /// // Register the stream with `Poll` /// poll.register(&stream, Token(0))?; /// /// // Wait for the socket to become ready. This has to happens in a loop to /// // handle spurious wakeups. /// loop { /// poll.poll(&mut events, None)?; /// /// for Token(t) in &events { /// if t == 0 { /// // The socket connected (probably; it could be a spurious wakeup) /// return Ok(()); /// } /// } /// } /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` /// /// # Exclusive access /// /// Since this `Poll` implementation is optimized for worker-pool style use-cases, all file /// descriptors are registered using `EPOLL_ONESHOT`. This means that once an event has been issued /// for a given descriptor, not more events will be issued for that descriptor until it has been /// re-registered using [`reregister`]. pub struct Poll(RawFd); /// Associates an event with a file descriptor. /// /// `Token` is a wrapper around `usize`, and is used as an argument to /// [`Poll::register`] and [`Poll::reregister`]. /// /// See [`Poll`] for more documentation on polling. You will likely want to use something like /// [`slab`] for creating and managing these. /// /// [`Poll`]: struct.Poll.html /// [`Poll::register`]: struct.Poll.html#method.register /// [`Poll::reregister`]: struct.Poll.html#method.reregister /// [`slab`]: https://crates.io/crates/slab pub struct Token(pub usize); /// A collection of readiness events. /// /// `Events` is passed as an argument to [`Poll::poll`], and provides any readiness events received /// since the last poll. Usually, a single `Events` instance is created at the same time as a /// [`Poll`] and reused on each call to [`Poll::poll`]. 
/// /// See [`Poll`] for more documentation on polling. /// /// [`Poll::poll`]: struct.Poll.html#method.poll /// [`Poll`]: struct.Poll.html pub struct Events { all: Vec<epoll::EpollEvent>, /// How many of the events in `.all` are filled with responses to the last `poll()`? current: usize, } impl Events { /// Return a new `Events` capable of holding up to `capacity` events. pub fn with_capacity(capacity: usize) -> Events { let mut events = Vec::new(); events.resize(capacity, epoll::EpollEvent::empty()); Events { all: events, current: 0, } } } fn nix_to_io_err(e: nix::Error) -> io::Error { match e { nix::Error::Sys(errno) => io::Error::from_raw_os_error(errno as i32), nix::Error::InvalidPath => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::InvalidUtf8 => io::Error::new(io::ErrorKind::InvalidInput, e), nix::Error::UnsupportedOperation => io::Error::new(io::ErrorKind::Other, e), } } impl Poll { /// Return a new `Poll` handle. /// /// This function will make a syscall to the operating system to create the system selector. If /// this syscall fails, `Poll::new` will return with the error. /// /// See [struct] level docs for more details. /// /// [struct]: struct.Poll.html /// /// # Examples /// /// ``` /// # use std::error::Error; /// # fn try_main() -> Result<(), Box<Error>> { /// use mio_pool::poll::{Poll, Events}; /// use std::time::Duration; /// /// let poll = match Poll::new() { /// Ok(poll) => poll, /// Err(e) => panic!("failed to create Poll instance; err={:?}", e), /// }; /// /// // Create a structure to receive polled events /// let mut events = Events::with_capacity(1024); /// /// // Wait for events, but none will be received because no `Evented` /// // handles have been registered with this `Poll` instance. /// let n = poll.poll(&mut events, Some(Duration::from_millis(500)))?; /// assert_eq!(n, 0); /// # Ok(()) /// # } /// # /// # fn main() { /// # try_main().unwrap(); /// # } /// ``` pub fn new() -> io::Result<Self> { epoll::epoll_create1(epoll::EpollCreateFlags::empty()) .map(Poll) .map_err(nix_to_io_err) } fn ctl(&self, file: &AsRawFd, t: Token, op: epoll::EpollOp) -> io::Result<()> { let mut event = epoll::EpollEvent::new( epoll::EpollFlags::EPOLLIN | epoll::EpollFlags::EPOLLONESHOT, t.0 as u64, ); epoll::epoll_ctl(self.0, op, file.as_raw_fd(), &mut event).map_err(nix_to_io_err) } /// Register a file descriptor with this `Poll` instance. /// /// Once registered, the `Poll` instance monitors the given descriptor for readiness state /// changes. When it notices a state change, it will return a readiness event for the handle /// the next time [`poll`] is called. /// /// See the [`struct`] docs for a high level overview. /// /// `token` is user-defined value that is associated with the given `file`. When [`poll`] /// returns an event for `file`, this token is included. This allows the caller to map the /// event back to its descriptor. The token associated with a file descriptor can be changed at /// any time by calling [`reregister`]. pub fn register(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlAdd) } /// Re-register a file descriptor with this `Poll` instance. /// /// When you re-register a file descriptor, you can change the details of the registration. /// Specifically, you can update the `token` specified in previous `register` and `reregister` /// calls. /// /// See the [`register`] documentation for details about the function /// arguments and see the [`struct`] docs for a high level overview of /// polling. 
/// /// [`struct`]: # /// [`register`]: #method.register pub fn reregister(&self, file: &AsRawFd, t: Token) -> io::Result<()> { self.ctl(file, t, epoll::EpollOp::EpollCtlMod) } /// Deregister a file descriptor from this `Poll` instance. /// /// When you deregister a file descriptor, it will no longer be modified for readiness events, /// and it will no longer produce events from `poll`. pub fn deregister(&self, file: &AsRawFd) -> io::Result<()> { epoll::epoll_ctl(self.0, epoll::EpollOp::EpollCtlDel, file.as_raw_fd(), None) .map_err(nix_to_io_err) } /// Wait for events on file descriptors associated with this `Poll` instance. /// /// Blocks the current thread and waits for events for any of the file descriptors that are /// registered with this `Poll` instance. The function blocks until either at least one /// readiness event has been received or `timeout` has elapsed. A `timeout` of `None` means /// that `poll` blocks until a readiness event has been received. /// /// The supplied `events` will be cleared and newly received readiness events will be pushed /// onto the end. At most `events.capacity()` events will be returned. If there are further /// pending readiness events, they are returned on the next call to `poll`. /// /// Note that once an event has been issued for a given `token` (or rather, for the token's /// file descriptor), no further events will be issued for that descriptor until it has been /// re-registered. Note also that the `timeout` is rounded up to the system clock granularity /// (usually 1ms), and kernel scheduling delays mean that the blocking interval may be overrun /// by a small amount. /// /// `poll` returns the number of events that have been pushed into `events`, or `Err` when an /// error has been encountered with the system selector. /// /// See the [struct] level documentation for a higher level discussion of polling. /// /// [struct]: # pub fn poll(&self, events: &mut Events, timeout: Option<time::Duration>) -> io::Result<usize> { let timeout = match timeout { None => -1, Some(d) => (d.as_secs() * 1000 + d.subsec_nanos() as u64 / 1_000_000) as isize, }; events.current = epoll::epoll_wait(self.0, &mut events.all[..], timeout).map_err(nix_to_io_err)?; Ok(events.current) } } /// [`Events`] iterator. /// /// This struct is created by the `into_iter` method on [`Events`]. /// /// [`Events`]: struct.Events.html pub struct EventsIterator<'a> { events: &'a Events, at: usize, } impl<'a> IntoIterator for &'a Events { type IntoIter = EventsIterator<'a>; type Item = Token; fn into_iter(self) -> Self::IntoIter { EventsIterator { events: self, at: 0, } } } impl<'a> Iterator for EventsIterator<'a> { type Item = Token; fn next(&mut self) -> Option<Self::Item>
}
{ let at = &mut self.at; if *at >= self.events.current { // events beyond .1 are old return None; } self.events.all.get(*at).map(|e| { *at += 1; Token(e.data() as usize) }) }
identifier_body
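The EventsIterator completed in this last row walks only the first `current` entries of a preallocated buffer, since anything beyond that index is stale output from an earlier poll. A generic sketch of that "filled prefix" iteration pattern (FilledPrefix is an illustrative name, not a type from the crate):

// Iterate only over the leading `filled` entries of a reused buffer.
struct FilledPrefix<'a, T> {
    items: &'a [T],
    filled: usize, // how many leading entries hold results from the last poll
    at: usize,
}

impl<'a, T> Iterator for FilledPrefix<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
        if self.at >= self.filled {
            return None; // entries past `filled` are leftovers from older polls
        }
        let item = &self.items[self.at];
        self.at += 1;
        Some(item)
    }
}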