file_name (string, 4–140 chars) | prefix (string, up to 39k chars) | suffix (string, up to 36.1k chars) | middle (string, up to 29.4k chars) | fim_type (4 classes)
---|---|---|---|---
infer_lst.py | # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import argparse
import datetime
import json
import random
import time
from pathlib import Path
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
import torchvision.transforms as T
import numpy as np
import torch
from torch.utils.data import DataLoader
import datasets
import util.misc as utils
from util import box_ops
import datasets.samplers as samplers
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
import os
def get_args_parser():
parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)
parser.add_argument('--lr', default=2e-4, type=float)
parser.add_argument('--lr_backbone_names', default=["backbone.0"], type=str, nargs='+')
parser.add_argument('--lr_backbone', default=2e-5, type=float)
parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--lr_drop', default=40, type=int)
parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
parser.add_argument('--sgd', action='store_true')
# Variants of Deformable DETR
parser.add_argument('--with_box_refine', default=False, action='store_true')
parser.add_argument('--two_stage', default=False, action='store_true')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
help="position / size * scale")
parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=1024, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=300, type=int,
help="Number of query slots")
parser.add_argument('--dec_n_points', default=4, type=int)
parser.add_argument('--enc_n_points', default=4, type=int)
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=2, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--cls_loss_coef', default=2, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--focal_alpha', default=0.25, type=float)
# dataset parameters
parser.add_argument('--dataset_file', default='ICDAR2013')
parser.add_argument('--coco_path', default='./data/coco', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--imgs_dir', type=str, help='input images folder for inference')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')
return parser
# standard PyTorch mean-std input image normalization
transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
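# (the mean/std values above are the standard ImageNet statistics expected by torchvision's
# pretrained backbones; Resize(800) rescales the shorter image side to 800 px)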
label_names = ['table', 'figure', 'natural_image', 'logo', 'signature']
colors = ['red', 'blue', 'green', 'yellow', 'black']
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'], strict=False)
if torch.cuda.is_available():
model.cuda()
model.eval()
for img_file in os.listdir(args.imgs_dir):
t0 = time.time()
img_path = os.path.join(args.imgs_dir, img_file)
out_imgName = './visualize/'+'out_'+img_file[:-4]+'.png'
im = Image.open(img_path)
# mean-std normalize the input image (batch-size: 1)
img = transform(im).unsqueeze(0)
img=img.cuda()
# propagate through the model
outputs = model(img)
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
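# Deformable DETR uses per-class sigmoid scores (focal loss) rather than a softmax, so the
# top-100 detections are picked from the flattened (num_queries x num_classes) score matrix;
# the integer division and modulo below recover the query index (box) and class index for
# each selected score.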
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = topk_indexes // out_logits.shape[2]
labels = topk_indexes % out_logits.shape[2]
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4))
keep = scores[0] > 0.2
boxes = boxes[0, keep]
labels = labels[0, keep]
# and from relative [0, 1] to absolute [0, height] coordinates
im_h,im_w = im.size
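# note: PIL's Image.size is (width, height), so im_h/im_w are effectively swapped here;
# building target_sizes as [im_w, im_h] below swaps them back, giving scale_fct the
# [width, height, width, height] layout required for xyxy boxes.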
#print('im_h,im_w',im_h,im_w)
target_sizes =torch.tensor([[im_w,im_h]])
target_sizes =target_sizes.cuda()
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
print(time.time()-t0)
#plot_results
source_img = Image.open(img_path).convert("RGBA")
fnt = ImageFont.truetype("/content/content/Deformable-DETR/font/Aaargh.ttf", 18)
draw = ImageDraw.Draw(source_img)
#print ('label' , labels.tolist())
label_list = labels.tolist()
#print("Boxes",boxes,boxes.tolist())
i=0
for xmin, ymin, xmax, ymax in boxes[0].tolist():
draw.rectangle(((xmin, ymin), (xmax, ymax)), outline =colors[label_list[i]-1])
# print('--------')
# print('i= ', i)
# print('label is = ', label_list[i]-1)
# print(label_names[label_list[i]-1])
if ymin-18 >=0 :
ymin = ymin-18
draw.text((xmin, ymin), label_names[label_list[i]-1], anchor = 'md', font=fnt, fill=colors[label_list[i]-1])
i+=1
source_img.save(out_imgName, "png")
results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]
print("Outputs",results)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Deformable DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
server.rs | // use std::{
// hash::Hash,
// str,
// io::Write,
// net::{SocketAddr, IpAddr, Ipv4Addr},
// sync::Mutex,
// time::{Instant}
// };
// use actix_http::{
// body::Body,
// http::{
// header::{CONTENT_TYPE, SERVER},
// HeaderValue,
// StatusCode,
// },
// Error, Request, Response,
// };
// use actix_service::{
// NewService,
// Service,
// };
// use actix_server::{ServerConfig};
// use actix_web::dev::Server
use actix::prelude::*;
// use bytes::{BytesMut, Bytes};
// use futures::{
// future::{
// ok,
// join_all,
// Future,
// },
// Async, Poll,
// };
// use serde_json::to_writer;
// use actix_web::{
// App,
// web,
// middleware,
// Error as AWError,
// HttpResponse,
// HttpRequest,
// HttpServer,
// };
// use actix_web_actors::ws::{Message as WsMessage, CloseCode, CloseReason };
// use askama::Template;
//use actix_redis::{Command, RedisActor, Error as ARError};
use actix_redis::{RedisActor};
// use redis_async::{
// client::{PairedConnection, paired_connect, PubsubConnection, pubsub_connect},
// resp::{RespValue},
// };
use crate::ws::{Close as WsClose, WsSession};
// use super::db::{RedisConnection};
// pub struct App {
// // db: PgConnection,
// db: RedisConnection,
// // db: Arc<PairedConnection>,
// hdr_srv: HeaderValue,
// hdr_ctjson: HeaderValue,
// hdr_cthtml: HeaderValue,
// }
// impl Service for App {
// type Request = Request;
// type Response = Response;
// type Error = Error;
// type Future = Box<dyn Future<Item = Response, Error = Error>>;
// #[inline]
// fn poll_ready(&mut self) -> Poll<(), Self::Error> {
// Ok(Async::Ready(()))
// }
// fn call(&mut self, req: Request) -> Self::Future {
// let path = req.path();
// match path {
// "/db" => {
// let h_srv = self.hdr_srv.clone();
// let h_ct = self.hdr_ctjson.clone();
// Box::new(self.db.get("mydomain:one")
// .map(|v:String| {
// let mut body = BytesMut::new();
// serde_json::to_writer(Writer(&mut body), &Message{
// message: &*v
// }).unwrap();
// let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze()));
// let hdrs = res.headers_mut();
// hdrs.insert(SERVER, h_srv);
// hdrs.insert(CONTENT_TYPE, h_ct);
// res
// })
// )
// }
// "/fortune" => {
// let h_srv = self.hdr_srv.clone();
// let h_ct = self.hdr_cthtml.clone();
// // Box::new(self.db.tell_fortune().from_err().map(move |fortunes| {
// Box::new(ok({
// let mut body = BytesMut::with_capacity(2048);
// let mut writer = Writer(&mut body);
// let _ = write!(writer, "{}", HelloTemplate { name : "tester" });//FortunesTemplate { fortunes });
// let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze()));
// let hdrs = res.headers_mut();
// hdrs.insert(SERVER, h_srv);
// hdrs.insert(CONTENT_TYPE, h_ct);
// res
// }))
// }
// "/json" => {
// Box::new(ok(json()))
// }
// "/plaintext" => {
// Box::new(ok(plaintext()))
// }
// // "/queries" => {
// // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize;
// // let h_srv = self.hdr_srv.clone();
// // let h_ct = self.hdr_ctjson.clone();
// // Box::new(self.db.get_worlds(q).from_err().map(move |worlds| {
// // let mut body = BytesMut::with_capacity(35 * worlds.len());
// // to_writer(Writer(&mut body), &worlds).unwrap();
// // let mut res =
// // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze()));
// // let hdrs = res.headers_mut();
// // hdrs.insert(SERVER, h_srv);
// // hdrs.insert(CONTENT_TYPE, h_ct);
// // res
// // }))
// // }
// // "/updates" => {
// // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize;
// // let h_srv = self.hdr_srv.clone();
// // let h_ct = self.hdr_ctjson.clone();
// // Box::new(self.db.update(q).from_err().map(move |worlds| {
// // let mut body = BytesMut::with_capacity(35 * worlds.len());
// // to_writer(Writer(&mut body), &worlds).unwrap();
// // let mut res =
// // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze()));
// // let hdrs = res.headers_mut();
// // hdrs.insert(SERVER, h_srv);
// // hdrs.insert(CONTENT_TYPE, h_ct);
// // res
// // }))
// // }
// _ => Box::new(ok(Response::new(StatusCode::NOT_FOUND))),
// }
// }
// }
// #[derive(Clone)]
// pub struct AppFactory;
// impl NewService for AppFactory {
// type Config = ServerConfig;
// type Request = Request;
// type Response = Response;
// type Error = Error;
// type Service = App;
// type InitError = ();
// type Future = Box<dyn Future<Item = Self::Service, Error = Self::InitError>>;
// fn new_service(&self, _: &ServerConfig) -> Self::Future {
// // const DB_URL: &str =
// // "postgres://benchmarkdbuser:benchmarkdbpass@tfb-database/hello_world";
// // Box::new(PgConnection::connect(DB_URL).map(|db| App {
// // db,
// // hdr_srv: HeaderValue::from_static("Actix"),
// // hdr_ctjson: HeaderValue::from_static("application/json"),
// // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"),
// // }));
// Box::new(
// // paired_connect(&String::from(DB_URL).parse().unwrap())
// RedisConnection::connect(DB_URL)
// .map_err(|_| ())
// .map(|db|{
// let app = App {
// db,
// hdr_srv: HeaderValue::from_static("Actix"),
// hdr_ctjson: HeaderValue::from_static("application/json"),
// hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"),
// };
// app
// })
// // })
// )
// }
// }
// pub fn json() -> HttpResponse {
// let message = Message {
// message: "Hello, World!",
// };
// let mut body = BytesMut::with_capacity(SIZE);
// serde_json::to_writer(Writer(&mut body), &message).unwrap();
// let mut res = HttpResponse::with_body(StatusCode::OK, Body::Bytes(body.freeze()));
// res.headers_mut()
// .insert(SERVER, HeaderValue::from_static("Actix"));
// res.headers_mut()
// .insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
// res
// }
// fn plaintext() -> HttpResponse {
// let mut res = HttpResponse::with_body(
// StatusCode::OK,
// Body::Bytes(Bytes::from_static(b"Hello, World!")),
// );
// res.headers_mut()
// .insert(SERVER, HeaderValue::from_static("Actix"));
// res.headers_mut()
// .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain"));
// res
// }
// #[derive(Template)]
// #[template(path = "test.html")]
// struct HelloTemplate<'a> {
// name: &'a str,
// }
// pub fn root_handler(
// req: web::HttpRequest
// ) -> impl Future<Item = HttpResponse, Error = ()> {
// let path = req.match_info().query("filename").parse().unwrap();
// HttpResponse::from(
// Ok( NamedFile::open(path).unwrap() )
// )
// // ok( HttpResponse::Ok().body("hi"))
// // Ok(HttpResponse::InternalServerError().finish())
// }
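/// Central actor that tracks the currently open websocket sessions and holds the
/// address of the shared Redis actor.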
pub struct WsServer {
sessions: Vec<Addr<WsSession>>,
db: Addr<RedisActor>,
}
impl Actor for WsServer {
type Context = Context<Self>;
}
impl WsServer {
pub fn new(db : Addr<RedisActor>) -> WsServer {
let sessions = vec![];
WsServer { sessions, db }
}
}
impl WsServer {
fn close_all(&self) {
// for s in &*self.sessions.lock().unwrap() {
for s in &self.sessions {
// if let Some(v) = s.upgrade(){
if s.connected() {
// println!("sending WsClose");
// v.do_send(WsClose);
s.do_send(WsClose);
//WsMessage::Close(Some(CloseReason { code: CloseCode::Restart, description: None })));
}
}
}
}
/// new websocket connection
#[derive(Message)]
pub struct Connect {
pub addr: Addr<WsSession>,
}
// impl Message for Connect {
// type Result = usize;
// }
impl Handler<Connect> for WsServer {
type Result = ();
fn handle(&mut self, msg: Connect, _ctx: &mut Self::Context) -> Self::Result {
// println!("{:?} joined wsserver", msg.addr);
// let mut s = &mut *self.sessions.get_mut().unwrap();
let s = &mut self.sessions;
s.push(msg.addr); //.downgrade());
println!(
"new web socket added to server : {} sockets opened",
s.len()
);
}
}
/// websocket session disconnected
#[derive(Message)]
pub struct Disconnect {
pub addr: Addr<WsSession>,
// pub id : usize,
}
impl Handler<Disconnect> for WsServer {
type Result = ();
fn handle(&mut self, msg: Disconnect, _ctx: &mut Self::Context) -> Self::Result {
println!("a websocket session requested disconnect");
let mut s = 0;
let mut f = false;
// let mut ss = &mut *self.sessions.get_mut().unwrap();
let ss = &mut self.sessions;
for i in 0..ss.len() {
// if let Some(v) = self.sessions[i].upgrade(){
if ss[i] == msg.addr {
// if ss[i] == msg.addr {
// if v == msg.addr {
s = i;
f = true;
break;
// }
}
}
if f {
ss.remove(s);
println!(
"a websocket session removed from server : {} sockets opened",
ss.len()
);
}
}
}
/// request to close all other connections
#[derive(Message)]
pub struct CloseAll;
impl Handler<CloseAll> for WsServer {
type Result = ();
fn handle(&mut self, _msg: CloseAll, _ctx: &mut Self::Context) -> Self::Result {
println!("received CloseAll");
self.close_all();
}
}
rt_network_v3_2_x.py | # -*- coding: utf-8 -*-
"""RT-Network-v3.2.x.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1MfD_C225OafgIsQKVy76p3E2Qam6kXDo
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x
# %load_ext tensorboard
import glob, math, os, re, sys, zipfile
import numpy as np
import pandas as pd
import tensorflow as tf
import seaborn as sns
import pickle as pkl
import matplotlib.pyplot as plt
from functools import reduce
from itertools import cycle
from datetime import datetime
from google.colab import auth
from oauth2client.client import GoogleCredentials
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from IPython.display import display, HTML
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.utils import resample
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, InputLayer
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
"""## Authorize Google Drive"""
auth.authenticate_user()
"""## Setup & Install
Basic setup and install additional dependencies
"""
# Some global variables and general settings
saved_model_dir = './saved_model'
tensorboard_logs = './logs'
pd.options.display.float_format = '{:.2f}'.format
sns.set_context('notebook')
nnet_tools_path = os.path.abspath('NNet')
def print_html(string, tag='span', color=None, size=None):
size = f'font-size:{size};' if size else ''
color = f'color:{color};' if color else ''
display(HTML(f'<{tag} style="{color}{size}">{string}</{tag}>'))
def print_heading(string, color=None):
print_html(string, tag='h3', color=color)
def print_message(string, color=None):
print_html(string, color=color)
def download_file_from_gdrive(gdrive_id, output_file):
# Authenticate google drive
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# Download csv from GDrive...
dataset = drive.CreateFile({'id': gdrive_id})
dataset_filename = dataset['title']
print_message(f'Downloading {dataset_filename} ({gdrive_id}) from GDrive. Please wait...')
dataset.GetContentFile(output_file)
print_message(f'Download {gdrive_id} completed.')
def download_and_unzip(src_url, out_dir='./', zip_file='dl.zip', remove_zip=True):
print(f'Downloading {src_url} to {zip_file}')
!wget $src_url -O $zip_file -q --show-progress
print(f'Download complete. Unzipping {zip_file}')
z = zipfile.ZipFile(zip_file, 'r')
z.extractall(out_dir)
print(f'Unzipped to {out_dir}. Cleaning up...')
z.close()
if remove_zip: os.remove(zip_file)
def overwrite_gdrive_file(gdrive_id, input_file):
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
drive_file = drive.CreateFile({'id': gdrive_id})
drive_file.SetContentFile(input_file)
drive_file.Upload()
drive_filename = drive_file['title']
print(f'Wrote {input_file} to GDrive {drive_filename} ({gdrive_id}).')
def update_path_vars(paths=[]):
python_path = os.environ.get('PYTHONPATH') or ''
jupyter_path = os.environ.get('JUPYTER_PATH') or ''
for path in paths:
if not path in python_path:
python_path += f':{path}'
if not path in jupyter_path:
jupyter_path += f':{path}'
os.environ['PYTHONPATH'] = python_path
os.environ['JUPYTER_PATH'] = jupyter_path
def install_nnet_tools(nnet_tools_path):
nnet_tools_url = 'https://github.com/sisl/NNet/archive/master.zip'
download_and_unzip(nnet_tools_url)
!mv ./NNet-master $nnet_tools_path
def setup_nnet_tools(nnet_tools_path):
# install nnet tools if not already installed.
if not os.path.exists(nnet_tools_path):
install_nnet_tools(nnet_tools_path)
# add folder to PYTHONPATH & JUPYTER_PATH
update_path_vars([nnet_tools_path])
# delete sample data
!rm -rf sample_data
# setup nnet tools (for converting model to Stanford's nnet format)
setup_nnet_tools(nnet_tools_path)
# used for conversion to NNet format
from NNet.utils.writeNNet import writeNNet
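# The NNet format is a plain-text description of a fully-connected ReLU network (weights,
# biases, input mins/maxs, and normalization constants) used by verification tools such as
# Reluplex/Marabou; writeNNet serializes a Keras-style weight list into that format.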
"""## Download Dataset"""
# GDrive ID of csv file (AllData_ReactionTime.csv)
# https://drive.google.com/file/d/1WNnAd8lYWMT_mQWwiN6dP2KC6NTLFjXn
gdrive_id='1WNnAd8lYWMT_mQWwiN6dP2KC6NTLFjXn'
dataset_file='all_data_rt.csv'
# GDrive ID of adversarial csv
# https://drive.google.com/file/d/1N9QZm0NXXYPCQnBNrJHPzmWHkU9SSkbg
adv_dataset_gdrive_id='1N9QZm0NXXYPCQnBNrJHPzmWHkU9SSkbg'
adv_dataset_file = 'adv_dataset.csv'
# load the dataset from gdrive if it doesn't exist in the runtime's filesystem.
if not os.path.exists(dataset_file):
download_file_from_gdrive(gdrive_id, dataset_file)
if not os.path.exists(adv_dataset_file):
download_file_from_gdrive(adv_dataset_gdrive_id, adv_dataset_file)
"""## Import Dataset"""
raw_columns = ['ID', 'Name', 'FixationDuration', 'FixationStart', 'FixationSeq',
'FixationX', 'FixationY', 'GazeDirectionLeftZ', 'GazeDirectionRightZ',
'PupilLeft', 'PupilRight', 'InterpolatedGazeX', 'InterpolatedGazeY',
'AutoThrottle', 'AutoWheel', 'CurrentThrottle', 'CurrentWheel',
'Distance3D', 'MPH', 'ManualBrake', 'ManualThrottle', 'ManualWheel',
'RangeW', 'RightLaneDist', 'RightLaneType', 'LeftLaneDist', 'LeftLaneType',
'ReactionTime']
raw_df = pd.read_csv(dataset_file, usecols=raw_columns)
raw_df.set_index(['ID'], inplace=True)
pure_df = pd.read_csv(dataset_file)
pure_df.set_index(['ID'], inplace=True)
pure_df
# 5 classes based on the mean & standard deviation of reaction time
def create_tot_categories(rt_column):
rt_mean = round(rt_column.mean())
rt_sdev = round(rt_column.std())
bound_1 = rt_mean - rt_sdev
bound_2 = rt_mean - rt_sdev // 2
bound_3 = rt_mean + rt_sdev // 2
bound_4 = rt_mean + rt_sdev
bins = [float('-inf'), bound_1, bound_2, bound_3, bound_4, float('inf')]
labels = np.array(['fast', 'med_fast', 'med', 'med_slow', 'slow'], dtype=object)
return (bins, labels)
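# Worked example (hypothetical numbers): with a mean reaction time of 1500 and a standard
# deviation of 600, the bins above would be (-inf, 900], (900, 1200], (1200, 1800],
# (1800, 2100], (2100, inf), labelled fast / med_fast / med / med_slow / slow respectively.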
# make a copy the raw data
df = raw_df.copy()
# compute 'TOT' categories
tot_bins, tot_labels = create_tot_categories(df.ReactionTime)
n_categories = len(tot_labels)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
# convert leftlane type and rightlanetype to categorical:
# add the class to the dataframe
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
# Select a handful of participants to save resources
df['Name'] = df['Name'].str.upper()
chunk_users = [[f'{i}'.zfill(3) + f'_M{j}' for j in range(1, 4)] for i in [13, 14]]
chunk_users = [u for l in chunk_users for u in l]
def upsample_minority_TOTs(X_train, y_train, tot_labels, random_state=27):
# contat the training data together.
X = pd.concat([X_train, y_train], axis=1)
# separate majority and minority classes
buckets = {l: X[X.TOT == l] for l in tot_labels}
maj_label, majority = reduce(lambda a,b: b if b[1].shape[0] > a[1].shape[0] else a, buckets.items())
minorities = {k:v for k,v in buckets.items() if k != maj_label}
# upsample the minority classes
for k,v in minorities.items():
buckets[k] = resample(v, replace=True, n_samples=majority.shape[0], random_state=random_state)
upsampled = pd.concat(buckets.values()).sample(frac=1)
# split the upsampled data into X and y
y_train = upsampled['TOT']
X_train = upsampled.drop('TOT', axis=1)
return X_train, y_train
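# After upsampling, every TOT class in the training split has the same number of rows as the
# originally largest class, which keeps the classifier from simply predicting the majority
# reaction-time bucket.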
def prepare_inputs(X_train, X_test):
# scales inputs using "standard scaler", and returns 2D numpy array
scaler = StandardScaler().fit(pd.concat([X_train, X_test]))
X_train = scaler.transform(X_train.values)
X_test = scaler.transform(X_test.values)
return X_train, X_test, scaler
def prepare_target(y_train, y_test, categories):
# convert target to categorical, and returns 2D numpy array
y_train = y_train.to_numpy().reshape(-1,1)
y_test = y_test.to_numpy().reshape(-1,1)
onehot = OneHotEncoder(categories=categories)
onehot.fit(np.concatenate([y_train, y_test]))
y_train = onehot.transform(y_train).toarray()
y_test = onehot.transform(y_test).toarray()
return y_train, y_test, onehot
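# Example: with categories ['fast', 'med_fast', 'med', 'med_slow', 'slow'], a label of
# 'med_fast' is encoded as the one-hot row [0, 1, 0, 0, 0].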
# split features and targets
y = df.TOT
X = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
# make results easier to reproduce
random_state = 27
# split train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y, random_state=random_state)
# upsample the training data
X_train, y_train = upsample_minority_TOTs(X_train, y_train, tot_labels)
# scale the inputs
X_train_enc, X_test_enc, scaler = prepare_inputs(X_train, X_test)
# categorize outputs
y_train_enc, y_test_enc, onehot = prepare_target(y_train, y_test, categories=[tot_labels])
print_heading('TOT Value Counts')
print(y_train.value_counts())
# Prepare data used for extended evaluation and verification (all participants)
# split features and targets
Y_verification = all_users.TOT
X_verification = all_users.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
# upsample minority classes
X_verification, Y_verification = upsample_minority_TOTs(X_verification, Y_verification, tot_labels)
# scale the inputs
X_verification = scaler.transform(X_verification.values)
# categorize outputs
Y_verification = onehot.transform(Y_verification.to_numpy().reshape(-1, 1)).toarray()
def add_adversarial_data(X_train, y_train, adversarial_df):
y_cols = [f'y{y}' for y in range(y_train.shape[1])]
y_adv = adv_df[y_cols].values
X_adv = adv_df.drop(y_cols, axis=1).values
return np.append(X_train, X_adv, axis=0), np.append(y_train, y_adv, axis=0)
# adv_df = pd.read_csv(adv_dataset_file)
# X_train_enc, y_train_enc = add_adversarial_data(X_train_enc, y_train_enc, adv_df)
# save the column names & indexes for use during verification
feature_names = list(X.columns)
# display the feature names
print_heading('Feature Names')
print_message(feature_names)
# print the TOT categories
print_heading('TOT Categories')
print('\n'.join(['%s: %9.2f, %7.2f' % (tot_labels[i].rjust(8), tot_bins[i], tot_bins[i+1]) for i in range(n_categories)]))
def display_processed_data(feature_names, unencoded=True, encoded=True, describe=True):
if unencoded:
print_heading('Unencoded Data')
display(pd.concat([X_train, y_train], axis=1).describe())
if encoded:
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
print_heading('Encoded Data')
display(pd.concat([pd.DataFrame(X_train_enc, columns=feature_names),
pd.DataFrame(y_train_enc, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels}).describe())
display_processed_data(feature_names, unencoded=False)
"""
## Build & Train NN"""
# cleanup the old training logs and models
!rm -rf $tensorboard_logs model-*.h5 $saved_model_dir
!mkdir -p $tensorboard_logs
# training callbacks
mc_file = 'model-best-{epoch:02d}-{val_loss:.2f}.h5'
es_cb = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
mc_cb = ModelCheckpoint(mc_file, monitor='val_accuracy', verbose=1, save_best_only=True)
# tb_cb = TensorBoard(log_dir=tensorboard_logs, histogram_freq=1, write_graph=True, write_images=True)
# v3.2.2
# loss: 0.3316 - accuracy: 0.8707 - val_loss: 0.3212 - val_accuracy: 0.874
# 1) Train: 0.869, 2) Test: 0.847
model = Sequential()
model.add(InputLayer(input_shape=(X_train_enc.shape[1],)))
model.add(Dense(23, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(18, activation='relu'))
model.add(Dense(11, activation='relu'))
model.add(Dense(n_categories, activation='softmax'))  # output layer: per-class probabilities
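# Architecture: a small fully-connected funnel over the scaled features
# (23 -> 18 -> 11 hidden ReLU units) ending in a 5-way softmax, one output per TOT class.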
optimizer = 'adam'
# optimizer = tf.keras.optimizers.Adam(learning_rate=0.02)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# fit the keras model on the dataset
history = model.fit(X_train_enc, y_train_enc,
validation_data=(X_test_enc, y_test_enc),
# epochs=30,
epochs=1,
batch_size=16,
callbacks=[es_cb, mc_cb])
# callbacks=[es_cb, mc_cb, tb_cb])
# pick best model file from filesystem
best_model_path = sorted(glob.glob('model-best-*.h5'), key=lambda f: int(re.search(r'\d+', f).group()))[-1]
print_heading('Best Model:')
print_message(best_model_path)
# cleanup old model
!rm -rf $saved_model_dir
# save model in tf and h5 formats
tf_model_path = f'{saved_model_dir}/model'
h5_model_path = f'{saved_model_dir}/model.h5'
model.save(tf_model_path) # save_format='tf'
model.save(h5_model_path, save_format='h5')
print_heading(f'Evaluating {best_model_path}')
!mkdir -p images
# load the saved best model
saved_model = load_model(tf_model_path)
# evaluate the model
_, train_acc = saved_model.evaluate(X_train_enc, y_train_enc, verbose=2)
_, test_acc = saved_model.evaluate(X_test_enc, y_test_enc, verbose=1)
print('Test accuracy: %.2f%%' % (test_acc*100))
print('Accuracy: 1) Train: %.3f, 2) Test: %.3f' % (train_acc, test_acc))
# plot training history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend(['train', 'test'], loc='upper left')
plt.ylabel('Loss')
plt.savefig('images/training_history.png', dpi=300)
plt.show()
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('images/accuracy_history.png', dpi=300)
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('images/loss_history.png', dpi=300)
plt.show()
# note: in Keras, model.predict() returns class probabilities
pred_prob = saved_model.predict(X_test_enc, verbose=0)
fpr, tpr, threshold = metrics.roc_curve(y_test_enc.ravel(), pred_prob.ravel())
roc_auc = metrics.auc(fpr, tpr)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_categories):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test_enc[:,i], pred_prob[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr['micro'], tpr['micro'], _ = metrics.roc_curve(y_test_enc.ravel(), pred_prob.ravel())
roc_auc['micro'] = metrics.auc(fpr['micro'], tpr['micro'])
# Compute macro-average ROC curve and ROC area
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_categories)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_categories):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_categories
fpr['macro'] = all_fpr
tpr['macro'] = mean_tpr
roc_auc['macro'] = metrics.auc(fpr['macro'], tpr['macro'])
plt.figure(1)
plt.plot(fpr['micro'], tpr['micro'],
label='micro-average ROC curve (area = {0:0.2f})' \
''.format(roc_auc['micro']),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr['macro'], tpr['macro'],
label='macro-average ROC curve (area = {0:0.2f})' \
''.format(roc_auc['macro']),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'red', 'blue'])
for i, color in zip(range(n_categories), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label='ROC curve of class {0} (area = {1:0.2f})' \
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Multi-class ROC curves for Reaction Time (TOT) classes')
plt.legend(loc='lower right')
plt.savefig('images/roc.png', dpi=300)
plt.show()
print_heading(f'Extended Evaluation (all participants)')
def extended_evaluation(model_path, X, Y):
# load the saved best model
model = load_model(model_path)
# evaluate the model
_, accuracy = model.evaluate(X, Y, verbose=2)
print('Accuracy: %.2f' % (accuracy * 100))
extended_evaluation(tf_model_path, X_verification, Y_verification)
"""## Create Verification Artifacts
"""
def compute_nnet_params(model_file, df, scaler):
outputs = df['TOT']
inputs = df.drop(['Name', 'TOT', 'ReactionTime'], axis=1)
enc_inputs = pd.DataFrame(scaler.transform(inputs.values), columns=inputs.columns)
# compute sdev, mins, and maxs for inputs
input_sdev = enc_inputs.std().to_numpy()
input_mins = enc_inputs.min().to_numpy()
input_maxs = enc_inputs.max().to_numpy()
# extend input maxs and mins by std dev
input_mins -= input_sdev
input_maxs += input_sdev
# maraboupy only supports normalization (not standardization)
# use mean=0, and range=1 to neutralize maraboupy normalization
means = np.zeros(enc_inputs.shape[1]+1, dtype=int)
ranges = np.ones(enc_inputs.shape[1]+1, dtype=int)
# extract weights and biases from model
model = load_model(model_file)
model_params = model.get_weights()
weights = [w.T for w in model_params[0:len(model_params):2]]
biases = model_params[1:len(model_params):2]
return (weights, biases, input_mins, input_maxs, means, ranges)
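# Sanity-check sketch: compute_nnet_params() returns kernels transposed to (n_out, n_in) plus
# matching biases, so consecutive layers should chain and each bias length should equal the
# layer's output size. The helper name below is illustrative and not used elsewhere.
def check_nnet_params(weights, biases):
    assert len(weights) == len(biases)
    for w, b in zip(weights, biases):
        assert w.shape[0] == b.shape[0], 'bias length must match layer output size'
    for w_prev, w_next in zip(weights[:-1], weights[1:]):
        assert w_next.shape[1] == w_prev.shape[0], 'consecutive layers must chain'
# check_nnet_params(*compute_nnet_params(tf_model_path, df, scaler)[:2])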
def save_nnet(weights, biases, input_mins, input_maxs, means, ranges, output_path):
# write model in nnet format.
writeNNet(weights, biases, input_mins, input_maxs, means, ranges, output_path)
def save_encoders(scaler, onehot, output_dir):
pkl.dump(scaler, open(f'{output_dir}/scaler.pkl', 'wb'))
pkl.dump(onehot, open(f'{output_dir}/onehot.pkl', 'wb'))
def save_verification_data(modelpath, df, onehot, scaler, tot_bins, tot_labels, outpath='artifacts/verification.csv'):
m = load_model(modelpath)
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
df = df.drop_duplicates()
X_df = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
X = scaler.transform(X_df.values)
y = onehot.transform(df.TOT.values.reshape(-1, 1)).toarray()
predictions, idxs = m.predict(X), []
for i,p in enumerate(predictions):
maxidxs = np.where(p==p.max())[0]
if (maxidxs.shape[0] == 1) and (maxidxs[0] == y[i].tolist().index(1)):
idxs.append(i)
X, y = X[idxs], y[idxs]
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
v_df = pd.concat([pd.DataFrame(X, columns=X_df.columns),
pd.DataFrame(y, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels})
v_df.to_csv(outpath)
print(f'wrote verification data to {outpath}')
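# Note: the filter above keeps only rows whose one-hot label coincides with a unique argmax of
# the model's prediction, i.e. samples the network already classifies correctly; the downstream
# verification queries presumably start from these correctly-classified points.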
def create_verification_artifacts(tf_model_path, h5_model_path, df, feature_names, tot_bins, tot_labels, scaler, onehot):
print_heading(f'Creating verification artifacts...')
output_dir='artifacts'
archive_path = 'artifacts.zip'
h5_path = os.path.join(output_dir, 'model.h5')
pb_path = os.path.join(output_dir, 'model.pb')
nnet_path = os.path.join(output_dir, 'model.nnet')
model_zip = os.path.join(output_dir, 'model.zip')
# clear previous folder
!rm -rf $output_dir
# create the folder
!mkdir -p $output_dir
# zip up the tf model, and copy to artifacts
!cd $tf_model_path/.. && zip -qr ../$model_zip model && cd - > /dev/null
# copy the pb model file
!cp $tf_model_path/saved_model.pb $pb_path
# copy the h5 model file
!cp $h5_model_path $h5_path
# copy the images to artifacts
!cp -r images $output_dir
# extract params for nnet format
nnet_params = compute_nnet_params(tf_model_path, df, scaler)
weights, biases, input_mins, input_maxs, means, ranges = nnet_params
# write the model to nnet file.
save_nnet(weights, biases, input_mins, input_maxs, means, ranges, nnet_path)
# write encoders to file
save_encoders(scaler, onehot, output_dir)
# save verification data
save_verification_data(tf_model_path, df.copy(), onehot, scaler, tot_bins, tot_labels)
# create a zip archive of artifacts
!zip -rq $archive_path $output_dir
print_message(f'Saved artifacts to {archive_path}')
# create artifacts used for verification
create_verification_artifacts(tf_model_path, h5_model_path, df, feature_names, tot_bins, tot_labels, scaler, onehot)
def save_data(X_train_enc, X_test_enc, y_train_enc, y_test_enc, features, onehot, data_dir='data', archive_path='data.zip'):
tot_labels = onehot.get_feature_names(input_features=['TOT'])
train_df = pd.concat([pd.DataFrame(X_train_enc, columns=features),
pd.DataFrame(y_train_enc, columns=tot_labels)],
axis=1).astype({k:int for k in tot_labels})
test_df = pd.concat([pd.DataFrame(X_test_enc, columns=features),
pd.DataFrame(y_test_enc, columns=tot_labels)],
axis=1).astype({k:int for k in tot_labels})
!mkdir -p $data_dir
train_csv, test_csv = f'{data_dir}/train.csv', f'{data_dir}/test.csv'
train_df.to_csv(train_csv)
test_df.to_csv(test_csv)
print(f'wrote data to {train_csv} and {test_csv}, compressing...')
!zip -qr $archive_path $data_dir
# save_data(X_train_enc, X_test_enc, y_train_enc, y_test_enc, X_train.columns, onehot)
def create_verification_data(modelpath, df, onehot, scaler, tot_bins, tot_labels, outpath='verification.csv'):
m = load_model(modelpath)
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
df = df.drop_duplicates()
X_df = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
X = scaler.transform(X_df.values)
y = onehot.transform(df.TOT.values.reshape(-1, 1)).toarray()
predictions, idxs = m.predict(X), []
for i,p in enumerate(predictions):
maxidxs = np.where(p==p.max())[0]
if (maxidxs.shape[0] == 1) and (maxidxs[0] == y[i].tolist().index(1)):
idxs.append(i)
X, y = X[idxs], y[idxs]
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
v_df = pd.concat([pd.DataFrame(X, columns=X_df.columns),
pd.DataFrame(y, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels})
v_df.to_csv(outpath)
print(f'wrote verification data to {outpath}')
return v_df
# create_verification_data(tf_model_path, raw_df.copy(), onehot, scaler, tot_bins, tot_labels)
"""## Save Model & Verification Artifacts to GDrive"""
# GDrive ID's point to files in models/latest folder
artifacts = {
'artifacts/model.zip': '100s5DVwaK6ILlDe2ZCgm2F8JGrY7Wixf', # tf format
'artifacts/model.h5': '1Kyxb1A4E6U_HPaPjRLVnb2OTJtXOzTXX', # h5 format
'artifacts/model.pb': '1Ap3eWHWwAyw_3wOmy237AJF3pWQRnG3_', # pb format
'artifacts/model.nnet': '1HzfGxhKrw9PpeA1cMsexC4FcWv5OPdtB', # nnet format
'artifacts/scaler.pkl': '10EkqHQ3aqEYAxbLS4Q4LRWJ1byNCvAcf', # scaler object
'artifacts/onehot.pkl': '1SeED9m_TeyqtmHRgDe_kd9HVmn2K1hh8' # onehot object
}
# upload all of the artifacts to drive
# for fname,driveid in artifacts.items():
# overwrite_gdrive_file(driveid, fname)
"""
## Visualization
"""
!mkdir -p images
display(tf.keras.utils.plot_model(model, to_file='images/model.png', show_shapes=True, show_layer_names=True, expand_nested=True, dpi=1200, rankdir='LR'))
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
svg_plot = tf.keras.utils.model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='LR')
SVG(svg_plot.create(prog='dot', format='svg'))
# with open('images/model.svg', 'w') as f:
# f.write(svg_plot.decode('utf-8'))
svg_plot.write_svg('images/model.svg')
# Commented out IPython magic to ensure Python compatibility.
# %tensorboard --logdir $tensorboard_logs --host localhost --port 6006 | all_users = df.copy()
df = df.loc[df['Name'].isin(chunk_users)]
| random_line_split |
rt_network_v3_2_x.py | # -*- coding: utf-8 -*-
"""RT-Network-v3.2.x.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1MfD_C225OafgIsQKVy76p3E2Qam6kXDo
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x
# %load_ext tensorboard
import glob, math, os, re, sys, zipfile
import numpy as np
import pandas as pd
import tensorflow as tf
import seaborn as sns
import pickle as pkl
import matplotlib.pyplot as plt
from functools import reduce
from itertools import cycle
from datetime import datetime
from google.colab import auth
from oauth2client.client import GoogleCredentials
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from IPython.display import display, HTML
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.utils import resample
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, InputLayer
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
"""## Authorize Google Drive"""
auth.authenticate_user()
"""## Setup & Install
Basic setup and install additional dependencies
"""
# Some global variables and general settings
saved_model_dir = './saved_model'
tensorboard_logs = './logs'
pd.options.display.float_format = '{:.2f}'.format
sns.set_context('notebook')
nnet_tools_path = os.path.abspath('NNet')
def print_html(string, tag='span', color=None, size=None):
size = f'font-size:{size};' if size else ''
color = f'color:{color};' if color else ''
display(HTML(f'<{tag} style="{color}{size}">{string}</{tag}>'))
def print_heading(string, color=None):
print_html(string, tag='h3', color=color)
def print_message(string, color=None):
print_html(string, color=color)
def download_file_from_gdrive(gdrive_id, output_file):
# Authenticate google drive
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# Download csv from GDrive...
dataset = drive.CreateFile({'id': gdrive_id})
dataset_filename = dataset['title']
print_message(f'Downloading {dataset_filename} ({gdrive_id}) from GDrive. Please wait...')
dataset.GetContentFile(output_file)
print_message(f'Download {gdrive_id} completed.')
def download_and_unzip(src_url, out_dir='./', zip_file='dl.zip', remove_zip=True):
print(f'Downloading {src_url} to {zip_file}')
!wget $src_url -O $zip_file -q --show-progress
print(f'Download complete. Unzipping {zip_file}')
z = zipfile.ZipFile(zip_file, 'r')
z.extractall(out_dir)
print(f'Unzipped to {out_dir}. Cleaning up...')
z.close()
if remove_zip: os.remove(zip_file)
def overwrite_gdrive_file(gdrive_id, input_file):
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
drive_file = drive.CreateFile({'id': gdrive_id})
drive_file.SetContentFile(input_file)
drive_file.Upload()
drive_filename = drive_file['title']
print(f'Wrote {input_file} to GDrive {drive_filename} ({gdrive_id}).')
def update_path_vars(paths=[]):
python_path = os.environ.get('PYTHONPATH') or ''
jupyter_path = os.environ.get('JUPYTER_PATH') or ''
for path in paths:
if not path in python_path:
python_path += f':{path}'
if not path in jupyter_path:
jupyter_path += f':{path}'
os.environ['PYTHONPATH'] = python_path
os.environ['JUPYTER_PATH'] = jupyter_path
def install_nnet_tools(nnet_tools_path):
nnet_tools_url = 'https://github.com/sisl/NNet/archive/master.zip'
download_and_unzip(nnet_tools_url)
!mv ./NNet-master $nnet_tools_path
def setup_nnet_tools(nnet_tools_path):
# install nnet tools if not already installed.
|
# delete sample data
!rm -rf sample_data
# setup nnet tools (for converting model to Stanford's nnet format)
setup_nnet_tools(nnet_tools_path)
# used for conversion to NNet format
from NNet.utils.writeNNet import writeNNet
"""## Download Dataset"""
# GDrive ID of csv file (AllData_ReactionTime.csv)
# https://drive.google.com/file/d/1WNnAd8lYWMT_mQWwiN6dP2KC6NTLFjXn
gdrive_id='1WNnAd8lYWMT_mQWwiN6dP2KC6NTLFjXn'
dataset_file='all_data_rt.csv'
# GDrive ID of adversarial csv
# https://drive.google.com/file/d/1N9QZm0NXXYPCQnBNrJHPzmWHkU9SSkbg
adv_dataset_gdrive_id='1N9QZm0NXXYPCQnBNrJHPzmWHkU9SSkbg'
adv_dataset_file = 'adv_dataset.csv'
# load the dataset from gdrive if it doesn't exist in the runtime's filesystem.
if not os.path.exists(dataset_file):
download_file_from_gdrive(gdrive_id, dataset_file)
if not os.path.exists(adv_dataset_file):
download_file_from_gdrive(adv_dataset_gdrive_id, adv_dataset_file)
"""## Import Dataset"""
raw_columns = ['ID', 'Name', 'FixationDuration', 'FixationStart', 'FixationSeq',
'FixationX', 'FixationY', 'GazeDirectionLeftZ', 'GazeDirectionRightZ',
'PupilLeft', 'PupilRight', 'InterpolatedGazeX', 'InterpolatedGazeY',
'AutoThrottle', 'AutoWheel', 'CurrentThrottle', 'CurrentWheel',
'Distance3D', 'MPH', 'ManualBrake', 'ManualThrottle', 'ManualWheel',
'RangeW', 'RightLaneDist', 'RightLaneType', 'LeftLaneDist', 'LeftLaneType',
'ReactionTime']
raw_df = pd.read_csv(dataset_file, usecols=raw_columns)
raw_df.set_index(['ID'], inplace=True)
pure_df = pd.read_csv(dataset_file)
pure_df.set_index(['ID'], inplace=True)
pure_df
# 5 class using mean & sdev
def create_tot_categories(rt_column):
rt_mean = round(rt_column.mean())
rt_sdev = round(rt_column.std())
bound_1 = rt_mean - rt_sdev
bound_2 = rt_mean - rt_sdev // 2
bound_3 = rt_mean + rt_sdev // 2
bound_4 = rt_mean + rt_sdev
bins = [float('-inf'), bound_1, bound_2, bound_3, bound_4, float('inf')]
labels = np.array(['fast', 'med_fast', 'med', 'med_slow', 'slow'], dtype=object)
return (bins, labels)
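# Worked example (assumed numbers, not taken from the dataset): with a mean reaction time of
# 1000 and a standard deviation of 400, the boundaries are 600, 800, 1200, 1400, so pd.cut
# assigns fast: (-inf, 600], med_fast: (600, 800], med: (800, 1200],
# med_slow: (1200, 1400], slow: (1400, inf).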
# make a copy the raw data
df = raw_df.copy()
# compute 'TOT' categories
tot_bins, tot_labels = create_tot_categories(df.ReactionTime)
n_categories = len(tot_labels)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
# add the TOT class to the dataframe
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
# Select a handful of participants to save resources
df['Name'] = df['Name'].str.upper()
chunk_users = [[f'{i}'.zfill(3) + f'_M{j}' for j in range(1, 4)] for i in [13, 14]]
chunk_users = [u for l in chunk_users for u in l]
all_users = df.copy()
df = df.loc[df['Name'].isin(chunk_users)]
def upsample_minority_TOTs(X_train, y_train, tot_labels, random_state=27):
    # concatenate the training data together.
X = pd.concat([X_train, y_train], axis=1)
# separate majority and minority classes
buckets = {l: X[X.TOT == l] for l in tot_labels}
maj_label, majority = reduce(lambda a,b: b if b[1].shape[0] > a[1].shape[0] else a, buckets.items())
minorities = {k:v for k,v in buckets.items() if k != maj_label}
# upsample the minority classes
for k,v in minorities.items():
buckets[k] = resample(v, replace=True, n_samples=majority.shape[0], random_state=random_state)
upsampled = pd.concat(buckets.values()).sample(frac=1)
# split the upsampled data into X and y
y_train = upsampled['TOT']
X_train = upsampled.drop('TOT', axis=1)
return X_train, y_train
def prepare_inputs(X_train, X_test):
# scales inputs using "standard scaler", and returns 2D numpy array
scaler = StandardScaler().fit(pd.concat([X_train, X_test]))
X_train = scaler.transform(X_train.values)
X_test = scaler.transform(X_test.values)
return X_train, X_test, scaler
def prepare_target(y_train, y_test, categories):
# convert target to categorical, and returns 2D numpy array
y_train = y_train.to_numpy().reshape(-1,1)
y_test = y_test.to_numpy().reshape(-1,1)
onehot = OneHotEncoder(categories=categories)
onehot.fit(np.concatenate([y_train, y_test]))
y_train = onehot.transform(y_train).toarray()
y_test = onehot.transform(y_test).toarray()
return y_train, y_test, onehot
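# Note: because categories=[tot_labels] is passed explicitly below, the one-hot columns keep the
# order 'fast', 'med_fast', 'med', 'med_slow', 'slow', so np.argmax over an encoded row maps
# straight back to an index into tot_labels.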
# split features and targets
y = df.TOT
X = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
# make results easier to reproduce
random_state = 27
# split train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y, random_state=random_state)
# upsample the training data
X_train, y_train = upsample_minority_TOTs(X_train, y_train, tot_labels)
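# Note: the minority-class upsampling happens after the train/test split and only on the
# training portion here, so duplicated rows do not leak into the held-out test set.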
# scale the inputs
X_train_enc, X_test_enc, scaler = prepare_inputs(X_train, X_test)
# categorize outputs
y_train_enc, y_test_enc, onehot = prepare_target(y_train, y_test, categories=[tot_labels])
print_heading('TOT Value Counts')
print(y_train.value_counts())
# Prepare data used for extended evaluation and verification (all participants)
# split features and targets
Y_verification = all_users.TOT
X_verification = all_users.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
# upsample minority classes
X_verification, Y_verification = upsample_minority_TOTs(X_verification, Y_verification, tot_labels)
# scale the inputs
X_verification = scaler.transform(X_verification.values)
# categorize outputs
Y_verification = onehot.transform(Y_verification.to_numpy().reshape(-1, 1)).toarray()
def add_adversarial_data(X_train, y_train, adversarial_df):
y_cols = [f'y{y}' for y in range(y_train.shape[1])]
    y_adv = adversarial_df[y_cols].values
    X_adv = adversarial_df.drop(y_cols, axis=1).values
return np.append(X_train, X_adv, axis=0), np.append(y_train, y_adv, axis=0)
# adv_df = pd.read_csv(adv_dataset_file)
# X_train_enc, y_train_enc = add_adversarial_data(X_train_enc, y_train_enc, adv_df)
# save the column names & indexes for use during verification
feature_names = list(X.columns)
# display the feature names
print_heading('Feature Names')
print_message(feature_names)
# print the TOT categories
print_heading('TOT Categories')
print('\n'.join(['%s: %9.2f, %7.2f' % (tot_labels[i].rjust(8), tot_bins[i], tot_bins[i+1]) for i in range(n_categories)]))
def display_processed_data(feature_names, unencoded=True, encoded=True, describe=True):
if unencoded:
print_heading('Unencoded Data')
display(pd.concat([X_train, y_train], axis=1).describe())
if encoded:
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
print_heading('Encoded Data')
display(pd.concat([pd.DataFrame(X_train_enc, columns=feature_names),
pd.DataFrame(y_train_enc, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels}).describe())
display_processed_data(feature_names, unencoded=False)
"""
## Build & Train NN"""
# cleanup the old training logs and models
!rm -rf $tensorboard_logs model-*.h5 $saved_model_dir
!mkdir -p $tensorboard_logs
# training callbacks
mc_file = 'model-best-{epoch:02d}-{val_loss:.2f}.h5'
es_cb = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
mc_cb = ModelCheckpoint(mc_file, monitor='val_accuracy', verbose=1, save_best_only=True)
# tb_cb = TensorBoard(log_dir=tensorboard_logs, histogram_freq=1, write_graph=True, write_images=True)
# v3.2.2
# loss: 0.3316 - accuracy: 0.8707 - val_loss: 0.3212 - val_accuracy: 0.874
# 1) Train: 0.869, 2) Test: 0.847
model = Sequential()
model.add(InputLayer(input_shape=(X_train_enc.shape[1],)))
model.add(Dense(23, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(18, activation='relu'))
model.add(Dense(11, activation='relu'))
model.add(Dense(n_categories, activation='softmax')) # logits layer
optimizer = 'adam'
# optimizer = tf.keras.optimizers.Adam(learning_rate=0.02)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# fit the keras model on the dataset
history = model.fit(X_train_enc, y_train_enc,
validation_data=(X_test_enc, y_test_enc),
# epochs=30,
epochs=1,
batch_size=16,
callbacks=[es_cb, mc_cb])
# callbacks=[es_cb, mc_cb, tb_cb])
# pick best model file from filesystem
best_model_path = sorted(glob.glob('model-best-*.h5'), key=lambda f: int(re.search(r'\d+', f).group()))[-1]
print_heading('Best Model:')
print_message(best_model_path)
# cleanup old model
!rm -rf $saved_model_dir
# save model in tf and h5 formats
tf_model_path = f'{saved_model_dir}/model'
h5_model_path = f'{saved_model_dir}/model.h5'
model.save(tf_model_path) # save_format='tf'
model.save(h5_model_path, save_format='h5')
print_heading(f'Evaluating {best_model_path}')
!mkdir -p images
# load the saved best model
saved_model = load_model(tf_model_path)
# evaluate the model
_, train_acc = saved_model.evaluate(X_train_enc, y_train_enc, verbose=2)
_, test_acc = saved_model.evaluate(X_test_enc, y_test_enc, verbose=1)
print('Test accuracy: %.2f%%' % (test_acc * 100))
print('Accuracy: 1) Train: %.3f, 2) Test: %.3f' % (train_acc, test_acc))
# plot training history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend(['train', 'test'], loc='upper left')
plt.ylabel('Loss')
plt.savefig('images/training_history.png', dpi=300)
plt.show()
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('images/accuracy_history.png', dpi=300)
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('images/loss_history.png', dpi=300)
plt.show()
# note: in Keras, model.predict() returns class probabilities
pred_prob = saved_model.predict(X_test_enc, verbose=0)
fpr, tpr, threshold = metrics.roc_curve(y_test_enc.ravel(), pred_prob.ravel())
roc_auc = metrics.auc(fpr, tpr)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_categories):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test_enc[:,i], pred_prob[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr['micro'], tpr['micro'], _ = metrics.roc_curve(y_test_enc.ravel(), pred_prob.ravel())
roc_auc['micro'] = metrics.auc(fpr['micro'], tpr['micro'])
# Compute macro-average ROC curve and ROC area
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_categories)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_categories):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_categories
fpr['macro'] = all_fpr
tpr['macro'] = mean_tpr
roc_auc['macro'] = metrics.auc(fpr['macro'], tpr['macro'])
plt.figure(1)
plt.plot(fpr['micro'], tpr['micro'],
label='micro-average ROC curve (area = {0:0.2f})' \
''.format(roc_auc['micro']),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr['macro'], tpr['macro'],
label='macro-average ROC curve (area = {0:0.2f})' \
''.format(roc_auc['macro']),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'red', 'blue'])
for i, color in zip(range(n_categories), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label='ROC curve of class {0} (area = {1:0.2f})' \
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Multi-class ROC curves for Reaction Time (TOT) classes')
plt.legend(loc='lower right')
plt.savefig('images/roc.png', dpi=300)
plt.show()
print_heading(f'Extended Evaluation (all participants)')
def extended_evaluation(model_path, X, Y):
# load the saved best model
model = load_model(model_path)
# evaluate the model
_, accuracy = model.evaluate(X, Y, verbose=2)
print('Accuracy: %.2f' % (accuracy * 100))
extended_evaluation(tf_model_path, X_verification, Y_verification)
"""## Create Verification Artifacts
"""
def compute_nnet_params(model_file, df, scaler):
outputs = df['TOT']
inputs = df.drop(['Name', 'TOT', 'ReactionTime'], axis=1)
enc_inputs = pd.DataFrame(scaler.transform(inputs.values), columns=inputs.columns)
# compute sdev, mins, and maxs for inputs
input_sdev = enc_inputs.std().to_numpy()
input_mins = enc_inputs.min().to_numpy()
input_maxs = enc_inputs.max().to_numpy()
# extend input maxs and mins by std dev
input_mins -= input_sdev
input_maxs += input_sdev
# maraboupy only supports normalization (not standardization)
# use mean=0, and range=1 to neutralize maraboupy normalization
means = np.zeros(enc_inputs.shape[1]+1, dtype=int)
ranges = np.ones(enc_inputs.shape[1]+1, dtype=int)
# extract weights and biases from model
model = load_model(model_file)
model_params = model.get_weights()
weights = [w.T for w in model_params[0:len(model_params):2]]
biases = model_params[1:len(model_params):2]
return (weights, biases, input_mins, input_maxs, means, ranges)
def save_nnet(weights, biases, input_mins, input_maxs, means, ranges, output_path):
# write model in nnet format.
writeNNet(weights, biases, input_mins, input_maxs, means, ranges, output_path)
def save_encoders(scaler, onehot, output_dir):
pkl.dump(scaler, open(f'{output_dir}/scaler.pkl', 'wb'))
pkl.dump(onehot, open(f'{output_dir}/onehot.pkl', 'wb'))
def save_verification_data(modelpath, df, onehot, scaler, tot_bins, tot_labels, outpath='artifacts/verification.csv'):
m = load_model(modelpath)
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
df = df.drop_duplicates()
X_df = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
X = scaler.transform(X_df.values)
y = onehot.transform(df.TOT.values.reshape(-1, 1)).toarray()
predictions, idxs = m.predict(X), []
for i,p in enumerate(predictions):
maxidxs = np.where(p==p.max())[0]
if (maxidxs.shape[0] == 1) and (maxidxs[0] == y[i].tolist().index(1)):
idxs.append(i)
X, y = X[idxs], y[idxs]
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
v_df = pd.concat([pd.DataFrame(X, columns=X_df.columns),
pd.DataFrame(y, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels})
v_df.to_csv(outpath)
print(f'wrote verification data to {outpath}')
def create_verification_artifacts(tf_model_path, h5_model_path, df, feature_names, tot_bins, tot_labels, scaler, onehot):
print_heading(f'Creating verification artifacts...')
output_dir='artifacts'
archive_path = 'artifacts.zip'
h5_path = os.path.join(output_dir, 'model.h5')
pb_path = os.path.join(output_dir, 'model.pb')
nnet_path = os.path.join(output_dir, 'model.nnet')
model_zip = os.path.join(output_dir, 'model.zip')
# clear previous folder
!rm -rf $output_dir
# create the folder
!mkdir -p $output_dir
# zip up the tf model, and copy to artifacts
!cd $tf_model_path/.. && zip -qr ../$model_zip model && cd - > /dev/null
# copy the pb model file
!cp $tf_model_path/saved_model.pb $pb_path
# copy the h5 model file
!cp $h5_model_path $h5_path
# copy the images to artifacts
!cp -r images $output_dir
# extract params for nnet format
nnet_params = compute_nnet_params(tf_model_path, df, scaler)
weights, biases, input_mins, input_maxs, means, ranges = nnet_params
# write the model to nnet file.
save_nnet(weights, biases, input_mins, input_maxs, means, ranges, nnet_path)
# write encoders to file
save_encoders(scaler, onehot, output_dir)
# save verification data
save_verification_data(tf_model_path, df.copy(), onehot, scaler, tot_bins, tot_labels)
# create a zip archive of artifacts
!zip -rq $archive_path $output_dir
print_message(f'Saved artifacts to {archive_path}')
# create artifacts used for verification
create_verification_artifacts(tf_model_path, h5_model_path, df, feature_names, tot_bins, tot_labels, scaler, onehot)
def save_data(X_train_enc, X_test_enc, y_train_enc, y_test_enc, features, onehot, data_dir='data', archive_path='data.zip'):
tot_labels = onehot.get_feature_names(input_features=['TOT'])
train_df = pd.concat([pd.DataFrame(X_train_enc, columns=features),
pd.DataFrame(y_train_enc, columns=tot_labels)],
axis=1).astype({k:int for k in tot_labels})
test_df = pd.concat([pd.DataFrame(X_test_enc, columns=features),
pd.DataFrame(y_test_enc, columns=tot_labels)],
axis=1).astype({k:int for k in tot_labels})
!mkdir -p $data_dir
train_csv, test_csv = f'{data_dir}/train.csv', f'{data_dir}/test.csv'
train_df.to_csv(train_csv)
test_df.to_csv(test_csv)
print(f'wrote data to {train_csv} and {test_csv}, compressing...')
!zip -qr $archive_path $data_dir
# save_data(X_train_enc, X_test_enc, y_train_enc, y_test_enc, X_train.columns, onehot)
def create_verification_data(modelpath, df, onehot, scaler, tot_bins, tot_labels, outpath='verification.csv'):
m = load_model(modelpath)
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
df = df.drop_duplicates()
X_df = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
X = scaler.transform(X_df.values)
y = onehot.transform(df.TOT.values.reshape(-1, 1)).toarray()
predictions, idxs = m.predict(X), []
for i,p in enumerate(predictions):
maxidxs = np.where(p==p.max())[0]
if (maxidxs.shape[0] == 1) and (maxidxs[0] == y[i].tolist().index(1)):
idxs.append(i)
X, y = X[idxs], y[idxs]
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
v_df = pd.concat([pd.DataFrame(X, columns=X_df.columns),
pd.DataFrame(y, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels})
v_df.to_csv(outpath)
print(f'wrote verification data to {outpath}')
return v_df
# create_verification_data(tf_model_path, raw_df.copy(), onehot, scaler, tot_bins, tot_labels)
"""## Save Model & Verification Artifacts to GDrive"""
# GDrive ID's point to files in models/latest folder
artifacts = {
'artifacts/model.zip': '100s5DVwaK6ILlDe2ZCgm2F8JGrY7Wixf', # tf format
'artifacts/model.h5': '1Kyxb1A4E6U_HPaPjRLVnb2OTJtXOzTXX', # h5 format
'artifacts/model.pb': '1Ap3eWHWwAyw_3wOmy237AJF3pWQRnG3_', # pb format
'artifacts/model.nnet': '1HzfGxhKrw9PpeA1cMsexC4FcWv5OPdtB', # nnet format
'artifacts/scaler.pkl': '10EkqHQ3aqEYAxbLS4Q4LRWJ1byNCvAcf', # scaler object
'artifacts/onehot.pkl': '1SeED9m_TeyqtmHRgDe_kd9HVmn2K1hh8' # onehot object
}
# upload all of the artifacts to drive
# for fname,driveid in artifacts.items():
# overwrite_gdrive_file(driveid, fname)
"""
## Visualization
"""
!mkdir -p images
display(tf.keras.utils.plot_model(model, to_file='images/model.png', show_shapes=True, show_layer_names=True, expand_nested=True, dpi=1200, rankdir='LR'))
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
svg_plot = tf.keras.utils.model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='LR')
SVG(svg_plot.create(prog='dot', format='svg'))
# with open('images/model.svg', 'w') as f:
# f.write(svg_plot.decode('utf-8'))
svg_plot.write_svg('images/model.svg')
# Commented out IPython magic to ensure Python compatibility.
# %tensorboard --logdir $tensorboard_logs --host localhost --port 6006 | if not os.path.exists(nnet_tools_path):
install_nnet_tools(nnet_tools_path)
# add folder to PYTHONPATH & JUPYTER_PATH
update_path_vars([nnet_tools_path]) | identifier_body |
rt_network_v3_2_x.py | # -*- coding: utf-8 -*-
"""RT-Network-v3.2.x.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1MfD_C225OafgIsQKVy76p3E2Qam6kXDo
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x
# %load_ext tensorboard
import glob, math, os, re, sys, zipfile
import numpy as np
import pandas as pd
import tensorflow as tf
import seaborn as sns
import pickle as pkl
import matplotlib.pyplot as plt
from functools import reduce
from itertools import cycle
from datetime import datetime
from google.colab import auth
from oauth2client.client import GoogleCredentials
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from IPython.display import display, HTML
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.utils import resample
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, InputLayer
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
"""## Authorize Google Drive"""
auth.authenticate_user()
"""## Setup & Install
Basic setup and install additional dependencies
"""
# Some global variables and general settings
saved_model_dir = './saved_model'
tensorboard_logs = './logs'
pd.options.display.float_format = '{:.2f}'.format
sns.set_context('notebook')
nnet_tools_path = os.path.abspath('NNet')
def print_html(string, tag='span', color=None, size=None):
size = f'font-size:{size};' if size else ''
color = f'color:{color};' if color else ''
display(HTML(f'<{tag} style="{color}{size}">{string}</{tag}>'))
def print_heading(string, color=None):
print_html(string, tag='h3', color=color)
def print_message(string, color=None):
print_html(string, color=color)
def download_file_from_gdrive(gdrive_id, output_file):
# Authenticate google drive
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# Download csv from GDrive...
dataset = drive.CreateFile({'id': gdrive_id})
dataset_filename = dataset['title']
print_message(f'Downloading {dataset_filename} ({gdrive_id}) from GDrive. Please wait...')
dataset.GetContentFile(output_file)
print_message(f'Download {gdrive_id} completed.')
def download_and_unzip(src_url, out_dir='./', zip_file='dl.zip', remove_zip=True):
print(f'Downloading {src_url} to {zip_file}')
!wget $src_url -O $zip_file -q --show-progress
print(f'Download complete. Unzipping {zip_file}')
z = zipfile.ZipFile(zip_file, 'r')
z.extractall(out_dir)
print(f'Unzipped to {out_dir}. Cleaning up...')
z.close()
if remove_zip: os.remove(zip_file)
def overwrite_gdrive_file(gdrive_id, input_file):
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
drive_file = drive.CreateFile({'id': gdrive_id})
drive_file.SetContentFile(input_file)
drive_file.Upload()
drive_filename = drive_file['title']
print(f'Wrote {input_file} to GDrive {drive_filename} ({gdrive_id}).')
def update_path_vars(paths=[]):
python_path = os.environ.get('PYTHONPATH') or ''
jupyter_path = os.environ.get('JUPYTER_PATH') or ''
for path in paths:
if not path in python_path:
python_path += f':{path}'
if not path in jupyter_path:
jupyter_path += f':{path}'
os.environ['PYTHONPATH'] = python_path
os.environ['JUPYTER_PATH'] = jupyter_path
def install_nnet_tools(nnet_tools_path):
nnet_tools_url = 'https://github.com/sisl/NNet/archive/master.zip'
download_and_unzip(nnet_tools_url)
!mv ./NNet-master $nnet_tools_path
def setup_nnet_tools(nnet_tools_path):
# install nnet tools if not already installed.
if not os.path.exists(nnet_tools_path):
install_nnet_tools(nnet_tools_path)
# add folder to PYTHONPATH & JUPYTER_PATH
update_path_vars([nnet_tools_path])
# delete sample data
!rm -rf sample_data
# setup nnet tools (for converting model to Stanford's nnet format)
setup_nnet_tools(nnet_tools_path)
# used for conversion to NNet format
from NNet.utils.writeNNet import writeNNet
"""## Download Dataset"""
# GDrive ID of csv file (AllData_ReactionTime.csv)
# https://drive.google.com/file/d/1WNnAd8lYWMT_mQWwiN6dP2KC6NTLFjXn
gdrive_id='1WNnAd8lYWMT_mQWwiN6dP2KC6NTLFjXn'
dataset_file='all_data_rt.csv'
# GDrive ID of adversarial csv
# https://drive.google.com/file/d/1N9QZm0NXXYPCQnBNrJHPzmWHkU9SSkbg
adv_dataset_gdrive_id='1N9QZm0NXXYPCQnBNrJHPzmWHkU9SSkbg'
adv_dataset_file = 'adv_dataset.csv'
# load the dataset from gdrive if it doesn't exist in the runtime's filesystem.
if not os.path.exists(dataset_file):
download_file_from_gdrive(gdrive_id, dataset_file)
if not os.path.exists(adv_dataset_file):
download_file_from_gdrive(adv_dataset_gdrive_id, adv_dataset_file)
"""## Import Dataset"""
raw_columns = ['ID', 'Name', 'FixationDuration', 'FixationStart', 'FixationSeq',
'FixationX', 'FixationY', 'GazeDirectionLeftZ', 'GazeDirectionRightZ',
'PupilLeft', 'PupilRight', 'InterpolatedGazeX', 'InterpolatedGazeY',
'AutoThrottle', 'AutoWheel', 'CurrentThrottle', 'CurrentWheel',
'Distance3D', 'MPH', 'ManualBrake', 'ManualThrottle', 'ManualWheel',
'RangeW', 'RightLaneDist', 'RightLaneType', 'LeftLaneDist', 'LeftLaneType',
'ReactionTime']
raw_df = pd.read_csv(dataset_file, usecols=raw_columns)
raw_df.set_index(['ID'], inplace=True)
pure_df = pd.read_csv(dataset_file)
pure_df.set_index(['ID'], inplace=True)
pure_df
# 5 class using mean & sdev
def create_tot_categories(rt_column):
rt_mean = round(rt_column.mean())
rt_sdev = round(rt_column.std())
bound_1 = rt_mean - rt_sdev
bound_2 = rt_mean - rt_sdev // 2
bound_3 = rt_mean + rt_sdev // 2
bound_4 = rt_mean + rt_sdev
bins = [float('-inf'), bound_1, bound_2, bound_3, bound_4, float('inf')]
labels = np.array(['fast', 'med_fast', 'med', 'med_slow', 'slow'], dtype=object)
return (bins, labels)
# make a copy the raw data
df = raw_df.copy()
# compute 'TOT' categories
tot_bins, tot_labels = create_tot_categories(df.ReactionTime)
n_categories = len(tot_labels)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
# add the TOT class to the dataframe
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
# Select a handful of participants to save resources
df['Name'] = df['Name'].str.upper()
chunk_users = [[f'{i}'.zfill(3) + f'_M{j}' for j in range(1, 4)] for i in [13, 14]]
chunk_users = [u for l in chunk_users for u in l]
all_users = df.copy()
df = df.loc[df['Name'].isin(chunk_users)]
def upsample_minority_TOTs(X_train, y_train, tot_labels, random_state=27):
    # concatenate the training data together.
X = pd.concat([X_train, y_train], axis=1)
# separate majority and minority classes
buckets = {l: X[X.TOT == l] for l in tot_labels}
maj_label, majority = reduce(lambda a,b: b if b[1].shape[0] > a[1].shape[0] else a, buckets.items())
minorities = {k:v for k,v in buckets.items() if k != maj_label}
# upsample the minority classes
for k,v in minorities.items():
buckets[k] = resample(v, replace=True, n_samples=majority.shape[0], random_state=random_state)
upsampled = pd.concat(buckets.values()).sample(frac=1)
# split the upsampled data into X and y
y_train = upsampled['TOT']
X_train = upsampled.drop('TOT', axis=1)
return X_train, y_train
def prepare_inputs(X_train, X_test):
# scales inputs using "standard scaler", and returns 2D numpy array
scaler = StandardScaler().fit(pd.concat([X_train, X_test]))
X_train = scaler.transform(X_train.values)
X_test = scaler.transform(X_test.values)
return X_train, X_test, scaler
def prepare_target(y_train, y_test, categories):
# convert target to categorical, and returns 2D numpy array
y_train = y_train.to_numpy().reshape(-1,1)
y_test = y_test.to_numpy().reshape(-1,1)
onehot = OneHotEncoder(categories=categories)
onehot.fit(np.concatenate([y_train, y_test]))
y_train = onehot.transform(y_train).toarray()
y_test = onehot.transform(y_test).toarray()
return y_train, y_test, onehot
# split features and targets
y = df.TOT
X = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
# make results easier to reproduce
random_state = 27
# split train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y, random_state=random_state)
# upsample the training data
X_train, y_train = upsample_minority_TOTs(X_train, y_train, tot_labels)
# scale the inputs
X_train_enc, X_test_enc, scaler = prepare_inputs(X_train, X_test)
# categorize outputs
y_train_enc, y_test_enc, onehot = prepare_target(y_train, y_test, categories=[tot_labels])
print_heading('TOT Value Counts')
print(y_train.value_counts())
# Prepare data used for extended evaluation and verification (all participants)
# split features and targets
Y_verification = all_users.TOT
X_verification = all_users.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
# upsample minority classes
X_verification, Y_verification = upsample_minority_TOTs(X_verification, Y_verification, tot_labels)
# scale the inputs
X_verification = scaler.transform(X_verification.values)
# categorize outputs
Y_verification = onehot.transform(Y_verification.to_numpy().reshape(-1, 1)).toarray()
def add_adversarial_data(X_train, y_train, adversarial_df):
y_cols = [f'y{y}' for y in range(y_train.shape[1])]
    y_adv = adversarial_df[y_cols].values
    X_adv = adversarial_df.drop(y_cols, axis=1).values
return np.append(X_train, X_adv, axis=0), np.append(y_train, y_adv, axis=0)
# adv_df = pd.read_csv(adv_dataset_file)
# X_train_enc, y_train_enc = add_adversarial_data(X_train_enc, y_train_enc, adv_df)
# save the column names & indexes for use during verification
feature_names = list(X.columns)
# display the feature names
print_heading('Feature Names')
print_message(feature_names)
# print the TOT categories
print_heading('TOT Categories')
print('\n'.join(['%s: %9.2f, %7.2f' % (tot_labels[i].rjust(8), tot_bins[i], tot_bins[i+1]) for i in range(n_categories)]))
def display_processed_data(feature_names, unencoded=True, encoded=True, describe=True):
if unencoded:
print_heading('Unencoded Data')
display(pd.concat([X_train, y_train], axis=1).describe())
if encoded:
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
print_heading('Encoded Data')
display(pd.concat([pd.DataFrame(X_train_enc, columns=feature_names),
pd.DataFrame(y_train_enc, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels}).describe())
display_processed_data(feature_names, unencoded=False)
"""
## Build & Train NN"""
# cleanup the old training logs and models
!rm -rf $tensorboard_logs model-*.h5 $saved_model_dir
!mkdir -p $tensorboard_logs
# training callbacks
mc_file = 'model-best-{epoch:02d}-{val_loss:.2f}.h5'
es_cb = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
mc_cb = ModelCheckpoint(mc_file, monitor='val_accuracy', verbose=1, save_best_only=True)
# tb_cb = TensorBoard(log_dir=tensorboard_logs, histogram_freq=1, write_graph=True, write_images=True)
# v3.2.2
# loss: 0.3316 - accuracy: 0.8707 - val_loss: 0.3212 - val_accuracy: 0.874
# 1) Train: 0.869, 2) Test: 0.847
model = Sequential()
model.add(InputLayer(input_shape=(X_train_enc.shape[1],)))
model.add(Dense(23, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(18, activation='relu'))
model.add(Dense(11, activation='relu'))
model.add(Dense(n_categories, activation='softmax')) # logits layer
optimizer = 'adam'
# optimizer = tf.keras.optimizers.Adam(learning_rate=0.02)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# fit the keras model on the dataset
history = model.fit(X_train_enc, y_train_enc,
validation_data=(X_test_enc, y_test_enc),
# epochs=30,
epochs=1,
batch_size=16,
callbacks=[es_cb, mc_cb])
# callbacks=[es_cb, mc_cb, tb_cb])
# pick best model file from filesystem
best_model_path = sorted(glob.glob('model-best-*.h5'), key=lambda f: int(re.search(r'\d+', f).group()))[-1]
print_heading('Best Model:')
print_message(best_model_path)
# cleanup old model
!rm -rf $saved_model_dir
# save model in tf and h5 formats
tf_model_path = f'{saved_model_dir}/model'
h5_model_path = f'{saved_model_dir}/model.h5'
model.save(tf_model_path) # save_format='tf'
model.save(h5_model_path, save_format='h5')
print_heading(f'Evaluating {best_model_path}')
!mkdir -p images
# load the saved best model
saved_model = load_model(tf_model_path)
# evaluate the model
_, train_acc = saved_model.evaluate(X_train_enc, y_train_enc, verbose=2)
_, test_acc = saved_model.evaluate(X_test_enc, y_test_enc, verbose=1)
print('Test accuracy: %.2f%%' % (test_acc * 100))
print('Accuracy: 1) Train: %.3f, 2) Test: %.3f' % (train_acc, test_acc))
# plot training history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend(['train', 'test'], loc='upper left')
plt.ylabel('Loss')
plt.savefig('images/training_history.png', dpi=300)
plt.show()
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('images/accuracy_history.png', dpi=300)
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('images/loss_history.png', dpi=300)
plt.show()
# note: in Keras, model.predict() returns class probabilities
pred_prob = saved_model.predict(X_test_enc, verbose=0)
fpr, tpr, threshold = metrics.roc_curve(y_test_enc.ravel(), pred_prob.ravel())
roc_auc = metrics.auc(fpr, tpr)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_categories):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test_enc[:,i], pred_prob[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr['micro'], tpr['micro'], _ = metrics.roc_curve(y_test_enc.ravel(), pred_prob.ravel())
roc_auc['micro'] = metrics.auc(fpr['micro'], tpr['micro'])
# Compute macro-average ROC curve and ROC area
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_categories)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_categories):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_categories
fpr['macro'] = all_fpr
tpr['macro'] = mean_tpr
roc_auc['macro'] = metrics.auc(fpr['macro'], tpr['macro'])
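# Optional summary (sketch): log the aggregate AUCs alongside the plot below.
print('micro-average AUC: %.3f, macro-average AUC: %.3f' % (roc_auc['micro'], roc_auc['macro']))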
plt.figure(1)
plt.plot(fpr['micro'], tpr['micro'],
label='micro-average ROC curve (area = {0:0.2f})' \
''.format(roc_auc['micro']),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr['macro'], tpr['macro'],
label='macro-average ROC curve (area = {0:0.2f})' \
''.format(roc_auc['macro']),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'red', 'blue'])
for i, color in zip(range(n_categories), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label='ROC curve of class {0} (area = {1:0.2f})' \
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Multi-class ROC curves for Reaction Time (TOT) classes')
plt.legend(loc='lower right')
plt.savefig('images/roc.png', dpi=300)
plt.show()
print_heading(f'Extended Evaluation (all participants)')
def extended_evaluation(model_path, X, Y):
# load the saved best model
model = load_model(model_path)
# evaluate the model
_, accuracy = model.evaluate(X, Y, verbose=2)
print('Accuracy: %.2f' % (accuracy * 100))
extended_evaluation(tf_model_path, X_verification, Y_verification)
"""## Create Verification Artifacts
"""
def compute_nnet_params(model_file, df, scaler):
outputs = df['TOT']
inputs = df.drop(['Name', 'TOT', 'ReactionTime'], axis=1)
enc_inputs = pd.DataFrame(scaler.transform(inputs.values), columns=inputs.columns)
# compute sdev, mins, and maxs for inputs
input_sdev = enc_inputs.std().to_numpy()
input_mins = enc_inputs.min().to_numpy()
input_maxs = enc_inputs.max().to_numpy()
# extend input maxs and mins by std dev
input_mins -= input_sdev
input_maxs += input_sdev
# maraboupy only supports normalization (not standardization)
# use mean=0, and range=1 to neutralize maraboupy normalization
means = np.zeros(enc_inputs.shape[1]+1, dtype=int)
ranges = np.ones(enc_inputs.shape[1]+1, dtype=int)
# extract weights and biases from model
model = load_model(model_file)
model_params = model.get_weights()
weights = [w.T for w in model_params[0:len(model_params):2]]
biases = model_params[1:len(model_params):2]
return (weights, biases, input_mins, input_maxs, means, ranges)
def save_nnet(weights, biases, input_mins, input_maxs, means, ranges, output_path):
# write model in nnet format.
writeNNet(weights, biases, input_mins, input_maxs, means, ranges, output_path)
def | (scaler, onehot, output_dir):
pkl.dump(scaler, open(f'{output_dir}/scaler.pkl', 'wb'))
pkl.dump(onehot, open(f'{output_dir}/onehot.pkl', 'wb'))
def save_verification_data(modelpath, df, onehot, scaler, tot_bins, tot_labels, outpath='artifacts/verification.csv'):
m = load_model(modelpath)
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
df = df.drop_duplicates()
X_df = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
X = scaler.transform(X_df.values)
y = onehot.transform(df.TOT.values.reshape(-1, 1)).toarray()
predictions, idxs = m.predict(X), []
for i,p in enumerate(predictions):
maxidxs = np.where(p==p.max())[0]
if (maxidxs.shape[0] == 1) and (maxidxs[0] == y[i].tolist().index(1)):
idxs.append(i)
X, y = X[idxs], y[idxs]
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
v_df = pd.concat([pd.DataFrame(X, columns=X_df.columns),
pd.DataFrame(y, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels})
v_df.to_csv(outpath)
print(f'wrote verification data to {outpath}')
def create_verification_artifacts(tf_model_path, h5_model_path, df, feature_names, tot_bins, tot_labels, scaler, onehot):
print_heading(f'Creating verification artifacts...')
output_dir='artifacts'
archive_path = 'artifacts.zip'
h5_path = os.path.join(output_dir, 'model.h5')
pb_path = os.path.join(output_dir, 'model.pb')
nnet_path = os.path.join(output_dir, 'model.nnet')
model_zip = os.path.join(output_dir, 'model.zip')
# clear previous folder
!rm -rf $output_dir
# create the folder
!mkdir -p $output_dir
# zip up the tf model, and copy to artifacts
!cd $tf_model_path/.. && zip -qr ../$model_zip model && cd - > /dev/null
# copy the pb model file
!cp $tf_model_path/saved_model.pb $pb_path
# copy the h5 model file
!cp $h5_model_path $h5_path
# copy the images to artifacts
!cp -r images $output_dir
# extract params for nnet format
nnet_params = compute_nnet_params(tf_model_path, df, scaler)
weights, biases, input_mins, input_maxs, means, ranges = nnet_params
# write the model to nnet file.
save_nnet(weights, biases, input_mins, input_maxs, means, ranges, nnet_path)
# write encoders to file
save_encoders(scaler, onehot, output_dir)
# save verification data
save_verification_data(tf_model_path, df.copy(), onehot, scaler, tot_bins, tot_labels)
# create a zip archive of artifacts
!zip -rq $archive_path $output_dir
print_message(f'Saved artifacts to {archive_path}')
# create artifacts used for verification
create_verification_artifacts(tf_model_path, h5_model_path, df, feature_names, tot_bins, tot_labels, scaler, onehot)
def save_data(X_train_enc, X_test_enc, y_train_enc, y_test_enc, features, onehot, data_dir='data', archive_path='data.zip'):
tot_labels = onehot.get_feature_names(input_features=['TOT'])
train_df = pd.concat([pd.DataFrame(X_train_enc, columns=features),
pd.DataFrame(y_train_enc, columns=tot_labels)],
axis=1).astype({k:int for k in tot_labels})
test_df = pd.concat([pd.DataFrame(X_test_enc, columns=features),
pd.DataFrame(y_test_enc, columns=tot_labels)],
axis=1).astype({k:int for k in tot_labels})
!mkdir -p $data_dir
train_csv, test_csv = f'{data_dir}/train.csv', f'{data_dir}/test.csv'
train_df.to_csv(train_csv)
test_df.to_csv(test_csv)
print(f'wrote data to {train_csv} and {test_csv}, compressing...')
!zip -qr $archive_path $data_dir
# save_data(X_train_enc, X_test_enc, y_train_enc, y_test_enc, X_train.columns, onehot)
def create_verification_data(modelpath, df, onehot, scaler, tot_bins, tot_labels, outpath='verification.csv'):
m = load_model(modelpath)
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
df = df.drop_duplicates()
X_df = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
X = scaler.transform(X_df.values)
y = onehot.transform(df.TOT.values.reshape(-1, 1)).toarray()
predictions, idxs = m.predict(X), []
for i,p in enumerate(predictions):
maxidxs = np.where(p==p.max())[0]
if (maxidxs.shape[0] == 1) and (maxidxs[0] == y[i].tolist().index(1)):
idxs.append(i)
X, y = X[idxs], y[idxs]
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
v_df = pd.concat([pd.DataFrame(X, columns=X_df.columns),
pd.DataFrame(y, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels})
v_df.to_csv(outpath)
print(f'wrote verification data to {outpath}')
return v_df
# create_verification_data(tf_model_path, raw_df.copy(), onehot, scaler, tot_bins, tot_labels)
"""## Save Model & Verification Artifacts to GDrive"""
# GDrive ID's point to files in models/latest folder
artifacts = {
'artifacts/model.zip': '100s5DVwaK6ILlDe2ZCgm2F8JGrY7Wixf', # tf format
'artifacts/model.h5': '1Kyxb1A4E6U_HPaPjRLVnb2OTJtXOzTXX', # h5 format
'artifacts/model.pb': '1Ap3eWHWwAyw_3wOmy237AJF3pWQRnG3_', # pb format
'artifacts/model.nnet': '1HzfGxhKrw9PpeA1cMsexC4FcWv5OPdtB', # nnet format
'artifacts/scaler.pkl': '10EkqHQ3aqEYAxbLS4Q4LRWJ1byNCvAcf', # scaler object
'artifacts/onehot.pkl': '1SeED9m_TeyqtmHRgDe_kd9HVmn2K1hh8' # onehot object
}
# upload all of the artifacts to drive
# for fname,driveid in artifacts.items():
# overwrite_gdrive_file(driveid, fname)
"""
## Visualization
"""
!mkdir -p images
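# Note: plot_model and model_to_dot below rely on the pydot and graphviz packages being
# available in the runtime; install them first if the plotting calls fail.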
display(tf.keras.utils.plot_model(model, to_file='images/model.png', show_shapes=True, show_layer_names=True, expand_nested=True, dpi=1200, rankdir='LR'))
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
svg_plot = tf.keras.utils.model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='LR')
SVG(svg_plot.create(prog='dot', format='svg'))
# with open('images/model.svg', 'w') as f:
# f.write(svg_plot.decode('utf-8'))
svg_plot.write_svg('images/model.svg')
# Commented out IPython magic to ensure Python compatibility.
# %tensorboard --logdir $tensorboard_logs --host localhost --port 6006 | save_encoders | identifier_name |
rt_network_v3_2_x.py | # -*- coding: utf-8 -*-
"""RT-Network-v3.2.x.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1MfD_C225OafgIsQKVy76p3E2Qam6kXDo
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x
# %load_ext tensorboard
import glob, math, os, re, sys, zipfile
import numpy as np
import pandas as pd
import tensorflow as tf
import seaborn as sns
import pickle as pkl
import matplotlib.pyplot as plt
from functools import reduce
from itertools import cycle
from datetime import datetime
from google.colab import auth
from oauth2client.client import GoogleCredentials
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from IPython.display import display, HTML
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.utils import resample
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, InputLayer
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
"""## Authorize Google Drive"""
auth.authenticate_user()
"""## Setup & Install
Basic setup and install additional dependencies
"""
# Some global variables and general settings
saved_model_dir = './saved_model'
tensorboard_logs = './logs'
pd.options.display.float_format = '{:.2f}'.format
sns.set_context('notebook')
nnet_tools_path = os.path.abspath('NNet')
def print_html(string, tag='span', color=None, size=None):
size = f'font-size:{size};' if size else ''
color = f'color:{color};' if color else ''
display(HTML(f'<{tag} style="{color}{size}">{string}</{tag}>'))
def print_heading(string, color=None):
print_html(string, tag='h3', color=color)
def print_message(string, color=None):
print_html(string, color=color)
def download_file_from_gdrive(gdrive_id, output_file):
# Authenticate google drive
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# Download csv from GDrive...
dataset = drive.CreateFile({'id': gdrive_id})
dataset_filename = dataset['title']
print_message(f'Downloading {dataset_filename} ({gdrive_id}) from GDrive. Please wait...')
dataset.GetContentFile(output_file)
print_message(f'Download {gdrive_id} completed.')
def download_and_unzip(src_url, out_dir='./', zip_file='dl.zip', remove_zip=True):
print(f'Downloading {src_url} to {zip_file}')
!wget $src_url -O $zip_file -q --show-progress
print(f'Download complete. Unzipping {zip_file}')
z = zipfile.ZipFile(zip_file, 'r')
z.extractall(out_dir)
print(f'Unzipped to {out_dir}. Cleaning up...')
z.close()
if remove_zip: os.remove(zip_file)
def overwrite_gdrive_file(gdrive_id, input_file):
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
drive_file = drive.CreateFile({'id': gdrive_id})
drive_file.SetContentFile(input_file)
drive_file.Upload()
drive_filename = drive_file['title']
print(f'Wrote {input_file} to GDrive {drive_filename} ({gdrive_id}).')
def update_path_vars(paths=[]):
python_path = os.environ.get('PYTHONPATH') or ''
jupyter_path = os.environ.get('JUPYTER_PATH') or ''
for path in paths:
if not path in python_path:
python_path += f':{path}'
if not path in jupyter_path:
jupyter_path += f':{path}'
os.environ['PYTHONPATH'] = python_path
os.environ['JUPYTER_PATH'] = jupyter_path
def install_nnet_tools(nnet_tools_path):
nnet_tools_url = 'https://github.com/sisl/NNet/archive/master.zip'
download_and_unzip(nnet_tools_url)
!mv ./NNet-master $nnet_tools_path
def setup_nnet_tools(nnet_tools_path):
# install nnet tools if not already installed.
if not os.path.exists(nnet_tools_path):
install_nnet_tools(nnet_tools_path)
# add folder to PYTHONPATH & JUPYTER_PATH
update_path_vars([nnet_tools_path])
# delete sample data
!rm -rf sample_data
# setup nnet tools (for converting model to Stanford's nnet format)
setup_nnet_tools(nnet_tools_path)
# used for conversion to NNet format
from NNet.utils.writeNNet import writeNNet
"""## Download Dataset"""
# GDrive ID of csv file (AllData_ReactionTime.csv)
# https://drive.google.com/file/d/1WNnAd8lYWMT_mQWwiN6dP2KC6NTLFjXn
gdrive_id='1WNnAd8lYWMT_mQWwiN6dP2KC6NTLFjXn'
dataset_file='all_data_rt.csv'
# GDrive ID of adversarial csv
# https://drive.google.com/file/d/1N9QZm0NXXYPCQnBNrJHPzmWHkU9SSkbg
adv_dataset_gdrive_id='1N9QZm0NXXYPCQnBNrJHPzmWHkU9SSkbg'
adv_dataset_file = 'adv_dataset.csv'
# load the dataset from gdrive if it doesn't exist in the runtime's filesystem.
if not os.path.exists(dataset_file):
download_file_from_gdrive(gdrive_id, dataset_file)
if not os.path.exists(adv_dataset_file):
download_file_from_gdrive(adv_dataset_gdrive_id, adv_dataset_file)
"""## Import Dataset"""
raw_columns = ['ID', 'Name', 'FixationDuration', 'FixationStart', 'FixationSeq',
'FixationX', 'FixationY', 'GazeDirectionLeftZ', 'GazeDirectionRightZ',
'PupilLeft', 'PupilRight', 'InterpolatedGazeX', 'InterpolatedGazeY',
'AutoThrottle', 'AutoWheel', 'CurrentThrottle', 'CurrentWheel',
'Distance3D', 'MPH', 'ManualBrake', 'ManualThrottle', 'ManualWheel',
'RangeW', 'RightLaneDist', 'RightLaneType', 'LeftLaneDist', 'LeftLaneType',
'ReactionTime']
raw_df = pd.read_csv(dataset_file, usecols=raw_columns)
raw_df.set_index(['ID'], inplace=True)
pure_df = pd.read_csv(dataset_file)
pure_df.set_index(['ID'], inplace=True)
pure_df
# 5 class using mean & sdev
def create_tot_categories(rt_column):
rt_mean = round(rt_column.mean())
rt_sdev = round(rt_column.std())
bound_1 = rt_mean - rt_sdev
bound_2 = rt_mean - rt_sdev // 2
bound_3 = rt_mean + rt_sdev // 2
bound_4 = rt_mean + rt_sdev
bins = [float('-inf'), bound_1, bound_2, bound_3, bound_4, float('inf')]
labels = np.array(['fast', 'med_fast', 'med', 'med_slow', 'slow'], dtype=object)
return (bins, labels)
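# Illustration (hypothetical numbers): with mean ReactionTime 1000 and sdev 400 the bins are
# (-inf, 600, 800, 1200, 1400, inf), i.e. <=600 -> 'fast', 600-800 -> 'med_fast',
# 800-1200 -> 'med', 1200-1400 -> 'med_slow', >1400 -> 'slow'.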
# make a copy of the raw data
df = raw_df.copy()
# compute 'TOT' categories
tot_bins, tot_labels = create_tot_categories(df.ReactionTime)
n_categories = len(tot_labels)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
# RightLaneType and LeftLaneType were cast to int above;
# add the TOT class (reaction-time bucket) to the dataframe
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
# select a handful of participants to keep resource usage down
df['Name'] = df['Name'].str.upper()
chunk_users = [[f'{i}'.zfill(3) + f'_M{j}' for j in range(1, 4)] for i in [13, 14]]
chunk_users = [u for l in chunk_users for u in l]
all_users = df.copy()
df = df.loc[df['Name'].isin(chunk_users)]
def upsample_minority_TOTs(X_train, y_train, tot_labels, random_state=27):
# concatenate the training features and labels together
X = pd.concat([X_train, y_train], axis=1)
# separate majority and minority classes
buckets = {l: X[X.TOT == l] for l in tot_labels}
maj_label, majority = reduce(lambda a,b: b if b[1].shape[0] > a[1].shape[0] else a, buckets.items())
minorities = {k:v for k,v in buckets.items() if k != maj_label}
# upsample the minority classes
for k,v in minorities.items():
buckets[k] = resample(v, replace=True, n_samples=majority.shape[0], random_state=random_state)
upsampled = pd.concat(buckets.values()).sample(frac=1)
# split the upsampled data into X and y
y_train = upsampled['TOT']
X_train = upsampled.drop('TOT', axis=1)
return X_train, y_train
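# Illustration (hypothetical counts): if 'med' is the majority class with 800 training rows and
# 'slow' has only 120, 'slow' is resampled with replacement up to 800 rows, so every class ends
# up with the majority count before the final shuffle.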
def prepare_inputs(X_train, X_test):
# scales inputs using "standard scaler", and returns 2D numpy array
scaler = StandardScaler().fit(pd.concat([X_train, X_test]))
X_train = scaler.transform(X_train.values)
X_test = scaler.transform(X_test.values)
return X_train, X_test, scaler
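# Note: the scaler is fit on the concatenation of train and test as written, so each feature is
# transformed to (x - column mean) / column std using statistics from both splits.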
def prepare_target(y_train, y_test, categories):
# convert target to categorical, and returns 2D numpy array
y_train = y_train.to_numpy().reshape(-1,1)
y_test = y_test.to_numpy().reshape(-1,1)
onehot = OneHotEncoder(categories=categories)
onehot.fit(np.concatenate([y_train, y_test]))
y_train = onehot.transform(y_train).toarray()
y_test = onehot.transform(y_test).toarray()
return y_train, y_test, onehot
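# Illustration: with categories=[tot_labels] the column order is fixed, so 'fast' encodes to
# [1, 0, 0, 0, 0] and 'slow' to [0, 0, 0, 0, 1].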
# split features and targets
y = df.TOT
X = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
# make results easier to reproduce
random_state = 27
# split train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y, random_state=random_state)
# upsample the training data
X_train, y_train = upsample_minority_TOTs(X_train, y_train, tot_labels)
# scale the inputs
X_train_enc, X_test_enc, scaler = prepare_inputs(X_train, X_test)
# categorize outputs
y_train_enc, y_test_enc, onehot = prepare_target(y_train, y_test, categories=[tot_labels])
print_heading('TOT Value Counts')
print(y_train.value_counts())
# Prepare data used for extended evaluation and verification (all participants)
# split features and targets
Y_verification = all_users.TOT
X_verification = all_users.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
# upsample minority classes
X_verification, Y_verification = upsample_minority_TOTs(X_verification, Y_verification, tot_labels)
# scale the inputs
X_verification = scaler.transform(X_verification.values)
# categorize outputs
Y_verification = onehot.transform(Y_verification.to_numpy().reshape(-1, 1)).toarray()
def add_adversarial_data(X_train, y_train, adversarial_df):
y_cols = [f'y{y}' for y in range(y_train.shape[1])]
y_adv = adversarial_df[y_cols].values
X_adv = adversarial_df.drop(y_cols, axis=1).values
return np.append(X_train, X_adv, axis=0), np.append(y_train, y_adv, axis=0)
# adv_df = pd.read_csv(adv_dataset_file)
# X_train_enc, y_train_enc = add_adversarial_data(X_train_enc, y_train_enc, adv_df)
# save the column names & indexes for use during verification
feature_names = list(X.columns)
# display the feature names
print_heading('Feature Names')
print_message(feature_names)
# print the TOT categories
print_heading('TOT Categories')
print('\n'.join(['%s: %9.2f, %7.2f' % (tot_labels[i].rjust(8), tot_bins[i], tot_bins[i+1]) for i in range(n_categories)]))
def display_processed_data(feature_names, unencoded=True, encoded=True, describe=True):
if unencoded:
|
if encoded:
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
print_heading('Encoded Data')
display(pd.concat([pd.DataFrame(X_train_enc, columns=feature_names),
pd.DataFrame(y_train_enc, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels}).describe())
display_processed_data(feature_names, unencoded=False)
"""
## Build & Train NN"""
# cleanup the old training logs and models
!rm -rf $tensorboard_logs model-*.h5 $saved_model_dir
!mkdir -p $tensorboard_logs
# training callbacks
mc_file = 'model-best-{epoch:02d}-{val_loss:.2f}.h5'
es_cb = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
mc_cb = ModelCheckpoint(mc_file, monitor='val_accuracy', verbose=1, save_best_only=True)
# tb_cb = TensorBoard(log_dir=tensorboard_logs, histogram_freq=1, write_graph=True, write_images=True)
# v3.2.2
# loss: 0.3316 - accuracy: 0.8707 - val_loss: 0.3212 - val_accuracy: 0.874
# 1) Train: 0.869, 2) Test: 0.847
model = Sequential()
model.add(InputLayer(input_shape=(X_train_enc.shape[1],)))
model.add(Dense(23, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(18, activation='relu'))
model.add(Dense(11, activation='relu'))
model.add(Dense(n_categories, activation='softmax')) # logits layer
optimizer = 'adam'
# optimizer = tf.keras.optimizers.Adam(learning_rate=0.02)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# fit the keras model on the dataset
history = model.fit(X_train_enc, y_train_enc,
validation_data=(X_test_enc, y_test_enc),
# epochs=30,
epochs=1,
batch_size=16,
callbacks=[es_cb, mc_cb])
# callbacks=[es_cb, mc_cb, tb_cb])
# pick best model file from filesystem
best_model_path = sorted(glob.glob('model-best-*.h5'), key=lambda f: int(re.search(r'\d+', f).group()))[-1]
print_heading('Best Model:')
print_message(best_model_path)
# cleanup old model
!rm -rf $saved_model_dir
# save model in tf and h5 formats
tf_model_path = f'{saved_model_dir}/model'
h5_model_path = f'{saved_model_dir}/model.h5'
model.save(tf_model_path) # save_format='tf'
model.save(h5_model_path, save_format='h5')
print_heading(f'Evaluating {best_model_path}')
!mkdir -p images
# load the saved best model
saved_model = load_model(tf_model_path)
# evaluate the model
_, train_acc = saved_model.evaluate(X_train_enc, y_train_enc, verbose=2)
_, test_acc = saved_model.evaluate(X_test_enc, y_test_enc, verbose=1)
print('Accuracy of test: %.2f' % (test_acc*100))
print('Accuracy: 1) Train: %.3f, 2) Test: %.3f' % (train_acc, test_acc))
# plot training history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend(['train', 'test'], loc='upper left')
plt.ylabel('Loss')
plt.savefig('images/training_history.png', dpi=300)
plt.show()
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('images/accuracy_history.png', dpi=300)
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('images/loss_history.png', dpi=300)
plt.show()
# note: in Keras, model.predict() returns predicted class probabilities
pred_prob = saved_model.predict(X_test_enc, verbose=0)
fpr, tpr, threshold = metrics.roc_curve(y_test_enc.ravel(), pred_prob.ravel())
roc_auc = metrics.auc(fpr, tpr)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_categories):
fpr[i], tpr[i], _ = metrics.roc_curve(y_test_enc[:,i], pred_prob[:, i])
roc_auc[i] = metrics.auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr['micro'], tpr['micro'], _ = metrics.roc_curve(y_test_enc.ravel(), pred_prob.ravel())
roc_auc['micro'] = metrics.auc(fpr['micro'], tpr['micro'])
# Compute macro-average ROC curve and ROC area
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_categories)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_categories):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_categories
fpr['macro'] = all_fpr
tpr['macro'] = mean_tpr
roc_auc['macro'] = metrics.auc(fpr['macro'], tpr['macro'])
plt.figure(1)
plt.plot(fpr['micro'], tpr['micro'],
label='micro-average ROC curve (area = {0:0.2f})' \
''.format(roc_auc['micro']),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr['macro'], tpr['macro'],
label='macro-average ROC curve (area = {0:0.2f})' \
''.format(roc_auc['macro']),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'red', 'blue'])
for i, color in zip(range(n_categories), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=2,
label='ROC curve of class {0} (area = {1:0.2f})' \
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for multi-class Reaction Time prediction')
plt.legend(loc='lower right')
plt.savefig('images/roc.png', dpi=300)
plt.show()
print_heading(f'Extended Evaluation (all participants)')
def extended_evaluation(model_path, X, Y):
# load the saved best model
model = load_model(model_path)
# evaluate the model
_, accuracy = model.evaluate(X, Y, verbose=2)
print('Accuracy: %.2f' % (accuracy * 100))
extended_evaluation(tf_model_path, X_verification, Y_verification)
"""## Create Verification Artifacts
"""
def compute_nnet_params(model_file, df, scaler):
outputs = df['TOT']
inputs = df.drop(['Name', 'TOT', 'ReactionTime'], axis=1)
enc_inputs = pd.DataFrame(scaler.transform(inputs.values), columns=inputs.columns)
# compute sdev, mins, and maxs for inputs
input_sdev = enc_inputs.std().to_numpy()
input_mins = enc_inputs.min().to_numpy()
input_maxs = enc_inputs.max().to_numpy()
# extend input maxs and mins by std dev
input_mins -= input_sdev
input_maxs += input_sdev
# maraboupy only supports normalization (not standardization)
# use mean=0, and range=1 to neutralize maraboupy normalization
means = np.zeros(enc_inputs.shape[1]+1, dtype=int)
ranges = np.ones(enc_inputs.shape[1]+1, dtype=int)
# extract weights and biases from model
model = load_model(model_file)
model_params = model.get_weights()
weights = [w.T for w in model_params[0:len(model_params):2]]
biases = model_params[1:len(model_params):2]
return (weights, biases, input_mins, input_maxs, means, ranges)
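# Note: model.get_weights() returns [kernel_0, bias_0, kernel_1, bias_1, ...] for the Dense
# layers, which is what the [0::2] / [1::2] slices split apart; the kernels are transposed,
# presumably because the NNet format stores each layer's weight matrix output-major.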
def save_nnet(weights, biases, input_mins, input_maxs, means, ranges, output_path):
# write model in nnet format.
writeNNet(weights, biases, input_mins, input_maxs, means, ranges, output_path)
def save_encoders(scaler, onehot, output_dir):
pkl.dump(scaler, open(f'{output_dir}/scaler.pkl', 'wb'))
pkl.dump(onehot, open(f'{output_dir}/onehot.pkl', 'wb'))
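# Loading the encoders back later is the mirror image (sketch):
# scaler = pkl.load(open('artifacts/scaler.pkl', 'rb'))
# onehot = pkl.load(open('artifacts/onehot.pkl', 'rb'))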
def save_verification_data(modelpath, df, onehot, scaler, tot_bins, tot_labels, outpath='artifacts/verification.csv'):
m = load_model(modelpath)
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
df = df.drop_duplicates()
X_df = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
X = scaler.transform(X_df.values)
y = onehot.transform(df.TOT.values.reshape(-1, 1)).toarray()
predictions, idxs = m.predict(X), []
for i,p in enumerate(predictions):
maxidxs = np.where(p==p.max())[0]
if (maxidxs.shape[0] == 1) and (maxidxs[0] == y[i].tolist().index(1)):
idxs.append(i)
X, y = X[idxs], y[idxs]
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
v_df = pd.concat([pd.DataFrame(X, columns=X_df.columns),
pd.DataFrame(y, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels})
v_df.to_csv(outpath)
print(f'wrote verification data to {outpath}')
def create_verification_artifacts(tf_model_path, h5_model_path, df, feature_names, tot_bins, tot_labels, scaler, onehot):
print_heading(f'Creating verification artifacts...')
output_dir='artifacts'
archive_path = 'artifacts.zip'
h5_path = os.path.join(output_dir, 'model.h5')
pb_path = os.path.join(output_dir, 'model.pb')
nnet_path = os.path.join(output_dir, 'model.nnet')
model_zip = os.path.join(output_dir, 'model.zip')
# clear previous folder
!rm -rf $output_dir
# create the folder
!mkdir -p $output_dir
# zip up the tf model, and copy to artifacts
!cd $tf_model_path/.. && zip -qr ../$model_zip model && cd - > /dev/null
# copy the pb model file
!cp $tf_model_path/saved_model.pb $pb_path
# copy the h5 model file
!cp $h5_model_path $h5_path
# copy the images to artifacts
!cp -r images $output_dir
# extract params for nnet format
nnet_params = compute_nnet_params(tf_model_path, df, scaler)
weights, biases, input_mins, input_maxs, means, ranges = nnet_params
# write the model to nnet file.
save_nnet(weights, biases, input_mins, input_maxs, means, ranges, nnet_path)
# write encoders to file
save_encoders(scaler, onehot, output_dir)
# save verification data
save_verification_data(tf_model_path, df.copy(), onehot, scaler, tot_bins, tot_labels)
# create a zip archive of artifacts
!zip -rq $archive_path $output_dir
print_message(f'Saved artifacts to {archive_path}')
# create artifacts used for verification
create_verification_artifacts(tf_model_path, h5_model_path, df, feature_names, tot_bins, tot_labels, scaler, onehot)
def save_data(X_train_enc, X_test_enc, y_train_enc, y_test_enc, features, onehot, data_dir='data', archive_path='data.zip'):
tot_labels = onehot.get_feature_names(input_features=['TOT'])
train_df = pd.concat([pd.DataFrame(X_train_enc, columns=features),
pd.DataFrame(y_train_enc, columns=tot_labels)],
axis=1).astype({k:int for k in tot_labels})
test_df = pd.concat([pd.DataFrame(X_test_enc, columns=features),
pd.DataFrame(y_test_enc, columns=tot_labels)],
axis=1).astype({k:int for k in tot_labels})
!mkdir -p $data_dir
train_csv, test_csv = f'{data_dir}/train.csv', f'{data_dir}/test.csv'
train_df.to_csv(train_csv)
test_df.to_csv(test_csv)
print(f'wrote data to {train_csv} and {test_csv}, compressing...')
!zip -qr $archive_path $data_dir
# save_data(X_train_enc, X_test_enc, y_train_enc, y_test_enc, X_train.columns, onehot)
def create_verification_data(modelpath, df, onehot, scaler, tot_bins, tot_labels, outpath='verification.csv'):
m = load_model(modelpath)
df['TOT'] = pd.cut(df.ReactionTime, bins=tot_bins, labels=tot_labels).astype(object)
df.RightLaneType = df.RightLaneType.astype(int)
df.LeftLaneType = df.LeftLaneType.astype(int)
df = df.drop_duplicates()
X_df = df.drop(['Name', 'ReactionTime', 'TOT'], axis=1)
X = scaler.transform(X_df.values)
y = onehot.transform(df.TOT.values.reshape(-1, 1)).toarray()
predictions, idxs = m.predict(X), []
for i,p in enumerate(predictions):
maxidxs = np.where(p==p.max())[0]
if (maxidxs.shape[0] == 1) and (maxidxs[0] == y[i].tolist().index(1)):
idxs.append(i)
X, y = X[idxs], y[idxs]
enc_tot_labels = onehot.get_feature_names(input_features=['TOT'])
v_df = pd.concat([pd.DataFrame(X, columns=X_df.columns),
pd.DataFrame(y, columns=enc_tot_labels)],
axis=1).astype({k:int for k in enc_tot_labels})
v_df.to_csv(outpath)
print(f'wrote verification data to {outpath}')
return v_df
# create_verification_data(tf_model_path, raw_df.copy(), onehot, scaler, tot_bins, tot_labels)
"""## Save Model & Verification Artifacts to GDrive"""
# GDrive ID's point to files in models/latest folder
artifacts = {
'artifacts/model.zip': '100s5DVwaK6ILlDe2ZCgm2F8JGrY7Wixf', # tf format
'artifacts/model.h5': '1Kyxb1A4E6U_HPaPjRLVnb2OTJtXOzTXX', # h5 format
'artifacts/model.pb': '1Ap3eWHWwAyw_3wOmy237AJF3pWQRnG3_', # pb format
'artifacts/model.nnet': '1HzfGxhKrw9PpeA1cMsexC4FcWv5OPdtB', # nnet format
'artifacts/scaler.pkl': '10EkqHQ3aqEYAxbLS4Q4LRWJ1byNCvAcf', # scaler object
'artifacts/onehot.pkl': '1SeED9m_TeyqtmHRgDe_kd9HVmn2K1hh8' # onehot object
}
# upload all of the artifacts to drive
# for fname,driveid in artifacts.items():
# overwrite_gdrive_file(driveid, fname)
"""
## Visualization
"""
!mkdir -p images
display(tf.keras.utils.plot_model(model, to_file='images/model.png', show_shapes=True, show_layer_names=True, expand_nested=True, dpi=1200, rankdir='LR'))
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
svg_plot = tf.keras.utils.model_to_dot(model, show_shapes=False, show_layer_names=True, rankdir='LR')
SVG(svg_plot.create(prog='dot', format='svg'))
# with open('images/model.svg', 'w') as f:
# f.write(svg_plot.decode('utf-8'))
svg_plot.write_svg('images/model.svg')
# Commented out IPython magic to ensure Python compatibility.
# %tensorboard --logdir $tensorboard_logs --host localhost --port 6006 | print_heading('Unencoded Data')
display(pd.concat([X_train, y_train], axis=1).describe()) | conditional_block |
cdb.py | # CDB implemented in python ... with a pythonic interface!
# Starting point provided by Yusuke Shinyama
# Eric Ritezel -- February 17, 2007
#
# 20070218 - longstream optimization started
# there's something slow about this. low memory usage, though.
# 20070219 - had dream that led to increased performance.
#
from struct import unpack, pack
import array
# calc hash value with a given key (6.644s against 50k | 8.679s w/o psyco)
def calc_hash(string):
h = 5381
for c in array.array('B', string): h = ((h << 5) + h) ^ c
return h & 0xffffffffL
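# Sanity check: this is the xor variant of djb2, i.e. h = (h * 33) ^ byte starting from 5381,
# so calc_hash("") == 5381 and the result is always masked down to 32 bits.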
# attempt to use psyco for binding calc hash -- not a big deal
try:
from psyco import bind
bind(calc_hash)
except:pass
class reader(object):
"""
This is a reader for the CDB (constant database) system from Daniel J. Bernstein (DJB).
It is pythonic, and it doesn't follow his interface, but that's okay.
THIS IS IN NO WAY THREAD SAFE -- DO NOT DOUBT THE MIGHTY FILESYSTEM
Here's how it works:
[header] <- 256 pairs of uint32 structures [absolute offset][length]
... positioning works like this: header[hash & 0xff]
[header]
[data] <- we're jumping over this;
... each data node consists of [key_length][value_length][key][value]
[data]
[hash_lookup_table] <- there's 256 of these; they're full of babies
... each one has [hash][absolute offset]
... each is (2*entries) long for hash searches
[hash_lookup_table]
Usage:
>>> (build a cdb)
>>> read = reader("test.cdb")
>>> print 'read["a key"] =', read["a key"]
>>> for (key, value) in read.iteritems():
... print key, '= (',
... for values in value:
... print value + ',',
... print ')'
"""
def __init__(self, infile):
"""Open the file connection."""
if isinstance(infile, str): self.filep = open(infile, "r+b")
else: self.filep = infile
# attempt to read file from the start
self.filep.seek(0)
self.start = self.filep.tell()
# get the least pos_bucket position (beginning of subtables)
self.header = unpack('<512L', self.filep.read(2048))
# find the end of the data
self.enddata = min(self.header[0::2])
def __get(self,index,single=True):
return_value = []
hash_prime = calc_hash(index)
# pull data from the cached header
headhash = hash_prime % 256
pos_bucket= self.header[headhash + headhash]
ncells = self.header[headhash + headhash + 1]
# since the 256*8 bytes are all zeroed, this means the hash
# was invalid as we pulled it.
if ncells == 0: raise KeyError
# calculate predictive lookup
offset = (hash_prime >> 8) % ncells
# set a die badly flag (throw key error)
found = False
# loop through the number of cells in the hash range
for step in range(ncells):
self.filep.seek(pos_bucket + ((offset + step) % ncells) * 8)
# grab the hash and position in the data stream
(hash, pointer) = unpack('<LL', self.filep.read(8))
# throw an error if the hash just dumped us in the dirt
if pointer == 0:
# if there were no keys found, complain (else break)
if not found: raise KeyError
break
# check that the hash values check
if hash == hash_prime:
# seek to the location indicated
self.filep.seek(pointer)
# fetch the lengths of the key and value
(klen, vlen) = unpack('<LL', self.filep.read(8))
key = self.filep.read(klen)
value = self.filep.read(vlen)
# make sure that the keys match
if key == index:
return_value.append(value)
# if we're only looking for one item, break out
if single: break
# set found flag for multiple value end condition
found = True
# if there were no records hit, dump a keyerror
else: raise KeyError
# throw back a tuple of the values found for key
return tuple(return_value)
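# Lookup recap: calc_hash(key) % 256 picks one of the 256 subtables from the header,
# (hash >> 8) % ncells picks the starting slot, and collisions are resolved by linear probing
# modulo ncells until a match is collected or a zero pointer (empty slot) ends the scan.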
def __getitem__(self,index):
# shortcut to __get
if not isinstance(index, str): raise TypeError
return self.__get(index)
def get(self,index,default=None):
try:
return self.__get(index,single=False)
except:
if default is not None: return default
raise KeyError
def has_key(self,index):
"""A simple analog of the has_key dict function."""
try:
self.__get(index)
return True
except:
return False
def iteritems(self):
"""A straight pull of the items in the cdb."""
self.filep.seek(self.start + 2048)
# iterate until we hit the enddata marker
while self.filep.tell() < self.enddata - 1:
# fetch the lengths of the key and value
(klen, vlen) = unpack('<LL', self.filep.read(8))
# yield the key and value as a tuple
yield (self.filep.read(klen), self.filep.read(vlen))
def close(self):
"""Close out the file connection."""
self.filep.close()
class builder(object):
"""
The Constant Database system is by DJB (the greatest hero on the interwub)
I just happen to implement it here because it's 1.fast, 2.good, 3.fast.
And I need all three aspects.
Usage:
>>> build = builder("test.cdb")
>>> build['a key'] = 'some value n for stupid'
>>> build.close()
The resultant CDB is read by any compatible lib (including reader above)
Access times are good, but can be made mucho faster with psyco.
"""
def __init__(self, infile):
if isinstance(infile, str):
self.filep = open(infile, "w+b")
else: self.filep = infile
# attempt to read file from the start
self.filep.seek(0)
self.start = self.filep.tell()
# track pointers and hash table data
self.hashbucket = [ array.array('L') for i in range(256) ]
# skip past header storage (file header + 2048)
self.position_hash = self.start + 2048
self.filep.seek(self.position_hash)
def __setitem__(self, index, value):
"""CDB supports multiple values for each key. Problems? Too bad."""
# create value and key storage
self.filep.write(pack('<LL',len(index), len(value)))
self.filep.write(index)
self.filep.write(value)
# grab a hash for the key
hash = calc_hash(index)
# dump a new hash into our bucket
self.hashbucket[hash % 256].fromlist([hash, self.position_hash])
self.position_hash += 8 + (len(index) + len(value))
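# Illustration: b['k'] = 'val' writes pack('<LL', 1, 3), then 'k', then 'val' (12 bytes total),
# and advances position_hash by 8 + 1 + 3.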
def close(self):
from sys import byteorder
# preinitialize array and find byteorder
cell = array.array('L')
shouldswap = (byteorder == 'big')
# iterate completed values for the hash bucket
for hpindex in [ i for i in xrange(256) ]:
ncells = self.hashbucket[hpindex].buffer_info()[1]
if ncells <= 0:
self.hashbucket[hpindex].append(0)
continue
# create blank cell structure
cell.fromlist([ 0 for i in xrange(ncells+ncells) ])
# loop over hash pairs (xrange with parameters = fast)
for i in xrange(0, ncells, 2):
# pull hash from the hashbucket
hash = self.hashbucket[hpindex].pop(0)
# predictive lookup for jump
index = (hash >> 8) % ncells
# skip occupied cells
while cell[index+index] != 0: index = (index + 1) % ncells
# pull pointer and assign hash/pointer set to cell
cell[index+index] = hash
cell[index+index+1] = self.hashbucket[hpindex].pop(0)
# push length back onto stack
self.hashbucket[hpindex].append(ncells)
# write the hash table (swap bytes if we're bigendian)
if shouldswap: cell.byteswap()
cell.tofile(self.filep)
del cell[:]
# skip to start of file
self.filep.seek(self.start)
# dump some information about the hash pairs into the header
for i in xrange(256):
self.filep.write(pack('<LL', self.position_hash, self.hashbucket[i][0]))
self.position_hash += 8 * self.hashbucket[i].pop()
# free up the hashbucket and cell
del(cell)
del(self.hashbucket)
self.filep.close()
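# Round-trip sketch (the file name is illustrative):
# b = builder('example.cdb')
# b['color'] = 'blue'
# b['color'] = 'green'
# b.close()
# r = reader('example.cdb')
# r.get('color') # should yield ('blue', 'green'): duplicate keys keep insertion order
# r.close()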
# a rather complete test suite
if __name__ == "__main__":
import os,sys,time
from random import randint, seed
import hotshot, hotshot.stats
# make python behave for our massive crunching needs
sys.setcheckinterval(10000)
# utility to write data
def randstr(): return "".join([ chr(randint(65,90)) for i in xrange(randint(1,32)) ])
def make_data(n):
print "TEST: Making test data"
return [ (randstr(),randstr()) for i in xrange(n)]
def test_write(testlist, fname="test.cdb"):
starttime = time.time()
# initialize a builder system for a cdb
print "TEST: Building CDB"
a = builder(fname)
# run the test
for (item,value) in testlist: a[item] = value
a['meat'] = "moo"
a['meat'] = "baa"
a['meat'] = "bow wow" | a['meat'] = "mew"
a['meat'] = "ouch"
# close the builder
a.close()
print "TEST: %fs to run build" % (time.time() - starttime)
def test_read(fname="test.cdb"):
print "TEST: Doing read of",fname
cdb = reader(fname)
print 'TEST: Should be False: cdb["not a key"] =', cdb.has_key("not a key")
if cdb.has_key("meat"):
print 'TEST: Multiple values: cdb["meat"] =', cdb.get("meat")
starttime = time.time()
print "TEST: Reconstructing keys from database"
testlist = {}
for (key, values) in cdb.iteritems(): testlist[key]=None
print "TEST: %fs to run fetch" % (time.time() - starttime)
starttime = time.time()
print "TEST: Reading",len(testlist),"entries by access key"
for slug in testlist.keys(): cdb.get(slug)
print "TEST: %fs to run fetch" % (time.time() - starttime)
cdb.close()
def test_massive(testlist, fname="stress.cdb", massive=10**5):
starttime = time.time()
print "TEST: Massive stress test for large databases (%d entries)" % massive
a = builder(fname)
for i in xrange(massive):
a[testlist[i%len(testlist)][0]] = testlist[i%len(testlist)][1]
if not i % (massive / 37): print '.', #print "%3.1f%% complete" % (float(i) / (5*(10**6))*100)
a.close()
print 'done'
print "TEST: %fs to run write" % (time.time() - starttime)
##############################################
###############TESTSUITEBLOCK#################
##############################################
data = make_data(1000)
test_massive(data, massive=10000)
del(data)
test_read(fname='stress.cdb')
exit(1)
# launch profiler test suite
prof = hotshot.Profile("pycdb.prof")
data = make_data(500000)
prof.runcall(test_write, data)
prof.runcall(test_read)
prof.runcall(test_massive, data, massive=500000, fname="stress.cdb")
prof.runcall(test_read, fname="stress.cdb")
prof.close()
print "TEST: Loading hotshot stats"
stats = hotshot.stats.load("pycdb.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20) | random_line_split |
|
cdb.py | # CDB implemented in python ... with a pythonic interface!
# Starting point provided by Yusuke Shinyama
# Eric Ritezel -- February 17, 2007
#
# 20070218 - longstream optimization started
# there's something slow about this. low memory usage, though.
# 20070219 - had dream that led to increased performance.
#
from struct import unpack, pack
import array
# calc hash value with a given key (6.644s against 50k | 8.679s w/o psyco)
def calc_hash(string):
h = 5381
for c in array.array('B', string): h = ((h << 5) + h) ^ c
return h & 0xffffffffL
# attempt to use psyco for binding calc hash -- not a big deal
try:
from psyco import bind
bind(calc_hash)
except:pass
class reader(object):
"""
This is a reader for the CDB (constant database) system from Daniel J. Bernstein (DJB).
It is pythonic, and it doesn't follow his interface, but that's okay.
THIS IS IN NO WAY THREAD SAFE -- DO NOT DOUBT THE MIGHTY FILESYSTEM
Here's how it works:
[header] <- 256 pairs of uint32 structures [absolute offset][length]
... positioning works like this: header[hash & 0xff]
[header]
[data] <- we're jumping over this;
... each data node consists of [key_length][value_length][key][value]
[data]
[hash_lookup_table] <- there's 256 of these; they're full of babies
... each one has [hash][absolute offset]
... each is (2*entries) long for hash searches
[hash_lookup_table]
Usage:
>>> (build a cdb)
>>> read = reader("test.cdb")
>>> print 'read["a key"] =', read["a key"]
>>> for (key, value) in read.iteritems():
... print key, '= (',
... for values in value:
... print value + ',',
... print ')'
"""
def __init__(self, infile):
"""Open the file connection."""
if isinstance(infile, str): self.filep = open(infile, "r+b")
else: self.filep = infile
# attempt to read file from the start
self.filep.seek(0)
self.start = self.filep.tell()
# get the least pos_bucket position (beginning of subtables)
self.header = unpack('<512L', self.filep.read(2048))
# find the end of the data
self.enddata = min(self.header[0::2])
def __get(self,index,single=True):
return_value = []
hash_prime = calc_hash(index)
# pull data from the cached header
headhash = hash_prime % 256
pos_bucket= self.header[headhash + headhash]
ncells = self.header[headhash + headhash + 1]
# since the 256*8 bytes are all zeroed, this means the hash
# was invalid as we pulled it.
if ncells == 0: raise KeyError
# calculate predictive lookup
offset = (hash_prime >> 8) % ncells
# set a die badly flag (throw key error)
found = False
# loop through the number of cells in the hash range
for step in range(ncells):
self.filep.seek(pos_bucket + ((offset + step) % ncells) * 8)
# grab the hash and position in the data stream
(hash, pointer) = unpack('<LL', self.filep.read(8))
# throw an error if the hash just dumped us in the dirt
if pointer == 0:
# if there were no keys found, complain (else break)
if not found: raise KeyError
break
# check that the hash values check
if hash == hash_prime:
# seek to the location indicated
self.filep.seek(pointer)
# fetch the lengths of the key and value
(klen, vlen) = unpack('<LL', self.filep.read(8))
key = self.filep.read(klen)
value = self.filep.read(vlen)
# make sure that the keys match
if key == index:
return_value.append(value)
# if we're only looking for one item, break out
if single: break
# set found flag for multiple value end condition
found = True
# if there were no records hit, dump a keyerror
else: raise KeyError
# throw back a tuple of the values found for key
return tuple(return_value)
def __getitem__(self,index):
# shortcut to __get
if not isinstance(index, str): raise TypeError
return self.__get(index)
def get(self,index,default=None):
try:
return self.__get(index,single=False)
except:
if default is not None: return default
raise KeyError
def has_key(self,index):
"""A simple analog of the has_key dict function."""
try:
self.__get(index)
return True
except:
return False
def iteritems(self):
"""A straight pull of the items in the cdb."""
self.filep.seek(self.start + 2048)
# iterate until we hit the enddata marker
while self.filep.tell() < self.enddata - 1:
# fetch the lengths of the key and value
(klen, vlen) = unpack('<LL', self.filep.read(8))
# yield the key and value as a tuple
yield (self.filep.read(klen), self.filep.read(vlen))
def close(self):
"""Close out the file connection."""
self.filep.close()
class builder(object):
"""
The Constant Database system is by DJB (the greatest hero on the interwub)
I just happen to implement it here because it's 1.fast, 2.good, 3.fast.
And I need all three aspects.
Usage:
>>> build = builder("test.cdb")
>>> build['a key'] = 'some value n for stupid'
>>> build.close()
The resultant CDB is read by any compatible lib (including reader above)
Access times are good, but can be made mucho faster with psyco.
"""
def __init__(self, infile):
if isinstance(infile, str):
self.filep = open(infile, "w+b")
else: self.filep = infile
# attempt to read file from the start
self.filep.seek(0)
self.start = self.filep.tell()
# track pointers and hash table data
self.hashbucket = [ array.array('L') for i in range(256) ]
# skip past header storage (file header + 2048)
self.position_hash = self.start + 2048
self.filep.seek(self.position_hash)
def __setitem__(self, index, value):
"""CDB supports multiple values for each key. Problems? Too bad."""
# create value and key storage
self.filep.write(pack('<LL',len(index), len(value)))
self.filep.write(index)
self.filep.write(value)
# grab a hash for the key
hash = calc_hash(index)
# dump a new hash into our bucket
self.hashbucket[hash % 256].fromlist([hash, self.position_hash])
self.position_hash += 8 + (len(index) + len(value))
def close(self):
from sys import byteorder
# preinitialize array and find byteorder
cell = array.array('L')
shouldswap = (byteorder == 'big')
# iterate completed values for the hash bucket
for hpindex in [ i for i in xrange(256) ]:
ncells = self.hashbucket[hpindex].buffer_info()[1]
if ncells <= 0:
self.hashbucket[hpindex].append(0)
continue
# create blank cell structure
cell.fromlist([ 0 for i in xrange(ncells+ncells) ])
# loop over hash pairs (xrange with parameters = fast)
for i in xrange(0, ncells, 2):
# pull hash from the hashbucket
hash = self.hashbucket[hpindex].pop(0)
# predictive lookup for jump
index = (hash >> 8) % ncells
# skip occupied cells
while cell[index+index] != 0: index = (index + 1) % ncells
# pull pointer and assign hash/pointer set to cell
cell[index+index] = hash
cell[index+index+1] = self.hashbucket[hpindex].pop(0)
# push length back onto stack
self.hashbucket[hpindex].append(ncells)
# write the hash table (swap bytes if we're bigendian)
if shouldswap: cell.byteswap()
cell.tofile(self.filep)
del cell[:]
# skip to start of file
self.filep.seek(self.start)
# dump some information about the hash pairs into the header
for i in xrange(256):
self.filep.write(pack('<LL', self.position_hash, self.hashbucket[i][0]))
self.position_hash += 8 * self.hashbucket[i].pop()
# free up the hashbucket and cell
del(cell)
del(self.hashbucket)
self.filep.close()
# a rather complete test suite
if __name__ == "__main__":
import os,sys,time
from random import randint, seed
import hotshot, hotshot.stats
# make python behave for our massive crunching needs
sys.setcheckinterval(10000)
# utility to write data
def randstr(): return "".join([ chr(randint(65,90)) for i in xrange(randint(1,32)) ])
def make_data(n):
print "TEST: Making test data"
return [ (randstr(),randstr()) for i in xrange(n)]
def test_write(testlist, fname="test.cdb"):
starttime = time.time()
# initialize a builder system for a cdb
print "TEST: Building CDB"
a = builder(fname)
# run the test
for (item,value) in testlist: a[item] = value
a['meat'] = "moo"
a['meat'] = "baa"
a['meat'] = "bow wow"
a['meat'] = "mew"
a['meat'] = "ouch"
# close the builder
a.close()
print "TEST: %fs to run build" % (time.time() - starttime)
def test_read(fname="test.cdb"):
print "TEST: Doing read of",fname
cdb = reader(fname)
print 'TEST: Should be False: cdb["not a key"] =', cdb.has_key("not a key")
if cdb.has_key("meat"):
print 'TEST: Multiple values: cdb["meat"] =', cdb.get("meat")
starttime = time.time()
print "TEST: Reconstructing keys from database"
testlist = {}
for (key, values) in cdb.iteritems(): testlist[key]=None
print "TEST: %fs to run fetch" % (time.time() - starttime)
starttime = time.time()
print "TEST: Reading",len(testlist),"entries by access key"
for slug in testlist.keys(): cdb.get(slug)
print "TEST: %fs to run fetch" % (time.time() - starttime)
cdb.close()
def test_massive(testlist, fname="stress.cdb", massive=10**5):
|
##############################################
###############TESTSUITEBLOCK#################
##############################################
data = make_data(1000)
test_massive(data, massive=10000)
del(data)
test_read(fname='stress.cdb')
exit(1)
# launch profiler test suite
prof = hotshot.Profile("pycdb.prof")
data = make_data(500000)
prof.runcall(test_write, data)
prof.runcall(test_read)
prof.runcall(test_massive, data, massive=500000, fname="stress.cdb")
prof.runcall(test_read, fname="stress.cdb")
prof.close()
print "TEST: Loading hotshot stats"
stats = hotshot.stats.load("pycdb.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
| starttime = time.time()
print "TEST: Massive stress test for large databases (%d entries)" % massive
a = builder(fname)
for i in xrange(massive):
a[testlist[i%len(testlist)][0]] = testlist[i%len(testlist)][1]
if not i % (massive / 37): print '.', #print "%3.1f%% complete" % (float(i) / (5*(10**6))*100)
a.close()
print 'done'
print "TEST: %fs to run write" % (time.time() - starttime) | identifier_body |
cdb.py | # CDB implemented in python ... with a pythonic interface!
# Starting point provided by Yusuke Shinyama
# Eric Ritezel -- February 17, 2007
#
# 20070218 - longstream optimization started
# there's something slow about this. low memory usage, though.
# 20070219 - had dream that led to increased performance.
#
from struct import unpack, pack
import array
# calc hash value with a given key (6.644s against 50k | 8.679s w/o psyco)
def calc_hash(string):
h = 5381
for c in array.array('B', string): h = ((h << 5) + h) ^ c
return h & 0xffffffffL
# attempt to use psyco for binding calc hash -- not a big deal
try:
from psyco import bind
bind(calc_hash)
except:pass
class reader(object):
"""
This is a reader for the CDB (constant database) system from Daniel J. Bernstein (DJB).
It is pythonic, and it doesn't follow his interface, but that's okay.
THIS IS IN NO WAY THREAD SAFE -- DO NOT DOUBT THE MIGHTY FILESYSTEM
Here's how it works:
[header] <- 256 pairs of uint32 structures [absolute offset][length]
... positioning works like this: header[hash & 0xff]
[header]
[data] <- we're jumping over this;
... each data node consists of [key_length][value_length][key][value]
[data]
[hash_lookup_table] <- there's 256 of these; they're full of babies
... each one has [hash][absolute offset]
... each is (2*entries) long for hash searches
[hash_lookup_table]
Usage:
>>> (build a cdb)
>>> read = reader("test.cdb")
>>> print 'read["a key"] =', read["a key"]
>>> for (key, value) in read.iteritems():
... print key, '= (',
... for values in value:
... print value + ',',
... print ')'
"""
def __init__(self, infile):
"""Open the file connection."""
if isinstance(infile, str): self.filep = open(infile, "r+b")
else: self.filep = infile
# attempt to read file from the start
self.filep.seek(0)
self.start = self.filep.tell()
# get the least pos_bucket position (beginning of subtables)
self.header = unpack('<512L', self.filep.read(2048))
# find the end of the data
self.enddata = min(self.header[0::2])
def | (self,index,single=True):
return_value = []
hash_prime = calc_hash(index)
# pull data from the cached header
headhash = hash_prime % 256
pos_bucket= self.header[headhash + headhash]
ncells = self.header[headhash + headhash + 1]
# since the 256*8 bytes are all zeroed, this means the hash
# was invalid as we pulled it.
if ncells == 0: raise KeyError
# calculate predictive lookup
offset = (hash_prime >> 8) % ncells
# set a die badly flag (throw key error)
found = False
# loop through the number of cells in the hash range
for step in range(ncells):
self.filep.seek(pos_bucket + ((offset + step) % ncells) * 8)
# grab the hash and position in the data stream
(hash, pointer) = unpack('<LL', self.filep.read(8))
# throw an error if the hash just dumped us in the dirt
if pointer == 0:
# if there were no keys found, complain (else break)
if not found: raise KeyError
break
# check that the hash values check
if hash == hash_prime:
# seek to the location indicated
self.filep.seek(pointer)
# fetch the lengths of the key and value
(klen, vlen) = unpack('<LL', self.filep.read(8))
key = self.filep.read(klen)
value = self.filep.read(vlen)
# make sure that the keys match
if key == index:
return_value.append(value)
# if we're only looking for one item, break out
if single: break
# set found flag for multiple value end condition
found = True
# if there were no records hit, dump a keyerror
else: raise KeyError
# throw back a tuple of the values found for key
return tuple(return_value)
def __getitem__(self,index):
# shortcut to __get
if not isinstance(index, str): raise TypeError
return self.__get(index)
def get(self,index,default=None):
try:
return self.__get(index,single=False)
except:
if default is not None: return default
raise KeyError
def has_key(self,index):
"""A simple analog of the has_key dict function."""
try:
self.__get(index)
return True
except:
return False
def iteritems(self):
"""A straight pull of the items in the cdb."""
self.filep.seek(self.start + 2048)
# iterate until we hit the enddata marker
while self.filep.tell() < self.enddata - 1:
# fetch the lengths of the key and value
(klen, vlen) = unpack('<LL', self.filep.read(8))
# yield the key and value as a tuple
yield (self.filep.read(klen), self.filep.read(vlen))
def close(self):
"""Close out the file connection."""
self.filep.close()
class builder(object):
"""
The Constant Database system is by DJB (the greatest hero on the interwub)
I just happen to implement it here because it's 1.fast, 2.good, 3.fast.
And I need all three aspects.
Usage:
>>> build = builder("test.cdb")
>>> build['a key'] = 'some value n for stupid'
>>> build.close()
The resultant CDB is read by any compatible lib (including reader above)
Access times are good, but can be made mucho faster with psyco.
"""
def __init__(self, infile):
if isinstance(infile, str):
self.filep = open(infile, "w+b")
else: self.filep = infile
# attempt to read file from the start
self.filep.seek(0)
self.start = self.filep.tell()
# track pointers and hash table data
self.hashbucket = [ array.array('L') for i in range(256) ]
# skip past header storage (file header + 2048)
self.position_hash = self.start + 2048
self.filep.seek(self.position_hash)
def __setitem__(self, index, value):
"""CDB supports multiple values for each key. Problems? Too bad."""
# create value and key storage
self.filep.write(pack('<LL',len(index), len(value)))
self.filep.write(index)
self.filep.write(value)
# grab a hash for the key
hash = calc_hash(index)
# dump a new hash into our bucket
self.hashbucket[hash % 256].fromlist([hash, self.position_hash])
self.position_hash += 8 + (len(index) + len(value))
def close(self):
from sys import byteorder
# preinitialize array and find byteorder
cell = array.array('L')
shouldswap = (byteorder == 'big')
# iterate completed values for the hash bucket
for hpindex in [ i for i in xrange(256) ]:
ncells = self.hashbucket[hpindex].buffer_info()[1]
if ncells <= 0:
self.hashbucket[hpindex].append(0)
continue
# create blank cell structure
cell.fromlist([ 0 for i in xrange(ncells+ncells) ])
# loop over hash pairs (xrange with parameters = fast)
for i in xrange(0, ncells, 2):
# pull hash from the hashbucket
hash = self.hashbucket[hpindex].pop(0)
# predictive lookup for jump
index = (hash >> 8) % ncells
# skip occupied cells
while cell[index+index] != 0: index = (index + 1) % ncells
# pull pointer and assign hash/pointer set to cell
cell[index+index] = hash
cell[index+index+1] = self.hashbucket[hpindex].pop(0)
# push length back onto stack
self.hashbucket[hpindex].append(ncells)
# write the hash table (swap bytes if we're bigendian)
if shouldswap: cell.byteswap()
cell.tofile(self.filep)
del cell[:]
# skip to start of file
self.filep.seek(self.start)
# dump some information about the hash pairs into the header
for i in xrange(256):
self.filep.write(pack('<LL', self.position_hash, self.hashbucket[i][0]))
self.position_hash += 8 * self.hashbucket[i].pop()
# free up the hashbucket and cell
del(cell)
del(self.hashbucket)
self.filep.close()
# a rather complete test suite
if __name__ == "__main__":
import os,sys,time
from random import randint, seed
import hotshot, hotshot.stats
# make python behave for our massive crunching needs
sys.setcheckinterval(10000)
# utility to write data
def randstr(): return "".join([ chr(randint(65,90)) for i in xrange(randint(1,32)) ])
def make_data(n):
print "TEST: Making test data"
return [ (randstr(),randstr()) for i in xrange(n)]
def test_write(testlist, fname="test.cdb"):
starttime = time.time()
# initialize a builder system for a cdb
print "TEST: Building CDB"
a = builder(fname)
# run the test
for (item,value) in testlist: a[item] = value
a['meat'] = "moo"
a['meat'] = "baa"
a['meat'] = "bow wow"
a['meat'] = "mew"
a['meat'] = "ouch"
# close the builder
a.close()
print "TEST: %fs to run build" % (time.time() - starttime)
def test_read(fname="test.cdb"):
print "TEST: Doing read of",fname
cdb = reader(fname)
print 'TEST: Should be False: cdb["not a key"] =', cdb.has_key("not a key")
if cdb.has_key("meat"):
print 'TEST: Multiple values: cdb["meat"] =', cdb.get("meat")
starttime = time.time()
print "TEST: Reconstructing keys from database"
testlist = {}
for (key, values) in cdb.iteritems(): testlist[key]=None
print "TEST: %fs to run fetch" % (time.time() - starttime)
starttime = time.time()
print "TEST: Reading",len(testlist),"entries by access key"
for slug in testlist.keys(): cdb.get(slug)
print "TEST: %fs to run fetch" % (time.time() - starttime)
cdb.close()
def test_massive(testlist, fname="stress.cdb", massive=10**5):
starttime = time.time()
print "TEST: Massive stress test for large databases (%d entries)" % massive
a = builder(fname)
for i in xrange(massive):
a[testlist[i%len(testlist)][0]] = testlist[i%len(testlist)][1]
if not i % (massive / 37): print '.', #print "%3.1f%% complete" % (float(i) / (5*(10**6))*100)
a.close()
print 'done'
print "TEST: %fs to run write" % (time.time() - starttime)
##############################################
###############TESTSUITEBLOCK#################
##############################################
data = make_data(1000)
test_massive(data, massive=10000)
del(data)
test_read(fname='stress.cdb')
exit(1)
# launch profiler test suite
prof = hotshot.Profile("pycdb.prof")
data = make_data(500000)
prof.runcall(test_write, data)
prof.runcall(test_read)
prof.runcall(test_massive, data, massive=500000, fname="stress.cdb")
prof.runcall(test_read, fname="stress.cdb")
prof.close()
print "TEST: Loading hotshot stats"
stats = hotshot.stats.load("pycdb.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
| __get | identifier_name |
cdb.py | # CDB implemented in python ... with a pythonic interface!
# Starting point provided by Yusuke Shinyama
# Eric Ritezel -- February 17, 2007
#
# 20070218 - longstream optimization started
# there's something slow about this. low memory usage, though.
# 20070219 - had dream that led to increased performance.
#
from struct import unpack, pack
import array
# calc hash value with a given key (6.644s against 50k | 8.679s w/o psyco)
def calc_hash(string):
h = 5381
for c in array.array('B', string): h = ((h << 5) + h) ^ c
return h & 0xffffffffL
# attempt to use psyco for binding calc hash -- not a big deal
try:
from psyco import bind
bind(calc_hash)
except:pass
class reader(object):
"""
This is a reader for the CDB (constant database) system from Daniel J. Bernstein (DJB).
It is pythonic, and it doesn't follow his interface, but that's okay.
THIS IS IN NO WAY THREAD SAFE -- DO NOT DOUBT THE MIGHTY FILESYSTEM
Here's how it works:
[header] <- 256 pairs of uint32 structures [absolute offset][length]
... positioning works like this: header[hash & 0xff]
[header]
[data] <- we're jumping over this;
... each data node consists of [key_length][value_length][key][value]
[data]
[hash_lookup_table] <- there's 256 of these; they're full of babies
... each one has [hash][absolute offset]
... each is (2*entries) long for hash searches
[hash_lookup_table]
Usage:
>>> (build a cdb)
>>> read = reader("test.cdb")
>>> print 'read["a key"] =', read["a key"]
>>> for (key, value) in read.iteritems():
... print key, '= (',
... for values in value:
... print value + ',',
... print ')'
"""
def __init__(self, infile):
"""Open the file connection."""
if isinstance(infile, str): self.filep = open(infile, "r+b")
else: self.filep = infile
# attempt to read file from the start
self.filep.seek(0)
self.start = self.filep.tell()
# get the least pos_bucket position (beginning of subtables)
self.header = unpack('<512L', self.filep.read(2048))
# find the end of the data
self.enddata = min(self.header[0::2])
def __get(self,index,single=True):
return_value = []
hash_prime = calc_hash(index)
# pull data from the cached header
headhash = hash_prime % 256
pos_bucket= self.header[headhash + headhash]
ncells = self.header[headhash + headhash + 1]
# since the 256*8 bytes are all zeroed, this means the hash
# was invalid as we pulled it.
if ncells == 0: raise KeyError
# calculate predictive lookup
offset = (hash_prime >> 8) % ncells
# set a die badly flag (throw key error)
found = False
# loop through the number of cells in the hash range
for step in range(ncells):
self.filep.seek(pos_bucket + ((offset + step) % ncells) * 8)
# grab the hash and position in the data stream
(hash, pointer) = unpack('<LL', self.filep.read(8))
# throw an error if the hash just dumped us in the dirt
if pointer == 0:
# if there were no keys found, complain (else break)
if not found: raise KeyError
break
# check that the hash values check
if hash == hash_prime:
# seek to the location indicated
self.filep.seek(pointer)
# fetch the lengths of the key and value
(klen, vlen) = unpack('<LL', self.filep.read(8))
key = self.filep.read(klen)
value = self.filep.read(vlen)
# make sure that the keys match
if key == index:
return_value.append(value)
# if we're only looking for one item, break out
if single: break
# set found flag for multiple value end condition
found = True
# if there were no records hit, dump a keyerror
else: raise KeyError
# throw back a tuple of the values found for key
return tuple(return_value)
def __getitem__(self,index):
# shortcut to __get
if not isinstance(index, str): raise TypeError
return self.__get(index)
def get(self,index,default=None):
try:
return self.__get(index,single=False)
except:
if default is not None: return default
raise KeyError
def has_key(self,index):
"""A simple analog of the has_key dict function."""
try:
self.__get(index)
return True
except:
return False
def iteritems(self):
"""A straight pull of the items in the cdb."""
self.filep.seek(self.start + 2048)
# iterate until we hit the enddata marker
while self.filep.tell() < self.enddata - 1:
# fetch the lengths of the key and value
(klen, vlen) = unpack('<LL', self.filep.read(8))
# yield the key and value as a tuple
yield (self.filep.read(klen), self.filep.read(vlen))
def close(self):
"""Close out the file connection."""
self.filep.close()
class builder(object):
"""
The Constant Database system is by DJB (the greatest hero on the interwub)
I just happen to implement it here because it's 1.fast, 2.good, 3.fast.
And I need all three aspects.
Usage:
>>> build = builder("test.cdb")
>>> build['a key'] = 'some value n for stupid'
>>> build.close()
The resultant CDB is read by any compatible lib (including reader above)
Access times are good, but can be made mucho faster with psyco.
"""
def __init__(self, infile):
if isinstance(infile, str):
|
else: self.filep = infile
# attempt to read file from the start
self.filep.seek(0)
self.start = self.filep.tell()
# track pointers and hash table data
self.hashbucket = [ array.array('L') for i in range(256) ]
# skip past header storage (file header + 2048)
self.position_hash = self.start + 2048
self.filep.seek(self.position_hash)
def __setitem__(self, index, value):
"""CDB supports multiple values for each key. Problems? Too bad."""
# create value and key storage
self.filep.write(pack('<LL',len(index), len(value)))
self.filep.write(index)
self.filep.write(value)
# grab a hash for the key
hash = calc_hash(index)
# dump a new hash into our bucket
self.hashbucket[hash % 256].fromlist([hash, self.position_hash])
self.position_hash += 8 + (len(index) + len(value))
def close(self):
from sys import byteorder
# preinitialize array and find byteorder
cell = array.array('L')
shouldswap = (byteorder == 'big')
# iterate completed values for the hash bucket
for hpindex in xrange(256):
ncells = self.hashbucket[hpindex].buffer_info()[1]
if ncells <= 0:
self.hashbucket[hpindex].append(0)
continue
# create blank cell structure
cell.fromlist([ 0 for i in xrange(ncells+ncells) ])
# loop over hash pairs (xrange with parameters = fast)
for i in xrange(0, ncells, 2):
# pull hash from the hashbucket
hash = self.hashbucket[hpindex].pop(0)
# predictive lookup for jump
index = (hash >> 8) % ncells
# skip occupied cells
while cell[index+index] != 0: index = (index + 1) % ncells
# pull pointer and assign hash/pointer set to cell
cell[index+index] = hash
cell[index+index+1] = self.hashbucket[hpindex].pop(0)
# push length back onto stack
self.hashbucket[hpindex].append(ncells)
# write the hash table (swap bytes if we're bigendian)
if shouldswap: cell.byteswap()
cell.tofile(self.filep)
del cell[:]
# skip to start of file
self.filep.seek(self.start)
# dump some information about the hash pairs into the header
for i in xrange(256):
self.filep.write(pack('<LL', self.position_hash, self.hashbucket[i][0]))
self.position_hash += 8 * self.hashbucket[i].pop()
# free up the hashbucket and cell
del(cell)
del(self.hashbucket)
self.filep.close()
# a rather complete test suite
if __name__ == "__main__":
import os,sys,time
from random import randint, seed
import hotshot, hotshot.stats
# make python behave for our massive crunching needs
sys.setcheckinterval(10000)
# utility to write data
def randstr(): return "".join([ chr(randint(65,90)) for i in xrange(randint(1,32)) ])
def make_data(n):
print "TEST: Making test data"
return [ (randstr(),randstr()) for i in xrange(n)]
def test_write(testlist, fname="test.cdb"):
starttime = time.time()
# initialize a builder system for a cdb
print "TEST: Building CDB"
a = builder(fname)
# run the test
for (item,value) in testlist: a[item] = value
a['meat'] = "moo"
a['meat'] = "baa"
a['meat'] = "bow wow"
a['meat'] = "mew"
a['meat'] = "ouch"
# close the builder
a.close()
print "TEST: %fs to run build" % (time.time() - starttime)
def test_read(fname="test.cdb"):
print "TEST: Doing read of",fname
cdb = reader(fname)
print 'TEST: Should be False: cdb["not a key"] =', cdb.has_key("not a key")
if cdb.has_key("meat"):
print 'TEST: Multiple values: cdb["meat"] =', cdb.get("meat")
starttime = time.time()
print "TEST: Reconstructing keys from database"
testlist = {}
for (key, values) in cdb.iteritems(): testlist[key]=None
print "TEST: %fs to run fetch" % (time.time() - starttime)
starttime = time.time()
print "TEST: Reading",len(testlist),"entries by access key"
for slug in testlist.keys(): cdb.get(slug)
print "TEST: %fs to run fetch" % (time.time() - starttime)
cdb.close()
def test_massive(testlist, fname="stress.cdb", massive=10**5):
starttime = time.time()
print "TEST: Massive stress test for large databases (%d entries)" % massive
a = builder(fname)
for i in xrange(massive):
a[testlist[i%len(testlist)][0]] = testlist[i%len(testlist)][1]
if not i % (massive / 37): print '.', #print "%3.1f%% complete" % (float(i) / (5*(10**6))*100)
a.close()
print 'done'
print "TEST: %fs to run write" % (time.time() - starttime)
##############################################
###############TESTSUITEBLOCK#################
##############################################
data = make_data(1000)
test_massive(data, massive=10000)
del(data)
test_read(fname='stress.cdb')
exit(1)
# launch profiler test suite
prof = hotshot.Profile("pycdb.prof")
data = make_data(500000)
prof.runcall(test_write, data)
prof.runcall(test_read)
prof.runcall(test_massive, data, massive=500000, fname="stress.cdb")
prof.runcall(test_read, fname="stress.cdb")
prof.close()
print "TEST: Loading hotshot stats"
stats = hotshot.stats.load("pycdb.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
| self.filep = open(infile, "w+b") | conditional_block |
gimli.rs | //! Support for symbolication using the `gimli` crate on crates.io
//!
//! This implementation is largely a work in progress and is off by default for
//! all platforms, but it's hoped to be developed over time! Long-term this is
//! intended to wholesale replace the `libbacktrace.rs` implementation.
use self::gimli::read::EndianSlice;
use self::gimli::LittleEndian as Endian;
use self::mmap::Mmap;
use self::stash::Stash;
use super::BytesOrWideString;
use super::ResolveWhat;
use super::SymbolName;
use addr2line::gimli;
use core::convert::TryInto;
use core::mem;
use core::u32;
use libc::c_void;
use mystd::ffi::OsString;
use mystd::fs::File;
use mystd::path::Path;
use mystd::prelude::v1::*;
#[cfg(backtrace_in_libstd)]
mod mystd {
pub use crate::*;
}
#[cfg(not(backtrace_in_libstd))]
extern crate std as mystd;
cfg_if::cfg_if! {
if #[cfg(windows)] {
#[path = "gimli/mmap_windows.rs"]
mod mmap;
} else if #[cfg(any(
target_os = "android",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "openbsd",
target_os = "solaris",
))] {
#[path = "gimli/mmap_unix.rs"]
mod mmap;
} else {
#[path = "gimli/mmap_fake.rs"]
mod mmap;
}
}
mod stash;
const MAPPINGS_CACHE_SIZE: usize = 4;
struct Context<'a> {
dwarf: addr2line::Context<EndianSlice<'a, Endian>>,
object: Object<'a>,
}
struct Mapping {
// 'static lifetime is a lie to hack around lack of support for self-referential structs.
cx: Context<'static>,
_map: Mmap,
_stash: Stash,
}
fn cx<'data>(stash: &'data Stash, object: Object<'data>) -> Option<Context<'data>> {
fn load_section<'data, S>(stash: &'data Stash, obj: &Object<'data>) -> S
where
S: gimli::Section<gimli::EndianSlice<'data, Endian>>,
{
let data = obj.section(stash, S::section_name()).unwrap_or(&[]);
S::from(EndianSlice::new(data, Endian))
}
let dwarf = addr2line::Context::from_sections(
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
gimli::EndianSlice::new(&[], Endian),
)
.ok()?;
Some(Context { dwarf, object })
}
macro_rules! mk {
(Mapping { $map:expr, $inner:expr, $stash:expr }) => {{
fn assert_lifetimes<'a>(_: &'a Mmap, _: &Context<'a>, _: &'a Stash) {}
assert_lifetimes(&$map, &$inner, &$stash);
Mapping {
// Convert to 'static lifetimes since the symbols should
// only borrow `map` and `stash` and we're preserving them below.
cx: unsafe { core::mem::transmute::<Context<'_>, Context<'static>>($inner) },
_map: $map,
_stash: $stash,
}
}};
}
fn mmap(path: &Path) -> Option<Mmap> {
let file = File::open(path).ok()?;
let len = file.metadata().ok()?.len().try_into().ok()?;
unsafe { Mmap::map(&file, len) }
}
cfg_if::cfg_if! {
if #[cfg(windows)] {
use core::mem::MaybeUninit;
use super::super::windows::*;
use mystd::os::windows::prelude::*;
use alloc::vec;
mod coff;
use self::coff::Object;
// For loading native libraries on Windows, see some discussion on
// rust-lang/rust#71060 for the various strategies here.
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
unsafe { add_loaded_images(&mut ret); }
return ret;
}
unsafe fn add_loaded_images(ret: &mut Vec<Library>) {
let snap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, 0);
if snap == INVALID_HANDLE_VALUE {
return;
}
let mut me = MaybeUninit::<MODULEENTRY32W>::zeroed().assume_init();
me.dwSize = mem::size_of_val(&me) as DWORD;
if Module32FirstW(snap, &mut me) == TRUE {
loop {
if let Some(lib) = load_library(&me) {
ret.push(lib);
}
if Module32NextW(snap, &mut me) != TRUE {
break;
}
}
}
CloseHandle(snap);
}
unsafe fn load_library(me: &MODULEENTRY32W) -> Option<Library> {
let pos = me
.szExePath
.iter()
.position(|i| *i == 0)
.unwrap_or(me.szExePath.len());
let name = OsString::from_wide(&me.szExePath[..pos]);
// MinGW libraries currently don't support ASLR
// (rust-lang/rust#16514), but DLLs can still be relocated around in
// the address space. It appears that addresses in debug info are
// all as-if this library was loaded at its "image base", which is a
// field in its COFF file headers. Since this is what debuginfo
// seems to list we parse the symbol table and store addresses as if
// the library was loaded at "image base" as well.
//
// The library may not be loaded at "image base", however.
// (presumably something else may be loaded there?) This is where
// the `bias` field comes into play, and we need to figure out the
// value of `bias` here. Unfortunately though it's not clear how to
// acquire this from a loaded module. What we do have, however, is
// the actual load address (`modBaseAddr`).
//
// As a bit of a cop-out for now we mmap the file, read the file
// header information, then drop the mmap. This is wasteful because
// we'll probably reopen the mmap later, but this should work well
// enough for now.
//
// Once we have the `image_base` (desired load location) and the
// `base_addr` (actual load location) we can fill in the `bias`
// (difference between the actual and desired) and then the stated
// address of each segment is the `image_base` since that's what the
// file says.
//
// For now it appears that unlike ELF/MachO we can make do with one
// segment per library, using `modBaseSize` as the whole size.
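// Informally (made-up numbers): an image_base of 0x140000000 and a
// modBaseAddr of 0x150000000 give a bias of 0x10000000, so the stated
// address (image_base) plus the bias lands back on the real load address.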
let mmap = mmap(name.as_ref())?;
let image_base = coff::get_image_base(&mmap)?;
let base_addr = me.modBaseAddr as usize;
Some(Library {
name,
bias: base_addr.wrapping_sub(image_base),
segments: vec![LibrarySegment {
stated_virtual_memory_address: image_base,
len: me.modBaseSize as usize,
}],
})
}
} else if #[cfg(target_os = "macos")] {
// macOS uses the Mach-O file format and uses DYLD-specific APIs to
// load a list of native libraries that are part of the application.
use mystd::os::unix::prelude::*;
use mystd::ffi::{OsStr, CStr};
mod macho;
use self::macho::Object;
#[allow(deprecated)]
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
let images = unsafe { libc::_dyld_image_count() };
for i in 0..images {
ret.extend(native_library(i));
}
return ret;
}
#[allow(deprecated)]
fn native_library(i: u32) -> Option<Library> {
use object::macho;
use object::read::macho::{MachHeader, Segment};
use object::{Bytes, NativeEndian};
// Fetch the name of this library which corresponds to the path of
// where to load it as well.
let name = unsafe {
let name = libc::_dyld_get_image_name(i);
if name.is_null() {
return None;
}
CStr::from_ptr(name)
};
// Load the image header of this library and delegate to `object` to
// parse all the load commands so we can figure out all the segments
// involved here.
let (mut load_commands, endian) = unsafe {
let header = libc::_dyld_get_image_header(i);
if header.is_null() {
return None;
}
match (*header).magic {
macho::MH_MAGIC => {
let endian = NativeEndian;
let header = &*(header as *const macho::MachHeader32<NativeEndian>);
let data = core::slice::from_raw_parts(
header as *const _ as *const u8,
mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize
);
(header.load_commands(endian, Bytes(data)).ok()?, endian)
}
macho::MH_MAGIC_64 => {
let endian = NativeEndian;
let header = &*(header as *const macho::MachHeader64<NativeEndian>);
let data = core::slice::from_raw_parts(
header as *const _ as *const u8,
mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize
);
(header.load_commands(endian, Bytes(data)).ok()?, endian)
}
_ => return None,
}
};
// Iterate over the segments and register known regions for segments
// that we find. Additionally record information about text segments
// for processing later, see comments below.
let mut segments = Vec::new();
let mut first_text = 0;
let mut text_fileoff_zero = false;
while let Some(cmd) = load_commands.next().ok()? {
if let Some((seg, _)) = cmd.segment_32().ok()? {
if seg.name() == b"__TEXT" {
first_text = segments.len();
if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 {
text_fileoff_zero = true;
}
}
segments.push(LibrarySegment {
len: seg.vmsize(endian).try_into().ok()?,
stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?,
});
}
if let Some((seg, _)) = cmd.segment_64().ok()? {
if seg.name() == b"__TEXT" {
first_text = segments.len();
if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 {
text_fileoff_zero = true;
}
}
segments.push(LibrarySegment {
len: seg.vmsize(endian).try_into().ok()?,
stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?,
});
}
}
// Determine the "slide" for this library which ends up being the
// bias we use to figure out where in memory objects are loaded.
// This is a bit of a weird computation though and is the result of
// trying a few things in the wild and seeing what sticks.
//
// The general idea is that the `bias` plus a segment's
// `stated_virtual_memory_address` is going to be where in the
// actual address space the segment resides. The other thing we rely
// on though is that a real address minus the `bias` is the index to
// look up in the symbol table and debuginfo.
//
// It turns out, though, that for system loaded libraries these
// calculations are incorrect. For native executables, however, it
// appears correct. Lifting some logic from LLDB's source it has
// some special-casing for the first `__TEXT` section loaded from
// file offset 0 with a nonzero size. For whatever reason when this
// is present it appears to mean that the symbol table is relative
// to just the vmaddr slide for the library. If it's *not* present
// then the symbol table is relative to the vmaddr slide plus
// the segment's stated address.
//
// To handle this situation if we *don't* find a text section at
// file offset zero then we increase the bias by the first text
// section's stated address and decrease all stated addresses by
// that amount as well. That way the symbol table always appears
// relative to the library's bias amount. This appears to have the
// right results for symbolizing via the symbol table.
//
// Honestly I'm not entirely sure whether this is right or if
// there's something else that should indicate how to do this. For
// now though this seems to work well enough (?) and we should
// always be able to tweak this over time if necessary.
//
// For some more information see #318
let mut slide = unsafe { libc::_dyld_get_image_vmaddr_slide(i) as usize };
if !text_fileoff_zero {
let adjust = segments[first_text].stated_virtual_memory_address;
for segment in segments.iter_mut() {
segment.stated_virtual_memory_address -= adjust;
}
slide += adjust;
}
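// For example (made-up numbers): if the first __TEXT segment is stated at
// 0x100000000 and no text section sits at file offset 0, every stated
// address drops by 0x100000000 while the slide grows by the same amount,
// so stated address + slide still points at the same place in memory.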
Some(Library {
name: OsStr::from_bytes(name.to_bytes()).to_owned(),
segments,
bias: slide,
})
}
} else if #[cfg(any(
target_os = "linux",
target_os = "fuchsia",
))] {
// Other Unix (e.g. Linux) platforms use ELF as an object file format
// and typically implement an API called `dl_iterate_phdr` to load
// native libraries.
use mystd::os::unix::prelude::*;
use mystd::ffi::{OsStr, CStr};
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
unsafe {
libc::dl_iterate_phdr(Some(callback), &mut ret as *mut _ as *mut _);
}
return ret;
}
unsafe extern "C" fn callback(
info: *mut libc::dl_phdr_info,
_size: libc::size_t,
vec: *mut libc::c_void,
) -> libc::c_int {
let libs = &mut *(vec as *mut Vec<Library>);
let name = if (*info).dlpi_name.is_null() || *(*info).dlpi_name == 0 {
if libs.is_empty() {
mystd::env::current_exe().map(|e| e.into()).unwrap_or_default()
} else {
OsString::new()
}
} else {
let bytes = CStr::from_ptr((*info).dlpi_name).to_bytes();
OsStr::from_bytes(bytes).to_owned()
};
let headers = core::slice::from_raw_parts((*info).dlpi_phdr, (*info).dlpi_phnum as usize);
libs.push(Library {
name,
segments: headers
.iter()
.map(|header| LibrarySegment {
len: (*header).p_memsz as usize,
stated_virtual_memory_address: (*header).p_vaddr as usize,
})
.collect(),
bias: (*info).dlpi_addr as usize,
});
0
}
} else if #[cfg(target_env = "libnx")] {
// DevkitA64 doesn't natively support debug info, but the build system will place debug
// info at the path `romfs:/debug_info.elf`.
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
extern "C" {
static __start__: u8;
}
let bias = unsafe { &__start__ } as *const u8 as usize;
let mut ret = Vec::new();
let mut segments = Vec::new();
segments.push(LibrarySegment {
stated_virtual_memory_address: 0,
len: usize::max_value() - bias,
});
let path = "romfs:/debug_info.elf";
ret.push(Library {
name: path.into(),
segments,
bias,
});
ret
}
} else {
// Everything else should use ELF, but doesn't know how to load native
// libraries.
use mystd::os::unix::prelude::*;
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
Vec::new()
}
}
}
#[derive(Default)]
struct Cache {
/// All known shared libraries that have been loaded.
libraries: Vec<Library>,
/// Mappings cache where we retain parsed dwarf information.
///
/// This list has a fixed capacity for its entire lifetime which never
/// increases. The `usize` element of each pair is an index into `libraries`
/// above where `usize::max_value()` represents the current executable. The
/// `Mapping` is corresponding parsed dwarf information.
///
/// Note that this is basically an LRU cache and we'll be shifting things
/// around in here as we symbolize addresses.
mappings: Vec<(usize, Mapping)>,
}
struct Library {
name: OsString,
/// Segments of this library loaded into memory, and where they're loaded.
segments: Vec<LibrarySegment>,
/// The "bias" of this library, typically where it's loaded into memory.
/// This value is added to each segment's stated address to get the actual
/// virtual memory address that the segment is loaded into. Additionally
/// this bias is subtracted from real virtual memory addresses to index into
/// debuginfo and the symbol table.
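/// For example (made-up numbers): a segment stated at 0x1000 in a library
/// with a bias of 0x5000 is mapped at 0x6000, and a real address of 0x6234
/// indexes the debuginfo at 0x1234.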
bias: usize,
}
struct LibrarySegment {
/// The stated address of this segment in the object file. This is not
/// actually where the segment is loaded, but rather this address plus the
/// containing library's `bias` is where to find it.
stated_virtual_memory_address: usize,
/// The size of this segment in memory.
len: usize,
}
// unsafe because this is required to be externally synchronized
pub unsafe fn clear_symbol_cache() {
Cache::with_global(|cache| cache.mappings.clear());
}
impl Cache {
fn new() -> Cache {
Cache {
mappings: Vec::with_capacity(MAPPINGS_CACHE_SIZE),
libraries: native_libraries(),
}
}
// unsafe because this is required to be externally synchronized
unsafe fn with_global(f: impl FnOnce(&mut Self)) |
fn avma_to_svma(&self, addr: *const u8) -> Option<(usize, *const u8)> {
self.libraries
.iter()
.enumerate()
.filter_map(|(i, lib)| {
// First up, test if this `lib` has any segment containing the
// `addr` (handling relocation). If this check passes then we
// can continue below and actually translate the address.
//
// Note that we're using `wrapping_add` here to avoid overflow
// checks. It's been seen in the wild that the SVMA + bias
// computation overflows. It seems a bit odd that would happen
// but there's not a huge amount we can do about it other than
// probably just ignore those segments since they're likely
// pointing off into space. This originally came up in
// rust-lang/backtrace-rs#329.
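// As a made-up example: with bias 0x1000 and a segment stated at
// 0x4000 with len 0x2000, the covered AVMAs are 0x5000..0x7000 and
// AVMA 0x5500 maps back to SVMA 0x4500.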
if !lib.segments.iter().any(|s| {
let svma = s.stated_virtual_memory_address;
let start = svma.wrapping_add(lib.bias);
let end = start.wrapping_add(s.len);
let address = addr as usize;
start <= address && address < end
}) {
return None;
}
// Now that we know `lib` contains `addr`, we can offset with
// the bias to find the stated virtual memory address.
let svma = (addr as usize).wrapping_sub(lib.bias);
Some((i, svma as *const u8))
})
.next()
}
fn mapping_for_lib<'a>(&'a mut self, lib: usize) -> Option<&'a Context<'a>> {
let idx = self.mappings.iter().position(|(idx, _)| *idx == lib);
// Invariant: after this conditional completes without early returning
// from an error, the cache entry for this path is at index 0.
if let Some(idx) = idx {
// When the mapping is already in the cache, move it to the front.
if idx != 0 {
let entry = self.mappings.remove(idx);
self.mappings.insert(0, entry);
}
} else {
// When the mapping is not in the cache, create a new mapping,
// insert it into the front of the cache, and evict the oldest cache
// entry if necessary.
let name = &self.libraries[lib].name;
let mapping = Mapping::new(name.as_ref())?;
if self.mappings.len() == MAPPINGS_CACHE_SIZE {
self.mappings.pop();
}
self.mappings.insert(0, (lib, mapping));
}
let cx: &'a Context<'static> = &self.mappings[0].1.cx;
// don't leak the `'static` lifetime, make sure it's scoped to just
// ourselves
Some(unsafe { mem::transmute::<&'a Context<'static>, &'a Context<'a>>(cx) })
}
}
pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) {
let addr = what.address_or_ip();
let mut call = |sym: Symbol<'_>| {
// Extend the lifetime of `sym` to `'static` since we are unfortunately
// required to here, but it's only ever going out as a reference so no
// reference to it should be persisted beyond this frame anyway.
let sym = mem::transmute::<Symbol<'_>, Symbol<'static>>(sym);
(cb)(&super::Symbol { inner: sym });
};
Cache::with_global(|cache| {
let (lib, addr) = match cache.avma_to_svma(addr as *const u8) {
Some(pair) => pair,
None => return,
};
// Finally, get a cached mapping or create a new mapping for this file, and
// evaluate the DWARF info to find the file/line/name for this address.
let cx = match cache.mapping_for_lib(lib) {
Some(cx) => cx,
None => return,
};
let mut any_frames = false;
if let Ok(mut frames) = cx.dwarf.find_frames(addr as u64) {
while let Ok(Some(frame)) = frames.next() {
any_frames = true;
call(Symbol::Frame {
addr: addr as *mut c_void,
location: frame.location,
name: frame.function.map(|f| f.name.slice()),
});
}
}
if !any_frames {
if let Some(name) = cx.object.search_symtab(addr as u64) {
call(Symbol::Symtab {
addr: addr as *mut c_void,
name,
});
}
}
});
}
pub enum Symbol<'a> {
/// We were able to locate frame information for this symbol, and
/// `addr2line`'s frame internally has all the nitty gritty details.
Frame {
addr: *mut c_void,
location: Option<addr2line::Location<'a>>,
name: Option<&'a [u8]>,
},
/// Couldn't find debug information, but we found it in the symbol table of
/// the elf executable.
Symtab { addr: *mut c_void, name: &'a [u8] },
}
impl Symbol<'_> {
pub fn name(&self) -> Option<SymbolName<'_>> {
match self {
Symbol::Frame { name, .. } => {
let name = name.as_ref()?;
Some(SymbolName::new(name))
}
Symbol::Symtab { name, .. } => Some(SymbolName::new(name)),
}
}
pub fn addr(&self) -> Option<*mut c_void> {
match self {
Symbol::Frame { addr, .. } => Some(*addr),
Symbol::Symtab { .. } => None,
}
}
pub fn filename_raw(&self) -> Option<BytesOrWideString<'_>> {
match self {
Symbol::Frame { location, .. } => {
let file = location.as_ref()?.file?;
Some(BytesOrWideString::Bytes(file.as_bytes()))
}
Symbol::Symtab { .. } => None,
}
}
pub fn filename(&self) -> Option<&Path> {
match self {
Symbol::Frame { location, .. } => {
let file = location.as_ref()?.file?;
Some(Path::new(file))
}
Symbol::Symtab { .. } => None,
}
}
pub fn lineno(&self) -> Option<u32> {
match self {
Symbol::Frame { location, .. } => location.as_ref()?.line,
Symbol::Symtab { .. } => None,
}
}
pub fn colno(&self) -> Option<u32> {
match self {
Symbol::Frame { location, .. } => location.as_ref()?.column,
Symbol::Symtab { .. } => None,
}
}
}
| {
// A very small, very simple LRU cache for debug info mappings.
//
// The hit rate should be very high, since the typical stack doesn't cross
// between many shared libraries.
//
// The `addr2line::Context` structures are pretty expensive to create. Its
// cost is expected to be amortized by subsequent `locate` queries, which
// leverage the structures built when constructing `addr2line::Context`s to
// get nice speedups. If we didn't have this cache, that amortization would
// never happen, and symbolicating backtraces would be ssssllllooooowwww.
static mut MAPPINGS_CACHE: Option<Cache> = None;
f(MAPPINGS_CACHE.get_or_insert_with(|| Cache::new()))
} | identifier_body |
gimli.rs | //! Support for symbolication using the `gimli` crate on crates.io
//!
//! This implementation is largely a work in progress and is off by default for
//! all platforms, but it's hoped to be developed over time! Long-term this is
//! intended to wholesale replace the `libbacktrace.rs` implementation.
use self::gimli::read::EndianSlice;
use self::gimli::LittleEndian as Endian;
use self::mmap::Mmap;
use self::stash::Stash;
use super::BytesOrWideString;
use super::ResolveWhat;
use super::SymbolName;
use addr2line::gimli;
use core::convert::TryInto;
use core::mem;
use core::u32;
use libc::c_void;
use mystd::ffi::OsString;
use mystd::fs::File;
use mystd::path::Path;
use mystd::prelude::v1::*;
#[cfg(backtrace_in_libstd)]
mod mystd {
pub use crate::*;
}
#[cfg(not(backtrace_in_libstd))]
extern crate std as mystd;
cfg_if::cfg_if! {
if #[cfg(windows)] {
#[path = "gimli/mmap_windows.rs"]
mod mmap;
} else if #[cfg(any(
target_os = "android",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "openbsd",
target_os = "solaris",
))] {
#[path = "gimli/mmap_unix.rs"]
mod mmap;
} else {
#[path = "gimli/mmap_fake.rs"]
mod mmap;
}
}
mod stash;
const MAPPINGS_CACHE_SIZE: usize = 4;
struct Context<'a> {
dwarf: addr2line::Context<EndianSlice<'a, Endian>>,
object: Object<'a>,
}
struct Mapping {
// 'static lifetime is a lie to hack around lack of support for self-referential structs.
cx: Context<'static>,
_map: Mmap,
_stash: Stash,
}
fn cx<'data>(stash: &'data Stash, object: Object<'data>) -> Option<Context<'data>> {
fn load_section<'data, S>(stash: &'data Stash, obj: &Object<'data>) -> S
where
S: gimli::Section<gimli::EndianSlice<'data, Endian>>,
{
let data = obj.section(stash, S::section_name()).unwrap_or(&[]);
S::from(EndianSlice::new(data, Endian))
}
let dwarf = addr2line::Context::from_sections(
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
gimli::EndianSlice::new(&[], Endian),
)
.ok()?;
Some(Context { dwarf, object })
}
macro_rules! mk {
(Mapping { $map:expr, $inner:expr, $stash:expr }) => {{
fn assert_lifetimes<'a>(_: &'a Mmap, _: &Context<'a>, _: &'a Stash) {}
assert_lifetimes(&$map, &$inner, &$stash);
Mapping {
// Convert to 'static lifetimes since the symbols should
// only borrow `map` and `stash` and we're preserving them below.
cx: unsafe { core::mem::transmute::<Context<'_>, Context<'static>>($inner) },
_map: $map,
_stash: $stash,
}
}};
}
fn mmap(path: &Path) -> Option<Mmap> {
let file = File::open(path).ok()?;
let len = file.metadata().ok()?.len().try_into().ok()?;
unsafe { Mmap::map(&file, len) }
}
cfg_if::cfg_if! {
if #[cfg(windows)] {
use core::mem::MaybeUninit;
use super::super::windows::*;
use mystd::os::windows::prelude::*;
use alloc::vec;
mod coff;
use self::coff::Object;
// For loading native libraries on Windows, see some discussion on
// rust-lang/rust#71060 for the various strategies here.
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
unsafe { add_loaded_images(&mut ret); }
return ret;
}
unsafe fn add_loaded_images(ret: &mut Vec<Library>) {
let snap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, 0);
if snap == INVALID_HANDLE_VALUE {
return;
}
let mut me = MaybeUninit::<MODULEENTRY32W>::zeroed().assume_init();
me.dwSize = mem::size_of_val(&me) as DWORD;
if Module32FirstW(snap, &mut me) == TRUE {
loop {
if let Some(lib) = load_library(&me) {
ret.push(lib);
}
if Module32NextW(snap, &mut me) != TRUE {
break;
}
}
}
CloseHandle(snap);
}
unsafe fn load_library(me: &MODULEENTRY32W) -> Option<Library> {
let pos = me
.szExePath
.iter()
.position(|i| *i == 0)
.unwrap_or(me.szExePath.len());
let name = OsString::from_wide(&me.szExePath[..pos]);
// MinGW libraries currently don't support ASLR
// (rust-lang/rust#16514), but DLLs can still be relocated around in
// the address space. It appears that addresses in debug info are
// all as-if this library was loaded at its "image base", which is a
// field in its COFF file headers. Since this is what debuginfo
// seems to list we parse the symbol table and store addresses as if
// the library was loaded at "image base" as well.
//
// The library may not be loaded at "image base", however.
// (presumably something else may be loaded there?) This is where
// the `bias` field comes into play, and we need to figure out the
// value of `bias` here. Unfortunately though it's not clear how to
// acquire this from a loaded module. What we do have, however, is
// the actual load address (`modBaseAddr`).
//
// As a bit of a cop-out for now we mmap the file, read the file
// header information, then drop the mmap. This is wasteful because
// we'll probably reopen the mmap later, but this should work well
// enough for now.
//
// Once we have the `image_base` (desired load location) and the
// `base_addr` (actual load location) we can fill in the `bias`
// (difference between the actual and desired) and then the stated
// address of each segment is the `image_base` since that's what the
// file says.
//
// For now it appears that unlike ELF/MachO we can make do with one
// segment per library, using `modBaseSize` as the whole size.
let mmap = mmap(name.as_ref())?;
let image_base = coff::get_image_base(&mmap)?;
let base_addr = me.modBaseAddr as usize;
Some(Library {
name,
bias: base_addr.wrapping_sub(image_base),
segments: vec![LibrarySegment {
stated_virtual_memory_address: image_base,
len: me.modBaseSize as usize,
}],
})
}
} else if #[cfg(target_os = "macos")] {
// macOS uses the Mach-O file format and uses DYLD-specific APIs to
// load a list of native libraries that are part of the application.
use mystd::os::unix::prelude::*;
use mystd::ffi::{OsStr, CStr};
mod macho;
use self::macho::Object;
#[allow(deprecated)]
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
let images = unsafe { libc::_dyld_image_count() };
for i in 0..images {
ret.extend(native_library(i));
}
return ret;
}
#[allow(deprecated)]
fn native_library(i: u32) -> Option<Library> {
use object::macho;
use object::read::macho::{MachHeader, Segment};
use object::{Bytes, NativeEndian};
// Fetch the name of this library which corresponds to the path of
// where to load it as well.
let name = unsafe {
let name = libc::_dyld_get_image_name(i);
if name.is_null() {
return None;
}
CStr::from_ptr(name)
};
// Load the image header of this library and delegate to `object` to
// parse all the load commands so we can figure out all the segments
// involved here.
let (mut load_commands, endian) = unsafe {
let header = libc::_dyld_get_image_header(i);
if header.is_null() {
return None;
}
match (*header).magic {
macho::MH_MAGIC => {
let endian = NativeEndian;
let header = &*(header as *const macho::MachHeader32<NativeEndian>);
let data = core::slice::from_raw_parts(
header as *const _ as *const u8,
mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize
);
(header.load_commands(endian, Bytes(data)).ok()?, endian)
}
macho::MH_MAGIC_64 => {
let endian = NativeEndian;
let header = &*(header as *const macho::MachHeader64<NativeEndian>);
let data = core::slice::from_raw_parts(
header as *const _ as *const u8,
mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize
);
(header.load_commands(endian, Bytes(data)).ok()?, endian)
}
_ => return None,
}
};
// Iterate over the segments and register known regions for segments
// that we find. Additionally record information about text segments
// for processing later, see comments below.
let mut segments = Vec::new();
let mut first_text = 0;
let mut text_fileoff_zero = false;
while let Some(cmd) = load_commands.next().ok()? {
if let Some((seg, _)) = cmd.segment_32().ok()? {
if seg.name() == b"__TEXT" {
first_text = segments.len();
if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 {
text_fileoff_zero = true;
}
}
segments.push(LibrarySegment {
len: seg.vmsize(endian).try_into().ok()?,
stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?,
});
}
if let Some((seg, _)) = cmd.segment_64().ok()? {
if seg.name() == b"__TEXT" {
first_text = segments.len();
if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 {
text_fileoff_zero = true;
}
}
segments.push(LibrarySegment {
len: seg.vmsize(endian).try_into().ok()?,
stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?,
});
}
}
// Determine the "slide" for this library which ends up being the
// bias we use to figure out where in memory objects are loaded.
// This is a bit of a weird computation though and is the result of
// trying a few things in the wild and seeing what sticks.
//
// The general idea is that the `bias` plus a segment's
// `stated_virtual_memory_address` is going to be where in the
// actual address space the segment resides. The other thing we rely
// on though is that a real address minus the `bias` is the index to
// look up in the symbol table and debuginfo.
//
// It turns out, though, that for system loaded libraries these
// calculations are incorrect. For native executables, however, it
// appears correct. Lifting some logic from LLDB's source it has
// some special-casing for the first `__TEXT` section loaded from
// file offset 0 with a nonzero size. For whatever reason when this
// is present it appears to mean that the symbol table is relative
// to just the vmaddr slide for the library. If it's *not* present
// then the symbol table is relative to the vmaddr slide plus
// the segment's stated address.
//
// To handle this situation if we *don't* find a text section at
// file offset zero then we increase the bias by the first text
// section's stated address and decrease all stated addresses by
// that amount as well. That way the symbol table always appears
// relative to the library's bias amount. This appears to have the
// right results for symbolizing via the symbol table.
//
// Honestly I'm not entirely sure whether this is right or if
// there's something else that should indicate how to do this. For
// now though this seems to work well enough (?) and we should
// always be able to tweak this over time if necessary.
//
// For some more information see #318
let mut slide = unsafe { libc::_dyld_get_image_vmaddr_slide(i) as usize };
if !text_fileoff_zero {
let adjust = segments[first_text].stated_virtual_memory_address;
for segment in segments.iter_mut() {
segment.stated_virtual_memory_address -= adjust;
}
slide += adjust;
}
Some(Library {
name: OsStr::from_bytes(name.to_bytes()).to_owned(),
segments,
bias: slide,
})
}
} else if #[cfg(any(
target_os = "linux",
target_os = "fuchsia",
))] {
// Other Unix (e.g. Linux) platforms use ELF as an object file format
// and typically implement an API called `dl_iterate_phdr` to load
// native libraries.
use mystd::os::unix::prelude::*;
use mystd::ffi::{OsStr, CStr};
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
unsafe {
libc::dl_iterate_phdr(Some(callback), &mut ret as *mut _ as *mut _);
}
return ret;
}
unsafe extern "C" fn callback(
info: *mut libc::dl_phdr_info,
_size: libc::size_t,
vec: *mut libc::c_void,
) -> libc::c_int {
let libs = &mut *(vec as *mut Vec<Library>);
let name = if (*info).dlpi_name.is_null() || *(*info).dlpi_name == 0 {
if libs.is_empty() {
mystd::env::current_exe().map(|e| e.into()).unwrap_or_default()
} else {
OsString::new()
}
} else {
let bytes = CStr::from_ptr((*info).dlpi_name).to_bytes();
OsStr::from_bytes(bytes).to_owned()
};
let headers = core::slice::from_raw_parts((*info).dlpi_phdr, (*info).dlpi_phnum as usize);
libs.push(Library {
name,
segments: headers
.iter()
.map(|header| LibrarySegment {
len: (*header).p_memsz as usize,
stated_virtual_memory_address: (*header).p_vaddr as usize,
})
.collect(),
bias: (*info).dlpi_addr as usize,
});
0
}
} else if #[cfg(target_env = "libnx")] {
// DevkitA64 doesn't natively support debug info, but the build system will place debug
// info at the path `romfs:/debug_info.elf`.
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
extern "C" {
static __start__: u8;
}
let bias = unsafe { &__start__ } as *const u8 as usize;
let mut ret = Vec::new();
let mut segments = Vec::new();
segments.push(LibrarySegment {
stated_virtual_memory_address: 0,
len: usize::max_value() - bias,
});
let path = "romfs:/debug_info.elf";
ret.push(Library {
name: path.into(),
segments,
bias,
});
ret
}
} else {
// Everything else should use ELF, but doesn't know how to load native
// libraries.
use mystd::os::unix::prelude::*;
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
Vec::new()
}
}
}
#[derive(Default)]
struct Cache {
/// All known shared libraries that have been loaded.
libraries: Vec<Library>,
/// Mappings cache where we retain parsed dwarf information.
///
/// This list has a fixed capacity for its entire lifetime which never
/// increases. The `usize` element of each pair is an index into `libraries`
/// above where `usize::max_value()` represents the current executable. The
/// `Mapping` is corresponding parsed dwarf information.
/// | }
struct Library {
name: OsString,
/// Segments of this library loaded into memory, and where they're loaded.
segments: Vec<LibrarySegment>,
/// The "bias" of this library, typically where it's loaded into memory.
/// This value is added to each segment's stated address to get the actual
/// virtual memory address that the segment is loaded into. Additionally
/// this bias is subtracted from real virtual memory addresses to index into
/// debuginfo and the symbol table.
bias: usize,
}
struct LibrarySegment {
/// The stated address of this segment in the object file. This is not
/// actually where the segment is loaded, but rather this address plus the
/// containing library's `bias` is where to find it.
stated_virtual_memory_address: usize,
/// The size of this segment in memory.
len: usize,
}
// unsafe because this is required to be externally synchronized
pub unsafe fn clear_symbol_cache() {
Cache::with_global(|cache| cache.mappings.clear());
}
impl Cache {
fn new() -> Cache {
Cache {
mappings: Vec::with_capacity(MAPPINGS_CACHE_SIZE),
libraries: native_libraries(),
}
}
// unsafe because this is required to be externally synchronized
unsafe fn with_global(f: impl FnOnce(&mut Self)) {
// A very small, very simple LRU cache for debug info mappings.
//
// The hit rate should be very high, since the typical stack doesn't cross
// between many shared libraries.
//
// The `addr2line::Context` structures are pretty expensive to create. Its
// cost is expected to be amortized by subsequent `locate` queries, which
// leverage the structures built when constructing `addr2line::Context`s to
// get nice speedups. If we didn't have this cache, that amortization would
// never happen, and symbolicating backtraces would be ssssllllooooowwww.
static mut MAPPINGS_CACHE: Option<Cache> = None;
f(MAPPINGS_CACHE.get_or_insert_with(|| Cache::new()))
}
fn avma_to_svma(&self, addr: *const u8) -> Option<(usize, *const u8)> {
self.libraries
.iter()
.enumerate()
.filter_map(|(i, lib)| {
// First up, test if this `lib` has any segment containing the
// `addr` (handling relocation). If this check passes then we
// can continue below and actually translate the address.
//
// Note that we're using `wrapping_add` here to avoid overflow
// checks. It's been seen in the wild that the SVMA + bias
// computation overflows. It seems a bit odd that would happen
// but there's not a huge amount we can do about it other than
// probably just ignore those segments since they're likely
// pointing off into space. This originally came up in
// rust-lang/backtrace-rs#329.
if !lib.segments.iter().any(|s| {
let svma = s.stated_virtual_memory_address;
let start = svma.wrapping_add(lib.bias);
let end = start.wrapping_add(s.len);
let address = addr as usize;
start <= address && address < end
}) {
return None;
}
// Now that we know `lib` contains `addr`, we can offset with
// the bias to find the stated virtual memory address.
let svma = (addr as usize).wrapping_sub(lib.bias);
Some((i, svma as *const u8))
})
.next()
}
fn mapping_for_lib<'a>(&'a mut self, lib: usize) -> Option<&'a Context<'a>> {
let idx = self.mappings.iter().position(|(idx, _)| *idx == lib);
// Invariant: after this conditional completes without early returning
// from an error, the cache entry for this path is at index 0.
if let Some(idx) = idx {
// When the mapping is already in the cache, move it to the front.
if idx != 0 {
let entry = self.mappings.remove(idx);
self.mappings.insert(0, entry);
}
} else {
// When the mapping is not in the cache, create a new mapping,
// insert it into the front of the cache, and evict the oldest cache
// entry if necessary.
let name = &self.libraries[lib].name;
let mapping = Mapping::new(name.as_ref())?;
if self.mappings.len() == MAPPINGS_CACHE_SIZE {
self.mappings.pop();
}
self.mappings.insert(0, (lib, mapping));
}
let cx: &'a Context<'static> = &self.mappings[0].1.cx;
// don't leak the `'static` lifetime, make sure it's scoped to just
// ourselves
Some(unsafe { mem::transmute::<&'a Context<'static>, &'a Context<'a>>(cx) })
}
}
pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) {
let addr = what.address_or_ip();
let mut call = |sym: Symbol<'_>| {
// Extend the lifetime of `sym` to `'static` since we are unfortunately
// required to here, but it's only ever going out as a reference so no
// reference to it should be persisted beyond this frame anyway.
let sym = mem::transmute::<Symbol<'_>, Symbol<'static>>(sym);
(cb)(&super::Symbol { inner: sym });
};
Cache::with_global(|cache| {
let (lib, addr) = match cache.avma_to_svma(addr as *const u8) {
Some(pair) => pair,
None => return,
};
// Finally, get a cached mapping or create a new mapping for this file, and
// evaluate the DWARF info to find the file/line/name for this address.
let cx = match cache.mapping_for_lib(lib) {
Some(cx) => cx,
None => return,
};
let mut any_frames = false;
if let Ok(mut frames) = cx.dwarf.find_frames(addr as u64) {
while let Ok(Some(frame)) = frames.next() {
any_frames = true;
call(Symbol::Frame {
addr: addr as *mut c_void,
location: frame.location,
name: frame.function.map(|f| f.name.slice()),
});
}
}
if !any_frames {
if let Some(name) = cx.object.search_symtab(addr as u64) {
call(Symbol::Symtab {
addr: addr as *mut c_void,
name,
});
}
}
});
}
pub enum Symbol<'a> {
/// We were able to locate frame information for this symbol, and
/// `addr2line`'s frame internally has all the nitty gritty details.
Frame {
addr: *mut c_void,
location: Option<addr2line::Location<'a>>,
name: Option<&'a [u8]>,
},
/// Couldn't find debug information, but we found it in the symbol table of
/// the elf executable.
Symtab { addr: *mut c_void, name: &'a [u8] },
}
impl Symbol<'_> {
pub fn name(&self) -> Option<SymbolName<'_>> {
match self {
Symbol::Frame { name, .. } => {
let name = name.as_ref()?;
Some(SymbolName::new(name))
}
Symbol::Symtab { name, .. } => Some(SymbolName::new(name)),
}
}
pub fn addr(&self) -> Option<*mut c_void> {
match self {
Symbol::Frame { addr, .. } => Some(*addr),
Symbol::Symtab { .. } => None,
}
}
pub fn filename_raw(&self) -> Option<BytesOrWideString<'_>> {
match self {
Symbol::Frame { location, .. } => {
let file = location.as_ref()?.file?;
Some(BytesOrWideString::Bytes(file.as_bytes()))
}
Symbol::Symtab { .. } => None,
}
}
pub fn filename(&self) -> Option<&Path> {
match self {
Symbol::Frame { location, .. } => {
let file = location.as_ref()?.file?;
Some(Path::new(file))
}
Symbol::Symtab { .. } => None,
}
}
pub fn lineno(&self) -> Option<u32> {
match self {
Symbol::Frame { location, .. } => location.as_ref()?.line,
Symbol::Symtab { .. } => None,
}
}
pub fn colno(&self) -> Option<u32> {
match self {
Symbol::Frame { location, .. } => location.as_ref()?.column,
Symbol::Symtab { .. } => None,
}
}
} | /// Note that this is basically an LRU cache and we'll be shifting things
/// around in here as we symbolize addresses.
mappings: Vec<(usize, Mapping)>, | random_line_split |
gimli.rs | //! Support for symbolication using the `gimli` crate on crates.io
//!
//! This implementation is largely a work in progress and is off by default for
//! all platforms, but it's hoped to be developed over time! Long-term this is
//! intended to wholesale replace the `libbacktrace.rs` implementation.
use self::gimli::read::EndianSlice;
use self::gimli::LittleEndian as Endian;
use self::mmap::Mmap;
use self::stash::Stash;
use super::BytesOrWideString;
use super::ResolveWhat;
use super::SymbolName;
use addr2line::gimli;
use core::convert::TryInto;
use core::mem;
use core::u32;
use libc::c_void;
use mystd::ffi::OsString;
use mystd::fs::File;
use mystd::path::Path;
use mystd::prelude::v1::*;
#[cfg(backtrace_in_libstd)]
mod mystd {
pub use crate::*;
}
#[cfg(not(backtrace_in_libstd))]
extern crate std as mystd;
cfg_if::cfg_if! {
if #[cfg(windows)] {
#[path = "gimli/mmap_windows.rs"]
mod mmap;
} else if #[cfg(any(
target_os = "android",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "openbsd",
target_os = "solaris",
))] {
#[path = "gimli/mmap_unix.rs"]
mod mmap;
} else {
#[path = "gimli/mmap_fake.rs"]
mod mmap;
}
}
mod stash;
const MAPPINGS_CACHE_SIZE: usize = 4;
struct Context<'a> {
dwarf: addr2line::Context<EndianSlice<'a, Endian>>,
object: Object<'a>,
}
struct Mapping {
// 'static lifetime is a lie to hack around lack of support for self-referential structs.
cx: Context<'static>,
_map: Mmap,
_stash: Stash,
}
fn cx<'data>(stash: &'data Stash, object: Object<'data>) -> Option<Context<'data>> {
fn load_section<'data, S>(stash: &'data Stash, obj: &Object<'data>) -> S
where
S: gimli::Section<gimli::EndianSlice<'data, Endian>>,
{
let data = obj.section(stash, S::section_name()).unwrap_or(&[]);
S::from(EndianSlice::new(data, Endian))
}
let dwarf = addr2line::Context::from_sections(
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
load_section(stash, &object),
gimli::EndianSlice::new(&[], Endian),
)
.ok()?;
Some(Context { dwarf, object })
}
macro_rules! mk {
(Mapping { $map:expr, $inner:expr, $stash:expr }) => {{
fn assert_lifetimes<'a>(_: &'a Mmap, _: &Context<'a>, _: &'a Stash) {}
assert_lifetimes(&$map, &$inner, &$stash);
Mapping {
// Convert to 'static lifetimes since the symbols should
// only borrow `map` and `stash` and we're preserving them below.
cx: unsafe { core::mem::transmute::<Context<'_>, Context<'static>>($inner) },
_map: $map,
_stash: $stash,
}
}};
}
fn mmap(path: &Path) -> Option<Mmap> {
let file = File::open(path).ok()?;
let len = file.metadata().ok()?.len().try_into().ok()?;
unsafe { Mmap::map(&file, len) }
}
cfg_if::cfg_if! {
if #[cfg(windows)] {
use core::mem::MaybeUninit;
use super::super::windows::*;
use mystd::os::windows::prelude::*;
use alloc::vec;
mod coff;
use self::coff::Object;
// For loading native libraries on Windows, see some discussion on
// rust-lang/rust#71060 for the various strategies here.
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
unsafe { add_loaded_images(&mut ret); }
return ret;
}
unsafe fn add_loaded_images(ret: &mut Vec<Library>) {
let snap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, 0);
if snap == INVALID_HANDLE_VALUE {
return;
}
let mut me = MaybeUninit::<MODULEENTRY32W>::zeroed().assume_init();
me.dwSize = mem::size_of_val(&me) as DWORD;
if Module32FirstW(snap, &mut me) == TRUE {
loop {
if let Some(lib) = load_library(&me) {
ret.push(lib);
}
if Module32NextW(snap, &mut me) != TRUE {
break;
}
}
}
CloseHandle(snap);
}
unsafe fn load_library(me: &MODULEENTRY32W) -> Option<Library> {
let pos = me
.szExePath
.iter()
.position(|i| *i == 0)
.unwrap_or(me.szExePath.len());
let name = OsString::from_wide(&me.szExePath[..pos]);
// MinGW libraries currently don't support ASLR
// (rust-lang/rust#16514), but DLLs can still be relocated around in
// the address space. It appears that addresses in debug info are
// all as-if this library was loaded at its "image base", which is a
// field in its COFF file headers. Since this is what debuginfo
// seems to list we parse the symbol table and store addresses as if
// the library was loaded at "image base" as well.
//
// The library may not be loaded at "image base", however.
// (presumably something else may be loaded there?) This is where
// the `bias` field comes into play, and we need to figure out the
// value of `bias` here. Unfortunately though it's not clear how to
// acquire this from a loaded module. What we do have, however, is
// the actual load address (`modBaseAddr`).
//
// As a bit of a cop-out for now we mmap the file, read the file
// header information, then drop the mmap. This is wasteful because
// we'll probably reopen the mmap later, but this should work well
// enough for now.
//
// Once we have the `image_base` (desired load location) and the
// `base_addr` (actual load location) we can fill in the `bias`
// (difference between the actual and desired) and then the stated
// address of each segment is the `image_base` since that's what the
// file says.
//
// For now it appears that unlike ELF/MachO we can make do with one
// segment per library, using `modBaseSize` as the whole size.
let mmap = mmap(name.as_ref())?;
let image_base = coff::get_image_base(&mmap)?;
let base_addr = me.modBaseAddr as usize;
Some(Library {
name,
bias: base_addr.wrapping_sub(image_base),
segments: vec![LibrarySegment {
stated_virtual_memory_address: image_base,
len: me.modBaseSize as usize,
}],
})
}
} else if #[cfg(target_os = "macos")] {
// macOS uses the Mach-O file format and uses DYLD-specific APIs to
// load a list of native libraries that are part of the application.
use mystd::os::unix::prelude::*;
use mystd::ffi::{OsStr, CStr};
mod macho;
use self::macho::Object;
#[allow(deprecated)]
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
let images = unsafe { libc::_dyld_image_count() };
for i in 0..images {
ret.extend(native_library(i));
}
return ret;
}
#[allow(deprecated)]
fn native_library(i: u32) -> Option<Library> {
use object::macho;
use object::read::macho::{MachHeader, Segment};
use object::{Bytes, NativeEndian};
// Fetch the name of this library which corresponds to the path of
// where to load it as well.
let name = unsafe {
let name = libc::_dyld_get_image_name(i);
if name.is_null() {
return None;
}
CStr::from_ptr(name)
};
// Load the image header of this library and delegate to `object` to
// parse all the load commands so we can figure out all the segments
// involved here.
let (mut load_commands, endian) = unsafe {
let header = libc::_dyld_get_image_header(i);
if header.is_null() {
return None;
}
match (*header).magic {
macho::MH_MAGIC => {
let endian = NativeEndian;
let header = &*(header as *const macho::MachHeader32<NativeEndian>);
let data = core::slice::from_raw_parts(
header as *const _ as *const u8,
mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize
);
(header.load_commands(endian, Bytes(data)).ok()?, endian)
}
macho::MH_MAGIC_64 => {
let endian = NativeEndian;
let header = &*(header as *const macho::MachHeader64<NativeEndian>);
let data = core::slice::from_raw_parts(
header as *const _ as *const u8,
mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize
);
(header.load_commands(endian, Bytes(data)).ok()?, endian)
}
_ => return None,
}
};
// Iterate over the segments and register known regions for segments
// that we find. Additionally record information about text segments
// for processing later, see comments below.
let mut segments = Vec::new();
let mut first_text = 0;
let mut text_fileoff_zero = false;
while let Some(cmd) = load_commands.next().ok()? {
if let Some((seg, _)) = cmd.segment_32().ok()? {
if seg.name() == b"__TEXT" {
first_text = segments.len();
if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 {
text_fileoff_zero = true;
}
}
segments.push(LibrarySegment {
len: seg.vmsize(endian).try_into().ok()?,
stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?,
});
}
if let Some((seg, _)) = cmd.segment_64().ok()? {
if seg.name() == b"__TEXT" {
first_text = segments.len();
if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 {
text_fileoff_zero = true;
}
}
segments.push(LibrarySegment {
len: seg.vmsize(endian).try_into().ok()?,
stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?,
});
}
}
// Determine the "slide" for this library which ends up being the
// bias we use to figure out where in memory objects are loaded.
// This is a bit of a weird computation though and is the result of
// trying a few things in the wild and seeing what sticks.
//
// The general idea is that the `bias` plus a segment's
// `stated_virtual_memory_address` is going to be where in the
// actual address space the segment resides. The other thing we rely
// on though is that a real address minus the `bias` is the index to
// look up in the symbol table and debuginfo.
//
// It turns out, though, that for system loaded libraries these
// calculations are incorrect. For native executables, however, it
// appears correct. Lifting some logic from LLDB's source it has
// some special-casing for the first `__TEXT` section loaded from
// file offset 0 with a nonzero size. For whatever reason when this
// is present it appears to mean that the symbol table is relative
// to just the vmaddr slide for the library. If it's *not* present
// then the symbol table is relative to the vmaddr slide plus
// the segment's stated address.
//
// To handle this situation if we *don't* find a text section at
// file offset zero then we increase the bias by the first text
// section's stated address and decrease all stated addresses by
// that amount as well. That way the symbol table always appears
// relative to the library's bias amount. This appears to have the
// right results for symbolizing via the symbol table.
//
// Honestly I'm not entirely sure whether this is right or if
// there's something else that should indicate how to do this. For
// now though this seems to work well enough (?) and we should
// always be able to tweak this over time if necessary.
//
// For some more information see #318
let mut slide = unsafe { libc::_dyld_get_image_vmaddr_slide(i) as usize };
if !text_fileoff_zero {
let adjust = segments[first_text].stated_virtual_memory_address;
for segment in segments.iter_mut() {
segment.stated_virtual_memory_address -= adjust;
}
slide += adjust;
}
Some(Library {
name: OsStr::from_bytes(name.to_bytes()).to_owned(),
segments,
bias: slide,
})
}
} else if #[cfg(any(
target_os = "linux",
target_os = "fuchsia",
))] {
// Other Unix (e.g. Linux) platforms use ELF as an object file format
// and typically implement an API called `dl_iterate_phdr` to load
// native libraries.
use mystd::os::unix::prelude::*;
use mystd::ffi::{OsStr, CStr};
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
let mut ret = Vec::new();
unsafe {
libc::dl_iterate_phdr(Some(callback), &mut ret as *mut _ as *mut _);
}
return ret;
}
unsafe extern "C" fn callback(
info: *mut libc::dl_phdr_info,
_size: libc::size_t,
vec: *mut libc::c_void,
) -> libc::c_int {
let libs = &mut *(vec as *mut Vec<Library>);
let name = if (*info).dlpi_name.is_null() || *(*info).dlpi_name == 0 {
if libs.is_empty() {
mystd::env::current_exe().map(|e| e.into()).unwrap_or_default()
} else {
OsString::new()
}
} else {
let bytes = CStr::from_ptr((*info).dlpi_name).to_bytes();
OsStr::from_bytes(bytes).to_owned()
};
let headers = core::slice::from_raw_parts((*info).dlpi_phdr, (*info).dlpi_phnum as usize);
libs.push(Library {
name,
segments: headers
.iter()
.map(|header| LibrarySegment {
len: (*header).p_memsz as usize,
stated_virtual_memory_address: (*header).p_vaddr as usize,
})
.collect(),
bias: (*info).dlpi_addr as usize,
});
0
}
} else if #[cfg(target_env = "libnx")] {
// DevkitA64 doesn't natively support debug info, but the build system will place debug
// info at the path `romfs:/debug_info.elf`.
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
extern "C" {
static __start__: u8;
}
let bias = unsafe { &__start__ } as *const u8 as usize;
let mut ret = Vec::new();
let mut segments = Vec::new();
segments.push(LibrarySegment {
stated_virtual_memory_address: 0,
len: usize::max_value() - bias,
});
let path = "romfs:/debug_info.elf";
ret.push(Library {
name: path.into(),
segments,
bias,
});
ret
}
} else {
// Everything else should use ELF, but doesn't know how to load native
// libraries.
use mystd::os::unix::prelude::*;
mod elf;
use self::elf::Object;
fn native_libraries() -> Vec<Library> {
Vec::new()
}
}
}
#[derive(Default)]
struct Cache {
/// All known shared libraries that have been loaded.
libraries: Vec<Library>,
/// Mappings cache where we retain parsed dwarf information.
///
    /// This list has a fixed capacity for its entire lifetime which never
/// increases. The `usize` element of each pair is an index into `libraries`
/// above where `usize::max_value()` represents the current executable. The
    /// `Mapping` is the corresponding parsed dwarf information.
///
/// Note that this is basically an LRU cache and we'll be shifting things
/// around in here as we symbolize addresses.
mappings: Vec<(usize, Mapping)>,
}
struct Library {
name: OsString,
/// Segments of this library loaded into memory, and where they're loaded.
segments: Vec<LibrarySegment>,
/// The "bias" of this library, typically where it's loaded into memory.
/// This value is added to each segment's stated address to get the actual
/// virtual memory address that the segment is loaded into. Additionally
/// this bias is subtracted from real virtual memory addresses to index into
/// debuginfo and the symbol table.
bias: usize,
}
struct LibrarySegment {
/// The stated address of this segment in the object file. This is not
/// actually where the segment is loaded, but rather this address plus the
/// containing library's `bias` is where to find it.
stated_virtual_memory_address: usize,
    /// The size of this segment in memory.
len: usize,
}
// unsafe because this is required to be externally synchronized
pub unsafe fn clear_symbol_cache() {
Cache::with_global(|cache| cache.mappings.clear());
}
impl Cache {
fn new() -> Cache {
Cache {
mappings: Vec::with_capacity(MAPPINGS_CACHE_SIZE),
libraries: native_libraries(),
}
}
// unsafe because this is required to be externally synchronized
unsafe fn with_global(f: impl FnOnce(&mut Self)) {
// A very small, very simple LRU cache for debug info mappings.
//
// The hit rate should be very high, since the typical stack doesn't cross
// between many shared libraries.
//
// The `addr2line::Context` structures are pretty expensive to create. Its
// cost is expected to be amortized by subsequent `locate` queries, which
// leverage the structures built when constructing `addr2line::Context`s to
// get nice speedups. If we didn't have this cache, that amortization would
// never happen, and symbolicating backtraces would be ssssllllooooowwww.
static mut MAPPINGS_CACHE: Option<Cache> = None;
f(MAPPINGS_CACHE.get_or_insert_with(|| Cache::new()))
}
fn avma_to_svma(&self, addr: *const u8) -> Option<(usize, *const u8)> {
self.libraries
.iter()
.enumerate()
.filter_map(|(i, lib)| {
// First up, test if this `lib` has any segment containing the
// `addr` (handling relocation). If this check passes then we
// can continue below and actually translate the address.
//
// Note that we're using `wrapping_add` here to avoid overflow
// checks. It's been seen in the wild that the SVMA + bias
// computation overflows. It seems a bit odd that would happen
// but there's not a huge amount we can do about it other than
// probably just ignore those segments since they're likely
// pointing off into space. This originally came up in
// rust-lang/backtrace-rs#329.
if !lib.segments.iter().any(|s| {
let svma = s.stated_virtual_memory_address;
let start = svma.wrapping_add(lib.bias);
let end = start.wrapping_add(s.len);
let address = addr as usize;
start <= address && address < end
}) {
return None;
}
// Now that we know `lib` contains `addr`, we can offset with
                // the bias to find the stated virtual memory address.
let svma = (addr as usize).wrapping_sub(lib.bias);
Some((i, svma as *const u8))
})
.next()
}
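    // Example of the translation above (hypothetical numbers): if `libraries[2]`
    // has `bias = 0x7f00_0000_0000` and one of its segments covers the queried
    // address, then `avma_to_svma(0x7f00_0000_1234 as *const u8)` returns
    // `Some((2, 0x1234 as *const u8))`, and that SVMA is what later gets handed
    // to `find_frames`/`search_symtab` in `resolve` below.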
fn | <'a>(&'a mut self, lib: usize) -> Option<&'a Context<'a>> {
let idx = self.mappings.iter().position(|(idx, _)| *idx == lib);
// Invariant: after this conditional completes without early returning
// from an error, the cache entry for this path is at index 0.
if let Some(idx) = idx {
// When the mapping is already in the cache, move it to the front.
if idx != 0 {
let entry = self.mappings.remove(idx);
self.mappings.insert(0, entry);
}
} else {
// When the mapping is not in the cache, create a new mapping,
// insert it into the front of the cache, and evict the oldest cache
// entry if necessary.
let name = &self.libraries[lib].name;
let mapping = Mapping::new(name.as_ref())?;
if self.mappings.len() == MAPPINGS_CACHE_SIZE {
self.mappings.pop();
}
self.mappings.insert(0, (lib, mapping));
}
let cx: &'a Context<'static> = &self.mappings[0].1.cx;
// don't leak the `'static` lifetime, make sure it's scoped to just
// ourselves
Some(unsafe { mem::transmute::<&'a Context<'static>, &'a Context<'a>>(cx) })
}
}
pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) {
let addr = what.address_or_ip();
let mut call = |sym: Symbol<'_>| {
// Extend the lifetime of `sym` to `'static` since we are unfortunately
        // required to here, but it's only ever going out as a reference so no
// reference to it should be persisted beyond this frame anyway.
let sym = mem::transmute::<Symbol<'_>, Symbol<'static>>(sym);
(cb)(&super::Symbol { inner: sym });
};
Cache::with_global(|cache| {
let (lib, addr) = match cache.avma_to_svma(addr as *const u8) {
Some(pair) => pair,
None => return,
};
// Finally, get a cached mapping or create a new mapping for this file, and
// evaluate the DWARF info to find the file/line/name for this address.
let cx = match cache.mapping_for_lib(lib) {
Some(cx) => cx,
None => return,
};
let mut any_frames = false;
if let Ok(mut frames) = cx.dwarf.find_frames(addr as u64) {
while let Ok(Some(frame)) = frames.next() {
any_frames = true;
call(Symbol::Frame {
addr: addr as *mut c_void,
location: frame.location,
name: frame.function.map(|f| f.name.slice()),
});
}
}
if !any_frames {
if let Some(name) = cx.object.search_symtab(addr as u64) {
call(Symbol::Symtab {
addr: addr as *mut c_void,
name,
});
}
}
});
}
pub enum Symbol<'a> {
/// We were able to locate frame information for this symbol, and
/// `addr2line`'s frame internally has all the nitty gritty details.
Frame {
addr: *mut c_void,
location: Option<addr2line::Location<'a>>,
name: Option<&'a [u8]>,
},
/// Couldn't find debug information, but we found it in the symbol table of
/// the elf executable.
Symtab { addr: *mut c_void, name: &'a [u8] },
}
impl Symbol<'_> {
pub fn name(&self) -> Option<SymbolName<'_>> {
match self {
Symbol::Frame { name, .. } => {
let name = name.as_ref()?;
Some(SymbolName::new(name))
}
Symbol::Symtab { name, .. } => Some(SymbolName::new(name)),
}
}
pub fn addr(&self) -> Option<*mut c_void> {
match self {
Symbol::Frame { addr, .. } => Some(*addr),
Symbol::Symtab { .. } => None,
}
}
pub fn filename_raw(&self) -> Option<BytesOrWideString<'_>> {
match self {
Symbol::Frame { location, .. } => {
let file = location.as_ref()?.file?;
Some(BytesOrWideString::Bytes(file.as_bytes()))
}
Symbol::Symtab { .. } => None,
}
}
pub fn filename(&self) -> Option<&Path> {
match self {
Symbol::Frame { location, .. } => {
let file = location.as_ref()?.file?;
Some(Path::new(file))
}
Symbol::Symtab { .. } => None,
}
}
pub fn lineno(&self) -> Option<u32> {
match self {
Symbol::Frame { location, .. } => location.as_ref()?.line,
Symbol::Symtab { .. } => None,
}
}
pub fn colno(&self) -> Option<u32> {
match self {
Symbol::Frame { location, .. } => location.as_ref()?.column,
Symbol::Symtab { .. } => None,
}
}
}
| mapping_for_lib | identifier_name |
main.py | # By Monsterovich
# This script reposts user's track from the comments
from soundcloud import Client as Soundcloud
from requests import HTTPError
from time import strftime, time, gmtime
import logging
import os
import sys
import imp
from scgb.database import Database
BOT_VERSION = '1.3.3'
banlist = {
'user': {},
'track': {},
'playlist': {},
}
config = None
db = None
soundcloud = None
should_update_description = False
def bot_init():
global db
global config
# Init log
logging.basicConfig(stream=sys.stdout, level=logging.INFO, datefmt='[%Y-%m-%d %H:%M:%S]', format='%(asctime)s %(levelname)s %(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Init config
if len(sys.argv) > 1:
config = imp.load_source('scgb_config', sys.argv[1])
elif os.path.exists('config.py'):
config = imp.load_source('scgb_config', os.path.join(os.getcwd(), 'config.py'))
else:
logging.critical('Please, rename config.py.template to config.py and edit it.\nOr specify a config to load on the command line: py scgb.py <config file>')
sys.exit(1)
# Init database
db = Database(config.stats_database)
# Init banlist
load_banlist()
# Init soundcloud client
init_api()
def init_api():
"""Authenticate with SoundCloud API.
Cache access token in the secrets file."""
global soundcloud
import json
SECRETS_VERSION = 1
# Load secrets file
if os.path.exists(config.token_cache):
with open(config.token_cache, 'r', encoding='utf-8') as f:
secrets = json.load(f)
else:
secrets = {}
# Try to reuse the cached access token
if secrets\
and secrets['version'] == SECRETS_VERSION\
and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\
and secrets['username'] == config.username:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
access_token=secrets['access_token']
)
return
# Get a new access token
logging.info('Getting a new access token')
try:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
username=config.username,
password=config.password
)
except HTTPError as e:
if e.response.status_code == 401:
logging.critical('Incorrect API key, login or password. Please, edit config.py.')
sys.exit(1)
else:
raise
# Save the token
secrets = {
'version': SECRETS_VERSION,
'username': config.username,
'access_token': soundcloud.access_token,
'access_token_acquired_at': time(),
'access_token_expires_in': soundcloud.token.expires_in,
}
with open(config.token_cache, 'w', encoding='utf-8') as f:
        json.dump(secrets, f, indent='\t', ensure_ascii=False)
def load_banlist():
"""Load the banlist."""
# create banlist if it doesn't exist
if not os.path.exists(config.banlistfile):
open(config.banlistfile, 'ab').close()
with open(config.banlistfile, 'r') as file:
for line in file:
line = line.strip()
if line == '' or line.startswith('//'):
continue # skip empty lines and comments
values = line.split(None, 2)
what = values[0]
if what not in ['user', 'track', 'playlist']:
logging.warning('Banlist error: unknown ban type: %s', what)
continue
try:
id = int(values[1])
except ValueError:
                logging.warning('Banlist error: %s is not a valid %s id number', values[1], what)
continue
if len(values) > 2:
banlist[what][id] = values[2]
else:
banlist[what][id] = "No reason given."
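# Illustrative banlist entries (hypothetical values; the real file is user-maintained):
#   user 12345678 Spamming the group
#   track 987654321
#   playlist 1122334455 Not an original work
# i.e. "<type> <numeric id> [optional reason]"; blank lines and lines starting with // are ignored.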
def check_comments():
"""Download all comments and process them."""
# Get the id of the group track
try:
group_track = soundcloud.get('/me/tracks')[config.post_track_id]
except HTTPError as e:
if e.response.status_code == 404:
logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)
sys.exit(1)
else:
raise
# Get the comment list for the group track
comments = soundcloud.get('/tracks/%d/comments' % group_track.id)
if not comments:
logging.info('Nothing found...')
return
# Process each comment and delete it
for comment in reversed(comments):
logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)
response = None
# Try to process the comment
try:
response = process_comment(comment)
except HTTPError as e:
if e.response.status_code == 429:
logging.exception('Failed to repost track: too many requests:')
return
elif e.response.status_code // 100 == 4:
logging.exception('Failed to process comment due to a client request error:')
else:
raise
except Exception as e: # Program crash
logging.exception('Failed to process comment:')
else:
if response:
logging.info('The comment would have this response: %s', response)
else:
logging.info('Comment processed successfully')
# Delete the processed comment
try:
soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))
except HTTPError as e:
if e.response.status_code == 404:
logging.warning('Comment already deleted')
else:
raise |
def process_comment(comment):
"""Process a single comment."""
if not comment.body:
logging.info('Empty URL detected.')
return 'Your comment is empty.'
if comment.user_id in banlist['user']:
logging.info('Banned user id: %d', comment.user_id)
return 'You are banned from this group.'
url = comment.body
action = 'repost'
if url.startswith('!'):
action = 'delete'
url = url[1:]
# Resolve the resource to repost
resource = resolve_resource(url)
if resource:
logging.info('Resolved: %s %d', resource.kind, resource.id)
if resource.kind == 'playlist' and not config.allow_playlists:
logging.info('Playlists are not allowed. Skipping.')
return 'Playlists are not allowed in this group.'
else:
logging.info('Not found')
if not resource or resource.kind not in ('track', 'playlist'):
if config.allow_playlists:
return 'The provided link does not lead to a track or playlist.'
else:
return 'The provided link does not lead to a track.'
resource_type = resource.kind
# Check for ownership
if not config.debug_mode and comment.user_id != resource.user_id:
logging.info('Not the author of the resource')
return 'You must be the author of the {} to post it in this group.'.format(resource_type)
# Is the resource banned?
if resource.id in banlist[resource_type]:
        reason = banlist[resource_type][resource.id]
logging.info('This resource is banned: %s', reason)
return 'This track or playlist is banned from this group: ' + reason
# Repost/delete if needed
is_reposted = check_repost_exists(resource_type, resource.id)
if action == 'repost':
# Genre filter
if config.allowed_genres is not None:
genres_lowercase = [ genre.lower() for genre in config.allowed_genres ]
if resource.genre.lower() not in genres_lowercase:
logging.info('Genre not allowed: %s', resource.genre)
return 'This genre is not allowed in this group. Allowed genres are: ' + ', '.join(config.allowed_genres)
# Disable bumps if needed
if not config.allow_bumps and db.has_ever_been_posted(resource_type, resource.id):
logging.info('Bumping is disabled and this resource is present in the database.')
return 'Bumping is not allowed in this group.'
# Enforce minimum bump interval
last_reposted = db.last_repost_time(resource_type, resource.id)
if last_reposted is not None and last_reposted > int(time()) - config.min_bump_interval:
logging.info('This %s was posted %d seconds ago, but minimum bump interval is %d.', resource_type, int(time()) - last_reposted, config.min_bump_interval)
return 'This {} is posted to the group too frequently. Try again later.'.format(resource_type)
# Enforce max posts
last_post_count = db.user_last_posts_count(comment.user_id, config.post_limit_interval)
if last_post_count >= config.post_limit:
logging.info('The user has already made %d reposts.', last_post_count)
return 'You have already made {} posts.'.format(config.post_limit)
# Execute the command
if is_reposted:
logging.info('Bumping:')
group_delete(comment.user_id, resource_type, resource.id)
group_repost(comment.user_id, resource_type, resource.id)
else:
group_repost(comment.user_id, resource_type, resource.id)
request_description_update()
elif action == 'delete':
if is_reposted:
group_delete(comment.user_id, resource_type, resource.id)
request_description_update()
else:
logging.info('Resource already deleted')
else:
assert False, 'Unknown action: ' + repr(action)
def resolve_resource(url):
"""Return the resource object downloaded from url, or None, if not found."""
try:
resource = soundcloud.get('/resolve', url=url)
except HTTPError as e:
if e.response.status_code == 404:
return None
else:
raise
return resource
def check_repost_exists(type, id):
    """Return true if the repost exists, according to soundcloud.
Also update the database if a repost is already deleted
on soundcloud, but is not marked as deleted in the db."""
try:
soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))
return True
except HTTPError as e:
if e.response.status_code == 404:
db.mark_as_deleted(type, id)
return False
else:
raise
def group_repost(user_id, resource_type, resource_id):
"""Repost a resource into the group and update the database."""
logging.info('Reposting %s %d...', resource_type, resource_id)
soundcloud.put('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_repost(user_id, resource_type, resource_id)
db.commit()
def group_delete(user_id, resource_type, resource_id):
"""Delete a resource from the group and update the database."""
logging.info('Deleting %s %d...', resource_type, resource_id)
soundcloud.delete('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_deletion(user_id, resource_type, resource_id)
db.commit()
def request_description_update():
"""Set a flag to update the description once all comments are processed."""
global should_update_description
should_update_description = True
def update_description():
"""Update group description."""
track_count = db.track_count
playlist_count = db.playlist_count
keywords = {
'last_update': strftime("%Y-%m-%d %H:%M:%S", gmtime()),
'bot_version': BOT_VERSION,
'track_count': track_count,
'playlist_count': playlist_count,
'user_count': db.user_count,
'post_count': track_count + playlist_count
}
desc = config.description_template.strip()
for keyword, value in keywords.items():
desc = desc.replace(config.keyword_tag + keyword + config.keyword_tag, str(value))
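    # e.g. with keyword_tag = '%' (hypothetical value), a template containing
    # '%track_count%' is replaced with the track count computed above.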
if config.use_advanced_description == 1:
soundcloud.put('/me', **{ 'user[description]': desc })
elif config.use_advanced_description == 2:
original = soundcloud.get('/me').description
if not original:
return
new_desc, _ = original.split(config.stats_keyword, 1)
new_desc += config.stats_keyword + '\n'
new_desc += desc
soundcloud.put('/me', **{ 'user[description]': new_desc })
else:
logging.warning('Unknown value %d for use_advanced_description', config.use_advanced_description)
return
global should_update_description
should_update_description = False
logging.info('Description updated') |
if config.use_advanced_description and should_update_description:
update_description() | random_line_split |
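The bot above reads all of its settings from a user-supplied config module. As a rough sketch only — the attribute names are taken from the code above, every value below is a placeholder assumption, and the authoritative reference is the config.py.template mentioned in bot_init — a minimal config.py might look like:

# config.py (illustrative sketch, not the shipped template)
client_id = 'YOUR_CLIENT_ID'            # SoundCloud API credentials
client_secret = 'YOUR_CLIENT_SECRET'
username = 'group-account@example.com'  # group account login
password = 'CHANGE_ME'
token_cache = 'secrets.json'            # where the access token is cached
stats_database = 'stats.db'             # repost statistics database
banlistfile = 'banlist.txt'
post_track_id = 0                       # index of the group track in /me/tracks
allow_playlists = True
allow_bumps = True
min_bump_interval = 24 * 60 * 60        # seconds
post_limit = 3                          # max posts per user per interval
post_limit_interval = 24 * 60 * 60      # seconds
allowed_genres = None                   # or a list such as ['Drum & Bass']
debug_mode = False
use_advanced_description = 1            # 1 = replace description, 2 = append after stats_keyword
stats_keyword = 'STATS'
keyword_tag = '%'
description_template = 'Tracks: %track_count% | Last update: %last_update%'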
main.py | # By Monsterovich
# This script reposts user's track from the comments
from soundcloud import Client as Soundcloud
from requests import HTTPError
from time import strftime, time, gmtime
import logging
import os
import sys
import imp
from scgb.database import Database
BOT_VERSION = '1.3.3'
banlist = {
'user': {},
'track': {},
'playlist': {},
}
config = None
db = None
soundcloud = None
should_update_description = False
def bot_init():
global db
global config
# Init log
logging.basicConfig(stream=sys.stdout, level=logging.INFO, datefmt='[%Y-%m-%d %H:%M:%S]', format='%(asctime)s %(levelname)s %(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Init config
if len(sys.argv) > 1:
config = imp.load_source('scgb_config', sys.argv[1])
elif os.path.exists('config.py'):
config = imp.load_source('scgb_config', os.path.join(os.getcwd(), 'config.py'))
else:
logging.critical('Please, rename config.py.template to config.py and edit it.\nOr specify a config to load on the command line: py scgb.py <config file>')
sys.exit(1)
# Init database
db = Database(config.stats_database)
# Init banlist
load_banlist()
# Init soundcloud client
init_api()
def init_api():
"""Authenticate with SoundCloud API.
Cache access token in the secrets file."""
global soundcloud
import json
SECRETS_VERSION = 1
# Load secrets file
if os.path.exists(config.token_cache):
with open(config.token_cache, 'r', encoding='utf-8') as f:
secrets = json.load(f)
else:
secrets = {}
# Try to reuse the cached access token
if secrets\
and secrets['version'] == SECRETS_VERSION\
and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\
and secrets['username'] == config.username:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
access_token=secrets['access_token']
)
return
# Get a new access token
logging.info('Getting a new access token')
try:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
username=config.username,
password=config.password
)
except HTTPError as e:
if e.response.status_code == 401:
logging.critical('Incorrect API key, login or password. Please, edit config.py.')
sys.exit(1)
else:
raise
# Save the token
secrets = {
'version': SECRETS_VERSION,
'username': config.username,
'access_token': soundcloud.access_token,
'access_token_acquired_at': time(),
'access_token_expires_in': soundcloud.token.expires_in,
}
with open(config.token_cache, 'w', encoding='utf-8') as f:
        json.dump(secrets, f, indent='\t', ensure_ascii=False)
def load_banlist():
"""Load the banlist."""
# create banlist if it doesn't exist
if not os.path.exists(config.banlistfile):
open(config.banlistfile, 'ab').close()
with open(config.banlistfile, 'r') as file:
for line in file:
line = line.strip()
if line == '' or line.startswith('//'):
continue # skip empty lines and comments
values = line.split(None, 2)
what = values[0]
if what not in ['user', 'track', 'playlist']:
logging.warning('Banlist error: unknown ban type: %s', what)
continue
try:
id = int(values[1])
except ValueError:
                logging.warning('Banlist error: %s is not a valid %s id number', values[1], what)
continue
if len(values) > 2:
banlist[what][id] = values[2]
else:
banlist[what][id] = "No reason given."
def check_comments():
"""Download all comments and process them."""
# Get the id of the group track
try:
group_track = soundcloud.get('/me/tracks')[config.post_track_id]
except HTTPError as e:
if e.response.status_code == 404:
logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)
sys.exit(1)
else:
raise
# Get the comment list for the group track
comments = soundcloud.get('/tracks/%d/comments' % group_track.id)
if not comments:
logging.info('Nothing found...')
return
# Process each comment and delete it
for comment in reversed(comments):
logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)
response = None
# Try to process the comment
try:
response = process_comment(comment)
except HTTPError as e:
if e.response.status_code == 429:
logging.exception('Failed to repost track: too many requests:')
return
elif e.response.status_code // 100 == 4:
logging.exception('Failed to process comment due to a client request error:')
else:
raise
except Exception as e: # Program crash
logging.exception('Failed to process comment:')
else:
if response:
logging.info('The comment would have this response: %s', response)
else:
logging.info('Comment processed successfully')
# Delete the processed comment
try:
soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))
except HTTPError as e:
if e.response.status_code == 404:
logging.warning('Comment already deleted')
else:
raise
if config.use_advanced_description and should_update_description:
update_description()
def process_comment(comment):
"""Process a single comment."""
if not comment.body:
logging.info('Empty URL detected.')
return 'Your comment is empty.'
if comment.user_id in banlist['user']:
logging.info('Banned user id: %d', comment.user_id)
return 'You are banned from this group.'
url = comment.body
action = 'repost'
if url.startswith('!'):
action = 'delete'
url = url[1:]
# Resolve the resource to repost
resource = resolve_resource(url)
if resource:
logging.info('Resolved: %s %d', resource.kind, resource.id)
if resource.kind == 'playlist' and not config.allow_playlists:
logging.info('Playlists are not allowed. Skipping.')
return 'Playlists are not allowed in this group.'
else:
logging.info('Not found')
if not resource or resource.kind not in ('track', 'playlist'):
if config.allow_playlists:
return 'The provided link does not lead to a track or playlist.'
else:
return 'The provided link does not lead to a track.'
resource_type = resource.kind
# Check for ownership
if not config.debug_mode and comment.user_id != resource.user_id:
logging.info('Not the author of the resource')
return 'You must be the author of the {} to post it in this group.'.format(resource_type)
# Is the resource banned?
if resource.id in banlist[resource_type]:
        reason = banlist[resource_type][resource.id]
logging.info('This resource is banned: %s', reason)
return 'This track or playlist is banned from this group: ' + reason
# Repost/delete if needed
is_reposted = check_repost_exists(resource_type, resource.id)
if action == 'repost':
# Genre filter
if config.allowed_genres is not None:
genres_lowercase = [ genre.lower() for genre in config.allowed_genres ]
if resource.genre.lower() not in genres_lowercase:
logging.info('Genre not allowed: %s', resource.genre)
return 'This genre is not allowed in this group. Allowed genres are: ' + ', '.join(config.allowed_genres)
# Disable bumps if needed
if not config.allow_bumps and db.has_ever_been_posted(resource_type, resource.id):
logging.info('Bumping is disabled and this resource is present in the database.')
return 'Bumping is not allowed in this group.'
# Enforce minimum bump interval
last_reposted = db.last_repost_time(resource_type, resource.id)
if last_reposted is not None and last_reposted > int(time()) - config.min_bump_interval:
logging.info('This %s was posted %d seconds ago, but minimum bump interval is %d.', resource_type, int(time()) - last_reposted, config.min_bump_interval)
return 'This {} is posted to the group too frequently. Try again later.'.format(resource_type)
# Enforce max posts
last_post_count = db.user_last_posts_count(comment.user_id, config.post_limit_interval)
if last_post_count >= config.post_limit:
logging.info('The user has already made %d reposts.', last_post_count)
return 'You have already made {} posts.'.format(config.post_limit)
# Execute the command
if is_reposted:
logging.info('Bumping:')
group_delete(comment.user_id, resource_type, resource.id)
group_repost(comment.user_id, resource_type, resource.id)
else:
group_repost(comment.user_id, resource_type, resource.id)
request_description_update()
elif action == 'delete':
if is_reposted:
group_delete(comment.user_id, resource_type, resource.id)
request_description_update()
else:
logging.info('Resource already deleted')
else:
assert False, 'Unknown action: ' + repr(action)
def resolve_resource(url):
"""Return the resource object downloaded from url, or None, if not found."""
try:
resource = soundcloud.get('/resolve', url=url)
except HTTPError as e:
if e.response.status_code == 404:
return None
else:
raise
return resource
def check_repost_exists(type, id):
    """Return true if the repost exists, according to soundcloud.
Also update the database if a repost is already deleted
on soundcloud, but is not marked as deleted in the db."""
try:
soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))
return True
except HTTPError as e:
if e.response.status_code == 404:
db.mark_as_deleted(type, id)
return False
else:
raise
def | (user_id, resource_type, resource_id):
"""Repost a resource into the group and update the database."""
logging.info('Reposting %s %d...', resource_type, resource_id)
soundcloud.put('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_repost(user_id, resource_type, resource_id)
db.commit()
def group_delete(user_id, resource_type, resource_id):
"""Delete a resource from the group and update the database."""
logging.info('Deleting %s %d...', resource_type, resource_id)
soundcloud.delete('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_deletion(user_id, resource_type, resource_id)
db.commit()
def request_description_update():
"""Set a flag to update the description once all comments are processed."""
global should_update_description
should_update_description = True
def update_description():
"""Update group description."""
track_count = db.track_count
playlist_count = db.playlist_count
keywords = {
'last_update': strftime("%Y-%m-%d %H:%M:%S", gmtime()),
'bot_version': BOT_VERSION,
'track_count': track_count,
'playlist_count': playlist_count,
'user_count': db.user_count,
'post_count': track_count + playlist_count
}
desc = config.description_template.strip()
for keyword, value in keywords.items():
desc = desc.replace(config.keyword_tag + keyword + config.keyword_tag, str(value))
if config.use_advanced_description == 1:
soundcloud.put('/me', **{ 'user[description]': desc })
elif config.use_advanced_description == 2:
original = soundcloud.get('/me').description
if not original:
return
new_desc, _ = original.split(config.stats_keyword, 1)
new_desc += config.stats_keyword + '\n'
new_desc += desc
soundcloud.put('/me', **{ 'user[description]': new_desc })
else:
logging.warning('Unknown value %d for use_advanced_description', config.use_advanced_description)
return
global should_update_description
should_update_description = False
logging.info('Description updated')
| group_repost | identifier_name |
main.py | # By Monsterovich
# This script reposts user's track from the comments
from soundcloud import Client as Soundcloud
from requests import HTTPError
from time import strftime, time, gmtime
import logging
import os
import sys
import imp
from scgb.database import Database
BOT_VERSION = '1.3.3'
banlist = {
'user': {},
'track': {},
'playlist': {},
}
config = None
db = None
soundcloud = None
should_update_description = False
def bot_init():
|
def init_api():
"""Authenticate with SoundCloud API.
Cache access token in the secrets file."""
global soundcloud
import json
SECRETS_VERSION = 1
# Load secrets file
if os.path.exists(config.token_cache):
with open(config.token_cache, 'r', encoding='utf-8') as f:
secrets = json.load(f)
else:
secrets = {}
# Try to reuse the cached access token
if secrets\
and secrets['version'] == SECRETS_VERSION\
and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\
and secrets['username'] == config.username:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
access_token=secrets['access_token']
)
return
# Get a new access token
logging.info('Getting a new access token')
try:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
username=config.username,
password=config.password
)
except HTTPError as e:
if e.response.status_code == 401:
logging.critical('Incorrect API key, login or password. Please, edit config.py.')
sys.exit(1)
else:
raise
# Save the token
secrets = {
'version': SECRETS_VERSION,
'username': config.username,
'access_token': soundcloud.access_token,
'access_token_acquired_at': time(),
'access_token_expires_in': soundcloud.token.expires_in,
}
with open(config.token_cache, 'w', encoding='utf-8') as f:
        json.dump(secrets, f, indent='\t', ensure_ascii=False)
def load_banlist():
"""Load the banlist."""
# create banlist if it doesn't exist
if not os.path.exists(config.banlistfile):
open(config.banlistfile, 'ab').close()
with open(config.banlistfile, 'r') as file:
for line in file:
line = line.strip()
if line == '' or line.startswith('//'):
continue # skip empty lines and comments
values = line.split(None, 2)
what = values[0]
if what not in ['user', 'track', 'playlist']:
logging.warning('Banlist error: unknown ban type: %s', what)
continue
try:
id = int(values[1])
except ValueError:
                logging.warning('Banlist error: %s is not a valid %s id number', values[1], what)
continue
if len(values) > 2:
banlist[what][id] = values[2]
else:
banlist[what][id] = "No reason given."
def check_comments():
"""Download all comments and process them."""
# Get the id of the group track
try:
group_track = soundcloud.get('/me/tracks')[config.post_track_id]
except HTTPError as e:
if e.response.status_code == 404:
logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)
sys.exit(1)
else:
raise
# Get the comment list for the group track
comments = soundcloud.get('/tracks/%d/comments' % group_track.id)
if not comments:
logging.info('Nothing found...')
return
# Process each comment and delete it
for comment in reversed(comments):
logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)
response = None
# Try to process the comment
try:
response = process_comment(comment)
except HTTPError as e:
if e.response.status_code == 429:
logging.exception('Failed to repost track: too many requests:')
return
elif e.response.status_code // 100 == 4:
logging.exception('Failed to process comment due to a client request error:')
else:
raise
except Exception as e: # Program crash
logging.exception('Failed to process comment:')
else:
if response:
logging.info('The comment would have this response: %s', response)
else:
logging.info('Comment processed successfully')
# Delete the processed comment
try:
soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))
except HTTPError as e:
if e.response.status_code == 404:
logging.warning('Comment already deleted')
else:
raise
if config.use_advanced_description and should_update_description:
update_description()
def process_comment(comment):
"""Process a single comment."""
if not comment.body:
logging.info('Empty URL detected.')
return 'Your comment is empty.'
if comment.user_id in banlist['user']:
logging.info('Banned user id: %d', comment.user_id)
return 'You are banned from this group.'
url = comment.body
action = 'repost'
if url.startswith('!'):
action = 'delete'
url = url[1:]
# Resolve the resource to repost
resource = resolve_resource(url)
if resource:
logging.info('Resolved: %s %d', resource.kind, resource.id)
if resource.kind == 'playlist' and not config.allow_playlists:
logging.info('Playlists are not allowed. Skipping.')
return 'Playlists are not allowed in this group.'
else:
logging.info('Not found')
if not resource or resource.kind not in ('track', 'playlist'):
if config.allow_playlists:
return 'The provided link does not lead to a track or playlist.'
else:
return 'The provided link does not lead to a track.'
resource_type = resource.kind
# Check for ownership
if not config.debug_mode and comment.user_id != resource.user_id:
logging.info('Not the author of the resource')
return 'You must be the author of the {} to post it in this group.'.format(resource_type)
# Is the resource banned?
if resource.id in banlist[resource_type]:
        reason = banlist[resource_type][resource.id]
logging.info('This resource is banned: %s', reason)
return 'This track or playlist is banned from this group: ' + reason
# Repost/delete if needed
is_reposted = check_repost_exists(resource_type, resource.id)
if action == 'repost':
# Genre filter
if config.allowed_genres is not None:
genres_lowercase = [ genre.lower() for genre in config.allowed_genres ]
if resource.genre.lower() not in genres_lowercase:
logging.info('Genre not allowed: %s', resource.genre)
return 'This genre is not allowed in this group. Allowed genres are: ' + ', '.join(config.allowed_genres)
# Disable bumps if needed
if not config.allow_bumps and db.has_ever_been_posted(resource_type, resource.id):
logging.info('Bumping is disabled and this resource is present in the database.')
return 'Bumping is not allowed in this group.'
# Enforce minimum bump interval
last_reposted = db.last_repost_time(resource_type, resource.id)
if last_reposted is not None and last_reposted > int(time()) - config.min_bump_interval:
logging.info('This %s was posted %d seconds ago, but minimum bump interval is %d.', resource_type, int(time()) - last_reposted, config.min_bump_interval)
return 'This {} is posted to the group too frequently. Try again later.'.format(resource_type)
# Enforce max posts
last_post_count = db.user_last_posts_count(comment.user_id, config.post_limit_interval)
if last_post_count >= config.post_limit:
logging.info('The user has already made %d reposts.', last_post_count)
return 'You have already made {} posts.'.format(config.post_limit)
# Execute the command
if is_reposted:
logging.info('Bumping:')
group_delete(comment.user_id, resource_type, resource.id)
group_repost(comment.user_id, resource_type, resource.id)
else:
group_repost(comment.user_id, resource_type, resource.id)
request_description_update()
elif action == 'delete':
if is_reposted:
group_delete(comment.user_id, resource_type, resource.id)
request_description_update()
else:
logging.info('Resource already deleted')
else:
assert False, 'Unknown action: ' + repr(action)
def resolve_resource(url):
"""Return the resource object downloaded from url, or None, if not found."""
try:
resource = soundcloud.get('/resolve', url=url)
except HTTPError as e:
if e.response.status_code == 404:
return None
else:
raise
return resource
def check_repost_exists(type, id):
    """Return true if the repost exists, according to soundcloud.
Also update the database if a repost is already deleted
on soundcloud, but is not marked as deleted in the db."""
try:
soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))
return True
except HTTPError as e:
if e.response.status_code == 404:
db.mark_as_deleted(type, id)
return False
else:
raise
def group_repost(user_id, resource_type, resource_id):
"""Repost a resource into the group and update the database."""
logging.info('Reposting %s %d...', resource_type, resource_id)
soundcloud.put('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_repost(user_id, resource_type, resource_id)
db.commit()
def group_delete(user_id, resource_type, resource_id):
"""Delete a resource from the group and update the database."""
logging.info('Deleting %s %d...', resource_type, resource_id)
soundcloud.delete('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_deletion(user_id, resource_type, resource_id)
db.commit()
def request_description_update():
"""Set a flag to update the description once all comments are processed."""
global should_update_description
should_update_description = True
def update_description():
"""Update group description."""
track_count = db.track_count
playlist_count = db.playlist_count
keywords = {
'last_update': strftime("%Y-%m-%d %H:%M:%S", gmtime()),
'bot_version': BOT_VERSION,
'track_count': track_count,
'playlist_count': playlist_count,
'user_count': db.user_count,
'post_count': track_count + playlist_count
}
desc = config.description_template.strip()
for keyword, value in keywords.items():
desc = desc.replace(config.keyword_tag + keyword + config.keyword_tag, str(value))
if config.use_advanced_description == 1:
soundcloud.put('/me', **{ 'user[description]': desc })
elif config.use_advanced_description == 2:
original = soundcloud.get('/me').description
if not original:
return
new_desc, _ = original.split(config.stats_keyword, 1)
new_desc += config.stats_keyword + '\n'
new_desc += desc
soundcloud.put('/me', **{ 'user[description]': new_desc })
else:
logging.warning('Unknown value %d for use_advanced_description', config.use_advanced_description)
return
global should_update_description
should_update_description = False
logging.info('Description updated')
| global db
global config
# Init log
logging.basicConfig(stream=sys.stdout, level=logging.INFO, datefmt='[%Y-%m-%d %H:%M:%S]', format='%(asctime)s %(levelname)s %(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Init config
if len(sys.argv) > 1:
config = imp.load_source('scgb_config', sys.argv[1])
elif os.path.exists('config.py'):
config = imp.load_source('scgb_config', os.path.join(os.getcwd(), 'config.py'))
else:
logging.critical('Please, rename config.py.template to config.py and edit it.\nOr specify a config to load on the command line: py scgb.py <config file>')
sys.exit(1)
# Init database
db = Database(config.stats_database)
# Init banlist
load_banlist()
# Init soundcloud client
init_api() | identifier_body |
main.py | # By Monsterovich
# This script reposts user's track from the comments
from soundcloud import Client as Soundcloud
from requests import HTTPError
from time import strftime, time, gmtime
import logging
import os
import sys
import imp
from scgb.database import Database
BOT_VERSION = '1.3.3'
banlist = {
'user': {},
'track': {},
'playlist': {},
}
config = None
db = None
soundcloud = None
should_update_description = False
def bot_init():
global db
global config
# Init log
logging.basicConfig(stream=sys.stdout, level=logging.INFO, datefmt='[%Y-%m-%d %H:%M:%S]', format='%(asctime)s %(levelname)s %(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Init config
if len(sys.argv) > 1:
config = imp.load_source('scgb_config', sys.argv[1])
elif os.path.exists('config.py'):
config = imp.load_source('scgb_config', os.path.join(os.getcwd(), 'config.py'))
else:
logging.critical('Please, rename config.py.template to config.py and edit it.\nOr specify a config to load on the command line: py scgb.py <config file>')
sys.exit(1)
# Init database
db = Database(config.stats_database)
# Init banlist
load_banlist()
# Init soundcloud client
init_api()
def init_api():
"""Authenticate with SoundCloud API.
Cache access token in the secrets file."""
global soundcloud
import json
SECRETS_VERSION = 1
# Load secrets file
if os.path.exists(config.token_cache):
with open(config.token_cache, 'r', encoding='utf-8') as f:
secrets = json.load(f)
else:
secrets = {}
# Try to reuse the cached access token
if secrets\
and secrets['version'] == SECRETS_VERSION\
and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\
and secrets['username'] == config.username:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
access_token=secrets['access_token']
)
return
# Get a new access token
logging.info('Getting a new access token')
try:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
username=config.username,
password=config.password
)
except HTTPError as e:
if e.response.status_code == 401:
logging.critical('Incorrect API key, login or password. Please, edit config.py.')
sys.exit(1)
else:
raise
# Save the token
secrets = {
'version': SECRETS_VERSION,
'username': config.username,
'access_token': soundcloud.access_token,
'access_token_acquired_at': time(),
'access_token_expires_in': soundcloud.token.expires_in,
}
with open(config.token_cache, 'w', encoding='utf-8') as f:
        json.dump(secrets, f, indent='\t', ensure_ascii=False)
def load_banlist():
"""Load the banlist."""
# create banlist if it doesn't exist
if not os.path.exists(config.banlistfile):
open(config.banlistfile, 'ab').close()
with open(config.banlistfile, 'r') as file:
for line in file:
line = line.strip()
if line == '' or line.startswith('//'):
continue # skip empty lines and comments
values = line.split(None, 2)
what = values[0]
if what not in ['user', 'track', 'playlist']:
logging.warning('Banlist error: unknown ban type: %s', what)
continue
try:
id = int(values[1])
except ValueError:
                logging.warning('Banlist error: %s is not a valid %s id number', values[1], what)
continue
if len(values) > 2:
banlist[what][id] = values[2]
else:
banlist[what][id] = "No reason given."
def check_comments():
"""Download all comments and process them."""
# Get the id of the group track
try:
group_track = soundcloud.get('/me/tracks')[config.post_track_id]
except HTTPError as e:
if e.response.status_code == 404:
logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)
sys.exit(1)
else:
raise
# Get the comment list for the group track
comments = soundcloud.get('/tracks/%d/comments' % group_track.id)
if not comments:
logging.info('Nothing found...')
return
# Process each comment and delete it
for comment in reversed(comments):
logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)
response = None
# Try to process the comment
try:
response = process_comment(comment)
except HTTPError as e:
if e.response.status_code == 429:
logging.exception('Failed to repost track: too many requests:')
return
elif e.response.status_code // 100 == 4:
logging.exception('Failed to process comment due to a client request error:')
else:
raise
except Exception as e: # Program crash
logging.exception('Failed to process comment:')
else:
if response:
logging.info('The comment would have this response: %s', response)
else:
logging.info('Comment processed successfully')
# Delete the processed comment
try:
soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))
except HTTPError as e:
if e.response.status_code == 404:
logging.warning('Comment already deleted')
else:
raise
if config.use_advanced_description and should_update_description:
|
def process_comment(comment):
"""Process a single comment."""
if not comment.body:
logging.info('Empty URL detected.')
return 'Your comment is empty.'
if comment.user_id in banlist['user']:
logging.info('Banned user id: %d', comment.user_id)
return 'You are banned from this group.'
url = comment.body
action = 'repost'
if url.startswith('!'):
action = 'delete'
url = url[1:]
# Resolve the resource to repost
resource = resolve_resource(url)
if resource:
logging.info('Resolved: %s %d', resource.kind, resource.id)
if resource.kind == 'playlist' and not config.allow_playlists:
logging.info('Playlists are not allowed. Skipping.')
return 'Playlists are not allowed in this group.'
else:
logging.info('Not found')
if not resource or resource.kind not in ('track', 'playlist'):
if config.allow_playlists:
return 'The provided link does not lead to a track or playlist.'
else:
return 'The provided link does not lead to a track.'
resource_type = resource.kind
# Check for ownership
if not config.debug_mode and comment.user_id != resource.user_id:
logging.info('Not the author of the resource')
return 'You must be the author of the {} to post it in this group.'.format(resource_type)
# Is the resource banned?
if resource.id in banlist[resource_type]:
        reason = banlist[resource_type][resource.id]
logging.info('This resource is banned: %s', reason)
return 'This track or playlist is banned from this group: ' + reason
# Repost/delete if needed
is_reposted = check_repost_exists(resource_type, resource.id)
if action == 'repost':
# Genre filter
if config.allowed_genres is not None:
genres_lowercase = [ genre.lower() for genre in config.allowed_genres ]
if resource.genre.lower() not in genres_lowercase:
logging.info('Genre not allowed: %s', resource.genre)
return 'This genre is not allowed in this group. Allowed genres are: ' + ', '.join(config.allowed_genres)
# Disable bumps if needed
if not config.allow_bumps and db.has_ever_been_posted(resource_type, resource.id):
logging.info('Bumping is disabled and this resource is present in the database.')
return 'Bumping is not allowed in this group.'
# Enforce minimum bump interval
last_reposted = db.last_repost_time(resource_type, resource.id)
if last_reposted is not None and last_reposted > int(time()) - config.min_bump_interval:
logging.info('This %s was posted %d seconds ago, but minimum bump interval is %d.', resource_type, int(time()) - last_reposted, config.min_bump_interval)
return 'This {} is posted to the group too frequently. Try again later.'.format(resource_type)
# Enforce max posts
last_post_count = db.user_last_posts_count(comment.user_id, config.post_limit_interval)
if last_post_count >= config.post_limit:
logging.info('The user has already made %d reposts.', last_post_count)
return 'You have already made {} posts.'.format(config.post_limit)
# Execute the command
if is_reposted:
logging.info('Bumping:')
group_delete(comment.user_id, resource_type, resource.id)
group_repost(comment.user_id, resource_type, resource.id)
else:
group_repost(comment.user_id, resource_type, resource.id)
request_description_update()
elif action == 'delete':
if is_reposted:
group_delete(comment.user_id, resource_type, resource.id)
request_description_update()
else:
logging.info('Resource already deleted')
else:
assert False, 'Unknown action: ' + repr(action)
def resolve_resource(url):
"""Return the resource object downloaded from url, or None, if not found."""
try:
resource = soundcloud.get('/resolve', url=url)
except HTTPError as e:
if e.response.status_code == 404:
return None
else:
raise
return resource
def check_repost_exists(type, id):
    """Return true if the repost exists, according to soundcloud.
Also update the database if a repost is already deleted
on soundcloud, but is not marked as deleted in the db."""
try:
soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))
return True
except HTTPError as e:
if e.response.status_code == 404:
db.mark_as_deleted(type, id)
return False
else:
raise
def group_repost(user_id, resource_type, resource_id):
"""Repost a resource into the group and update the database."""
logging.info('Reposting %s %d...', resource_type, resource_id)
soundcloud.put('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_repost(user_id, resource_type, resource_id)
db.commit()
def group_delete(user_id, resource_type, resource_id):
"""Delete a resource from the group and update the database."""
logging.info('Deleting %s %d...', resource_type, resource_id)
soundcloud.delete('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_deletion(user_id, resource_type, resource_id)
db.commit()
def request_description_update():
"""Set a flag to update the description once all comments are processed."""
global should_update_description
should_update_description = True
def update_description():
"""Update group description."""
track_count = db.track_count
playlist_count = db.playlist_count
keywords = {
'last_update': strftime("%Y-%m-%d %H:%M:%S", gmtime()),
'bot_version': BOT_VERSION,
'track_count': track_count,
'playlist_count': playlist_count,
'user_count': db.user_count,
'post_count': track_count + playlist_count
}
desc = config.description_template.strip()
for keyword, value in keywords.items():
desc = desc.replace(config.keyword_tag + keyword + config.keyword_tag, str(value))
if config.use_advanced_description == 1:
soundcloud.put('/me', **{ 'user[description]': desc })
elif config.use_advanced_description == 2:
original = soundcloud.get('/me').description
if not original:
return
new_desc, _ = original.split(config.stats_keyword, 1)
new_desc += config.stats_keyword + '\n'
new_desc += desc
soundcloud.put('/me', **{ 'user[description]': new_desc })
else:
logging.warning('Unknown value %d for use_advanced_description', config.use_advanced_description)
return
global should_update_description
should_update_description = False
logging.info('Description updated')
| update_description() | conditional_block |
sigma-form-upload-file.component.ts | import { Component, Input, OnInit, ChangeDetectionStrategy, Optional, Self, ViewChild, ViewEncapsulation, ChangeDetectorRef, ElementRef } from '@angular/core';
import { ControlValueAccessor, FormControl, NgControl, FormControlName, AbstractControl } from '@angular/forms';
import { CONST_SHARED } from '../../constantes-shared';
import { MatFormFieldControl, MatInput, MatSnackBar } from '@angular/material';
import { InputFileMaxValidator, InputFileMinValidator, InputFileAcceptsValidator, InputFileSize } from '../../form/input.file';
import { UploadFileService } from '../../services/upload.file.service';
import { HttpEventType } from '@angular/common/http';
import { WebcamImage } from 'ngx-webcam';
/**
 * Component used to standardize the file upload field
 * across all forms in the system
*/
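/*
 * Minimal usage sketch (hypothetical template; the selector and the @Input
 * names come from the decorator and properties below, while the form control
 * name "soporte" is only an assumption):
 *
 *   <sigma-form-upload-file
 *     formControlName="soporte"
 *     placeholder="Attach supporting file"
 *     accept="application/pdf"
 *     [multiple]="true"
 *     [maxUpload]="3">
 *   </sigma-form-upload-file>
 */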
@Component({
// tslint:disable-next-line: component-selector
selector: 'sigma-form-upload-file',
templateUrl: './sigma-form-upload-file.component.html',
styleUrls: ['./sigma-form-upload-file.component.scss'],
providers: [
{
provide: MatFormFieldControl,
useExisting: SigmaFormUploadFileComponent
}
],
changeDetection: ChangeDetectionStrategy.OnPush,
encapsulation: ViewEncapsulation.None
})
export class SigmaFormUploadFileComponent implements OnInit, ControlValueAccessor {
  /** Constants used by the component */
constants = CONST_SHARED;
  /** Files selected by the user */
files = [];
  /** Form control the field is bound to */
control: FormControl = new FormControl();
  /** Flag indicating whether the field is required in the form */
required = false;
  /** Function in charge of validating the form field's elements */
validateFn: Function;
  /** Message describing the file type error */
errorTipoArchivos = '';
  /** List of custom errors the component can display */
basicErrors = [
{ name: 'required', message: this.constants.campoRequerido },
];
  /** Object with values to process, updated by the user */
object = Object;
  /** List of custom errors allowed by the component */
@Input('errors') errors: [] = [];
  /** Placeholder property associated with the form field */
@Input('placeholder') placeholder: string = '';
  /** String listing the file types accepted by the component */
@Input('accept') accept: string = '*';
  /** Flag indicating whether the component accepts multiple files */
@Input('multiple') multiple: boolean = false;
  /** Maximum number of files to upload */
@Input('maxUpload') maxUpload: number = 0;
  /** Minimum number of files to upload */
@Input('minUpload') minUpload: number = 0;
  /** Flag indicating whether the file view is refreshed when the upload completes */
@Input('autoUpdate') autoUpdate: boolean = true;
  /** Flag indicating whether a preview of the file is shown to the user */
@Input('showFile') showFile: boolean = false;
  /** Maximum file size allowed for upload, in MB */
@Input('sizeFile') sizeFile: number = 10;
@Input('action') action: string = 'create';
status = '';
public filesProcessed = 0;
public filesToProcess = 0;
  /** Input component reference that defines the field in the form */
@ViewChild('input') input: MatInput;
  /** Input component reference that defines the file field in the form */
@ViewChild('inputFile') inputFile: ElementRef;
  /** Definition of the method called when the form field's
   * value changes */
onChange = (_: any) => { }
  /** Definition of the method called when an action is performed
   * on the form field */
onTouch = () => { }
/**
   * Builds an instance of the component
   *
   * @param ngControl ng control of the form component
   * @param _controlName Name of the control used in the form
   * @param uploadFileService File upload service
   * @param cdRef Change detector reference used to know when the component's value has changed
   * @param snackBar Component used to open a modal snack bar
*/
constructor(
@Optional() @Self() ngControl: NgControl,
@Optional() private _controlName: FormControlName,
private uploadFileService: UploadFileService,
private cdRef: ChangeDetectorRef,
private snackBar: MatSnackBar,
) {
if (ngControl) {
ngControl.valueAccessor = this;
}
}
  /** Method in charge of initializing the component */
ngOnInit() {
if (this._controlName) {
this.control = this._controlName.control;
}
this.clearInputHiden();
this.activeRequired(this.control);
this.acceptValid(this.control);
this.addTotalFiles(this.control);
this.addSizeFile(this.control);
this.addErrors();
this.setMensajeErrorTipoArchivo();
}
  /** Method that opens the native file input so the user can select a file */
selectFile() {
this.onTouch();
this.inputFile.nativeElement.click();
}
/**
   * Method in charge of setting the required flag on the given
   * control
   *
   * @param control Control on which the required flag will be set
*/
activeRequired(control: FormControl) {
if (control.validator != undefined) {
const validator = control.validator({} as AbstractControl);
if (validator && validator.required) {
this.r | *
   * Method that validates the file type against the allowed types
   *
   * @param control Form control to which the failure or success message will be attached
*/
acceptValid(control: FormControl) {
if (this.accept) {
let validate = this.setValidateFile(control.validator, InputFileAcceptsValidator(this.accept));
control.setValidators(validate);
}
}
/**
   * Method in charge of adding the errors identified by the
   * validator to the form field's error section
*/
addErrors() {
if (this.errors.length > 0) {
this.errors.map(item => {
this.basicErrors.push(item);
});
}
}
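/**
* Método que indica si se debe presentar la opción de adjuntar archivos,
* según el estado del control y la cantidad máxima de archivos permitida
*/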
validateShowAttachFile(): boolean {
if (this.control.disabled) {
return false;
}
if (this.maxUpload === 0 ) {
return true;
}
if ( this.files && ( this.maxUpload > this.files.length) ) {
return true;
} else {
return false;
}
}
/**
* Método encargado de adicionar el total de archivos al componente indicado
*
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
addTotalFiles(control: FormControl) {
let total = this.files ? this.files.length : 0;
if (this.maxUpload > 0) {
let validate = this.setValidateFile(control.validator, InputFileMaxValidator(this.maxUpload, total));
control.setValidators(validate);
}
if (this.minUpload > 0) {
if (this.action === 'edit') {
total = 0;
}
let validate = this.setValidateFile(control.validator, InputFileMinValidator(this.minUpload, total));
control.setValidators(validate);
}
}
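/** Método encargado de reiniciar las validaciones del componente en modo edición */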
resetForm() {
this.action = 'edit';
this.ngOnInit();
}
/**
* Método encargado de adicionar el tamaño de archivo al control indicado
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
addSizeFile(control) {
let validate = this.setValidateFile(control.validator, InputFileSize(this.sizeFile));
control.setValidators(validate);
}
/**
* Método encargado de realizar la validación del archivo cargado por el usuario
* @param existeValidate Indica si el archivo ya existe o es nuevo
* @param validate Indica si el archivo es de tipo válido
*/
setValidateFile(existeValidate, validate) {
if (existeValidate) {
return [existeValidate, validate];
} else {
return [validate];
}
}
/**
* Método que permite saber si el control al cual se le asigna el archivo
* es válido o no
*/
validControl(): boolean {
if (this.control.disabled) {
return false;
}
if (this.control.errors) {
if (Object.keys(this.control.errors).length > 0) {
return false;
}
}
if (!this.autoUpdate) {
return false;
}
return true;
}
/**
* Método encargado de adicionar los archivos cargados por el cliente al
* listado de archivos del modelo
*
* @param event Evento con los archivos cargados por el usuario
*/
agregarFiles(event) {
this.onTouch();
this.status = 'adding';
this.filesProcessed = 0;
this.filesToProcess = event.target.files.length;
this.control.setValue(event.target.files);
if (!this.validControl() && this.minUpload === 0) {
return;
}
this.files = [];
for (let file in event.target.files) {
if (typeof event.target.files[file] == 'object') {
this.files.push(event.target.files[file]);
this.upload(file, this.files[file]);
}
}
}
setFiles(files: any) {
this.files = files;
this.response();
this.status = 'rewrite';
this.detectChange();
this.clearInputHiden();
}
resetFormConditions() {
this.onTouch();
this.response();
this.status = 'reseting';
this.detectChange();
this.action = 'edit';
this.ngOnInit();
}
/**
* Método que permite la asignación de los errores de tipos de archivo
* a la sección de errores del formulario
*/
setMensajeErrorTipoArchivo() {
this.errorTipoArchivos = this.constants.typeFiles;
if (this.accept) {
let formatos = [] ;
const tipos = this.accept.split(',');
if (tipos) {
tipos.forEach(item => {
item = item.trim();
const index = this.constants.formatoArchivos.findIndex(formato => formato.mimeType.toLowerCase() === item.toLowerCase());
if (index > -1) {
if (formatos.findIndex(f => f === this.constants.formatoArchivos[index].nombreTipoArchivo) === -1) {
formatos.push(this.constants.formatoArchivos[index].nombreTipoArchivo);
}
}
});
}
if ( formatos.length > 0) {
let posicion = 0;
formatos.forEach(formato => {
posicion++;
if (formatos.length === posicion) {
this.errorTipoArchivos += formato + '. ';
} else {
this.errorTipoArchivos += formato + ', ';
}
});
}
}
}
/**
* Método encargado de actualizar el modelo de archivos con los archivos
* ingresados por el usuario
*
* @param key LLave del archivo modificado
* @param file Archivo modificado
*/
upload(key, file: File): void {
this.files[key].success = true;
this.uploadFileService.uploadFile(file).subscribe(event => {
if (event.type === HttpEventType.UploadProgress) {
this.status = 'loading';
this.files[key].progress = Math.round((event.loaded / event.total) * 100);
this.detectChange();
} else if (event.type === HttpEventType.Response) {
this.files[key] = event.body;
this.files[key].success = true;
this.filesProcessed ++;
if (this.filesProcessed === this.filesToProcess) {
this.response();
this.status = 'completed';
this.detectChange();
}
}
}, error => {
this.files[key].success = false;
this.snackBar.open(this.constants.errorSubirArchivo, 'X', {
duration: 5000,
panelClass: ['error-snackbar']
});
this.status = 'error';
this.detectChange();
});
}
/**
* Método que permite detectar los cambios del modelo de
* archivos enviados por el usuario
*/
detectChange() {
try {
this.cdRef.detectChanges();
} catch (error) { }
}
/** Gestión de la petición realizada por el usuario */
response() {
if (!this.control.disabled) {
this.onTouch();
if (this.multiple) {
setTimeout(() => {
this.onChange(this.files);
}, 100);
} else {
this.onChange(this.files[0]);
}
}
}
/**
* Método encargado de presentar en el formulario el nombre del archivo cargado
*
* @param file Archivo del cual se va a presentar la información
*/
showNameFile(file): string {
if (!file) {
return '';
}
if (file.name) {
return file.name;
}
if (file.nombre) {
return file.nombre;
}
return '';
}
/** Método encargado de eliminar el ultimo archivo cargado por el usuario */
eliminar() {
this.files = [];
this.control.setValue('');
this.clearInputHiden();
this.control.updateValueAndValidity();
this.onTouch();
this.ngOnInit();
}
/**
* Método encargado de eliminar el indice del archivo de la colección
* de archivos cargados
* @param key Llave del archivo a eliminar
*/
eliminarElemento(key) {
this.onTouch();
this.files.splice(key, 1);
this.response();
this.status = 'deleted';
this.detectChange();
this.clearInputHiden();
}
/** Método encargado de limpiar el archivo cargado por el usuario del modelo */
clearInputHiden() {
this.inputFile.nativeElement.value = '';
}
/**
* Método encargado de obtener el valor de archivo del componente
* que fué adjuntado
*/
getValue() {
return this.constants.adjuntarFile;
}
/**
* Método encargado de establecer el valor digitado por el usuario
* a la variable del modelo del componente
*
* @param value valor digitado por el usuario en el campo del formulario
**/
writeValue(value: any) {
if (typeof value == 'undefined' || value == null || !value) {
this.files = [];
} else if (Array.isArray(value)) {
this.files = value;
} else if (typeof value == 'object' && value) {
if (value['length'] > 0) {
this.files = [value[0]];
} else {
this.files = [value];
}
} else {
this.files = value ? value : [];
}
}
/**
* Método encargado de registar la funcion ingresada al onchange
* del componente
*
* @param fn Funcion con la que se definirá la acción onchange
* del control del formulario
**/
registerOnChange(fn) {
this.onChange = fn;
}
/**
* Método encargado de registar la funcion ingresada al ontouched
* del componente
*
* @param fn Funcion con la que se definirá la acción ontouched
* del control del formulario
**/
registerOnTouched(fn) {
this.onTouch = fn;
}
/** Método encargado de establecer el estado de deshabilitado del
* campo del formulario en el componente
*
* @param isDisabled Valor que indica si el campo se encuentra en estado
* dehabilitado
**/
setDisabledState(isDisabled: boolean): void { }
}
| equired = true;
}
}
}
/* | conditional_block |
sigma-form-upload-file.component.ts | import { Component, Input, OnInit, ChangeDetectionStrategy, Optional, Self, ViewChild, ViewEncapsulation, ChangeDetectorRef, ElementRef } from '@angular/core';
import { ControlValueAccessor, FormControl, NgControl, FormControlName, AbstractControl } from '@angular/forms';
import { CONST_SHARED } from '../../constantes-shared';
import { MatFormFieldControl, MatInput, MatSnackBar } from '@angular/material';
import { InputFileMaxValidator, InputFileMinValidator, InputFileAcceptsValidator, InputFileSize } from '../../form/input.file';
import { UploadFileService } from '../../services/upload.file.service';
import { HttpEventType } from '@angular/common/http';
import { WebcamImage } from 'ngx-webcam';
/**
* Componente usado para estandarizar el campo de carga de archivos
* en todos los formularios del sistema
*/
@Component({
// tslint:disable-next-line: component-selector
selector: 'sigma-form-upload-file',
templateUrl: './sigma-form-upload-file.component.html',
styleUrls: ['./sigma-form-upload-file.component.scss'],
providers: [
{
provide: MatFormFieldControl,
useExisting: SigmaFormUploadFileComponent
}
],
changeDetection: ChangeDetectionStrategy.OnPush,
encapsulation: ViewEncapsulation.None
})
export class SigmaFormUploadFileComponent implements OnInit, ControlValueAccessor {
/** Constantes a usar en el componente */
constants = CONST_SHARED;
/** Archivos seleccionados por el usuario */
files = [];
/** Control de formulario al que se asocia el campo */
control: FormControl = new FormControl();
/** Bandera de control para saber si el campo es requerido en el formulario o no */
required = false;
/** Función encargada de la validación de elementos del campo del formulario */
validateFn: Function;
/** Mensaje que indica el error de tipo de archivo */
errorTipoArchivos = '';
/** Listado de errores personalizados que puede presentar el componente */
basicErrors = [
{ name: 'required', message: this.constants.campoRequerido },
];
/** Objeto con valores a procesar actualizados por el usuario */
object = Object;
/** Listado de errores personalizados permitidos por el componente */
@Input('errors') errors: any[] = [];
/** Propiedad Placeholder asociado al campo del formulario */
@Input('placeholder') placeholder: string = '';
/** Cadena de texto con los tipos de archivos aceptados por el componente */
@Input('accept') accept: string = '*';
/** Bandera que indica si el componente admite multiples archivos */
@Input('multiple') multiple: boolean = false;
/** Cantidad maxima de archivos a cargar */
@Input('maxUpload') maxUpload: number = 0;
/** Cantidad mínima de archivos a cargar */
@Input('minUpload') minUpload: number = 0;
/** Bandera que permite identificar si se actualiza la vista del archivo al realizar el cargue */
@Input('autoUpdate') autoUpdate: boolean = true;
/** Bandera que indica si se presentará al usuario la vista previa del archivo */
@Input('showFile') showFile: boolean = false;
/** Tamaño máximo permitido para el cargue del archivo en MBs */
@Input('sizeFile') sizeFile: number = 10;
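/** Acción que se está ejecutando sobre el formulario ('create' o 'edit') */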
@Input('action') action: string = 'create';
status = '';
public filesProcessed = 0;
public filesToProcess = 0;
/** Entrada de tipo de componente que define el campo en el formulario */
@ViewChild('input') input: MatInput;
/** Entrada de tipo de componente que define el campo archivo en el formulario */
@ViewChild('inputFile') inputFile: ElementRef;
/** Definición del método que es llamado al momento de cambiar el dato del
* campo del formulario */
onChange = (_: any) => { }
/** Definición del método que es llamado al momento de realizar acción sobre el
* campo del formulario */
onTouch = () => { }
/**
* Método encargado de construir una instancia de componente
*
* @param ngControl Control de tipo de ng del componente del formulario
* @param _controlName Nombre del Control a usar en el formulario
* @param uploadFileService Servicio de carga de archivos
* @param cdRef Referencia a componente de observable para saber si ha cambiado el valor del componente
* @param snackBar Componente usado para abrir un recuadro modal
*/
constructor(
@Optional() @Self() ngControl: NgControl,
@Optional() private _controlName: FormControlName,
private uploadFileService: UploadFileService,
private cdRef: ChangeDetectorRef,
private snackBar: MatSnackBar,
) {
if (ngControl) {
ngControl.valueAccessor = this;
}
}
/** Método encargado de inicializar el componente */
ngOnInit() {
if (this._controlName) {
this.control = this._controlName.control;
}
this.clearInputHiden();
this.activeRequired(this.control);
this.acceptValid(this.control);
this.addTotalFiles(this.control);
this.addSizeFile(this.control);
this.addErrors();
this.setMensajeErrorTipoArchivo();
}
/** Método que permite saber cual es el archivo seleccionado */
selectFile() {
this.onTouch();
this.inputFile.nativeElement.click();
}
/**
* Método encargado de asignar la bandera de activo al control
* indicado
*
* @param control Control al cual se le asignará la bandera de requerida
*/
activeRequired(control: FormControl) {
if (control.validator != undefined) {
const validator = control.validator({} as AbstractControl);
if (validator && validator.required) {
this.required = true;
}
}
}
/**
* Método que permite validar el tipo de archivo con los permitidos
*
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
acceptValid(control: FormControl) {
if (this.accept) {
let validate = this.setValidateFile(control.validator, InputFileAcceptsValidator(this.accept));
control.setValidators(validate);
}
}
/**
* Método encargado de adicionar los errores identificados
* en el validator a la sección de errores del campo del formulario | addErrors() {
if (this.errors.length > 0) {
this.errors.map(item => {
this.basicErrors.push(item);
});
}
}
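/**
* Método que indica si se debe presentar la opción de adjuntar archivos,
* según el estado del control y la cantidad máxima de archivos permitida
*/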
validateShowAttachFile(): boolean {
if (this.control.disabled) {
return false;
}
if (this.maxUpload === 0 ) {
return true;
}
if ( this.files && ( this.maxUpload > this.files.length) ) {
return true;
} else {
return false;
}
}
/**
* Método encargado de adicionar el total de archivos al componente indicado
*
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
addTotalFiles(control: FormControl) {
let total = this.files ? this.files.length : 0;
if (this.maxUpload > 0) {
let validate = this.setValidateFile(control.validator, InputFileMaxValidator(this.maxUpload, total));
control.setValidators(validate);
}
if (this.minUpload > 0) {
if (this.action === 'edit') {
total = 0;
}
let validate = this.setValidateFile(control.validator, InputFileMinValidator(this.minUpload, total));
control.setValidators(validate);
}
}
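/** Método encargado de reiniciar las validaciones del componente en modo edición */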
resetForm() {
this.action = 'edit';
this.ngOnInit();
}
/**
* Método encargado de adicionar el tamaño de archivo al control indicado
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
addSizeFile(control) {
let validate = this.setValidateFile(control.validator, InputFileSize(this.sizeFile));
control.setValidators(validate);
}
/**
* Método encargado de realizar la validación del archivo cargado por el usuario
* @param existeValidate Indica si el archivo ya existe o es nuevo
* @param validate Indica si el archivo es de tipo válido
*/
setValidateFile(existeValidate, validate) {
if (existeValidate) {
return [existeValidate, validate];
} else {
return [validate];
}
}
/**
* Método que permite saber si el control al cual se le asigna el archivo
* es válido o no
*/
validControl(): boolean {
if (this.control.disabled) {
return false;
}
if (this.control.errors) {
if (Object.keys(this.control.errors).length > 0) {
return false;
}
}
if (!this.autoUpdate) {
return false;
}
return true;
}
/**
* Método encargado de adicionar los archivos cargados por el cliente al
* listado de archivos del modelo
*
* @param event Evento con los archivos cargados por el usuario
*/
agregarFiles(event) {
this.onTouch();
this.status = 'adding';
this.filesProcessed = 0;
this.filesToProcess = event.target.files.length;
this.control.setValue(event.target.files);
if (!this.validControl() && this.minUpload === 0) {
return;
}
this.files = [];
for (let file in event.target.files) {
if (typeof event.target.files[file] == 'object') {
this.files.push(event.target.files[file]);
this.upload(file, this.files[file]);
}
}
}
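/** Método encargado de asignar el listado de archivos recibido y notificar el cambio al formulario */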
setFiles(files: any) {
this.files = files;
this.response();
this.status = 'rewrite';
this.detectChange();
this.clearInputHiden();
}
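/** Método encargado de restablecer las condiciones del formulario y reiniciar el componente en modo edición */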
resetFormConditions() {
this.onTouch();
this.response();
this.status = 'reseting';
this.detectChange();
this.action = 'edit';
this.ngOnInit();
}
/**
* Método que permite la asignación de los errores de tipos de archivo
* a la sección de errores del formulario
*/
setMensajeErrorTipoArchivo() {
this.errorTipoArchivos = this.constants.typeFiles;
if (this.accept) {
let formatos = [] ;
const tipos = this.accept.split(',');
if (tipos) {
tipos.forEach(item => {
item = item.trim();
const index = this.constants.formatoArchivos.findIndex(formato => formato.mimeType.toLowerCase() === item.toLowerCase());
if (index > -1) {
if (formatos.findIndex(f => f === this.constants.formatoArchivos[index].nombreTipoArchivo) === -1) {
formatos.push(this.constants.formatoArchivos[index].nombreTipoArchivo);
}
}
});
}
if ( formatos.length > 0) {
let posicion = 0;
formatos.forEach(formato => {
posicion++;
if (formatos.length === posicion) {
this.errorTipoArchivos += formato + '. ';
} else {
this.errorTipoArchivos += formato + ', ';
}
});
}
}
}
/**
* Método encargado de actualizar el modelo de archivos con los archivos
* ingresados por el usuario
*
* @param key LLave del archivo modificado
* @param file Archivo modificado
*/
upload(key, file: File): void {
this.files[key].success = true;
this.uploadFileService.uploadFile(file).subscribe(event => {
if (event.type === HttpEventType.UploadProgress) {
this.status = 'loading';
this.files[key].progress = Math.round((event.loaded / event.total) * 100);
this.detectChange();
} else if (event.type === HttpEventType.Response) {
this.files[key] = event.body;
this.files[key].success = true;
this.filesProcessed ++;
if (this.filesProcessed === this.filesToProcess) {
this.response();
this.status = 'completed';
this.detectChange();
}
}
}, error => {
this.files[key].success = false;
this.snackBar.open(this.constants.errorSubirArchivo, 'X', {
duration: 5000,
panelClass: ['error-snackbar']
});
this.status = 'error';
this.detectChange();
});
}
/**
* Método que permite detectar los cambios del modelo de
* archivos enviados por el usuario
*/
detectChange() {
try {
this.cdRef.detectChanges();
} catch (error) { }
}
/** Gestión de la petición realizada por el usuario */
response() {
if (!this.control.disabled) {
this.onTouch();
if (this.multiple) {
setTimeout(() => {
this.onChange(this.files);
}, 100);
} else {
this.onChange(this.files[0]);
}
}
}
/**
* Método encargado de presentar en el formulario el nombre del archivo cargado
*
* @param file Archivo del cual se va a presentar la información
*/
showNameFile(file): string {
if (!file) {
return '';
}
if (file.name) {
return file.name;
}
if (file.nombre) {
return file.nombre;
}
return '';
}
/** Método encargado de eliminar el ultimo archivo cargado por el usuario */
eliminar() {
this.files = [];
this.control.setValue('');
this.clearInputHiden();
this.control.updateValueAndValidity();
this.onTouch();
this.ngOnInit();
}
/**
* Método encargado de eliminar el indice del archivo de la colección
* de archivos cargados
* @param key Llave del archivo a eliminar
*/
eliminarElemento(key) {
this.onTouch();
this.files.splice(key, 1);
this.response();
this.status = 'deleted';
this.detectChange();
this.clearInputHiden();
}
/** Método encargado de limpiar el archivo cargado por el usuario del modelo */
clearInputHiden() {
this.inputFile.nativeElement.value = '';
}
/**
* Método encargado de obtener el valor de archivo del componente
* que fué adjuntado
*/
getValue() {
return this.constants.adjuntarFile;
}
/**
* Método encargado de establecer el valor digitado por el usuario
* a la variable del modelo del componente
*
* @param value valor digitado por el usuario en el campo del formulario
**/
writeValue(value: any) {
if (typeof value == 'undefined' || value == null || !value) {
this.files = [];
} else if (Array.isArray(value)) {
this.files = value;
} else if (typeof value == 'object' && value) {
if (value['length'] > 0) {
this.files = [value[0]];
} else {
this.files = [value];
}
} else {
this.files = value ? value : [];
}
}
/**
* Método encargado de registar la funcion ingresada al onchange
* del componente
*
* @param fn Funcion con la que se definirá la acción onchange
* del control del formulario
**/
registerOnChange(fn){
this.onChange = fn;
}
/**
* Método encargado de registar la funcion ingresada al ontouched
* del componente
*
* @param fn Funcion con la que se definirá la acción ontouched
* del control del formulario
**/
registerOnTouched(fn) {
this.onTouch = fn;
}
/** Método encargado de establecer el estado de deshabilitado del
* campo del formulario en el componente
*
* @param isDisabled Valor que indica si el campo se encuentra en estado
* dehabilitado
**/
setDisabledState(isDisabled: boolean): void { }
} | */ | random_line_split |
sigma-form-upload-file.component.ts | import { Component, Input, OnInit, ChangeDetectionStrategy, Optional, Self, ViewChild, ViewEncapsulation, ChangeDetectorRef, ElementRef } from '@angular/core';
import { ControlValueAccessor, FormControl, NgControl, FormControlName, AbstractControl } from '@angular/forms';
import { CONST_SHARED } from '../../constantes-shared';
import { MatFormFieldControl, MatInput, MatSnackBar } from '@angular/material';
import { InputFileMaxValidator, InputFileMinValidator, InputFileAcceptsValidator, InputFileSize } from '../../form/input.file';
import { UploadFileService } from '../../services/upload.file.service';
import { HttpEventType } from '@angular/common/http';
import { WebcamImage } from 'ngx-webcam';
/**
* Componente usado para estandarizar el campo de carga de archivos
* en todos los formularios del sistema
*/
@Component({
// tslint:disable-next-line: component-selector
selector: 'sigma-form-upload-file',
templateUrl: './sigma-form-upload-file.component.html',
styleUrls: ['./sigma-form-upload-file.component.scss'],
providers: [
{
provide: MatFormFieldControl,
useExisting: SigmaFormUploadFileComponent
}
],
changeDetection: ChangeDetectionStrategy.OnPush,
encapsulation: ViewEncapsulation.None
})
export class SigmaFormUploadFileComponent implements OnInit, ControlValueAccessor {
/** Constantes a usar en el componente */
constants = CONST_SHARED;
/** Archivos seleccionados por el usuario */
files = [];
/** Control de formulario al que se asocia el campo */
control: FormControl = new FormControl();
/** Bandera de control para saber si el campo es requerido en el formulario o no */
required = false;
/** Función encargada de la validación de elementos del campo del formulario */
validateFn: Function;
/** Mensaje que indica el error de tipo de archivo */
errorTipoArchivos = '';
/** Listado de errores personalizados que puede presentar el componente */
basicErrors = [
{ name: 'required', message: this.constants.campoRequerido },
];
/** Objeto con valores a procesar actualizados por el usuario */
object = Object;
/** Listado de errores personalizados permitidos por el componente */
@Input('errors') errors: any[] = [];
/** Propiedad Placeholder asociado al campo del formulario */
@Input('placeholder') placeholder: string = '';
/** Cadena de texto con los tipos de archivos aceptados por el componente */
@Input('accept') accept: string = '*';
/** Bandera que indica si el componente admite multiples archivos */
@Input('multiple') multiple: boolean = false;
/** Cantidad maxima de archivos a cargar */
@Input('maxUpload') maxUpload: number = 0;
/** Cantidad mínima de archivos a cargar */
@Input('minUpload') minUpload: number = 0;
/** Bandera que permite identificar si se actualiza la vista del archivo al realizar el cargue */
@Input('autoUpdate') autoUpdate: boolean = true;
/** Bandera que indica si se presentará al usuario la vista previa del archivo */
@Input('showFile') showFile: boolean = false;
/** Tamaño máximo permitido para el cargue del archivo en MBs */
@Input('sizeFile') sizeFile: number = 10;
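/** Acción que se está ejecutando sobre el formulario ('create' o 'edit') */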
@Input('action') action: string = 'create';
status = '';
public filesProcessed = 0;
public filesToProcess = 0;
/** Entrada de tipo de componente que define el campo en el formulario */
@ViewChild('input') input: MatInput;
/** Entrada de tipo de componente que define el campo archivo en el formulario */
@ViewChild('inputFile') inputFile: ElementRef;
/** Definición del método que es llamado al momento de cambiar el dato del
* campo del formulario */
onChange = (_: any) => { }
/** Definición del método que es llamado al momento de realizar acción sobre el
* campo del formulario */
onTouch = () => { }
/**
* Método encargado de construir una instancia de componente
*
* @param ngControl Control de tipo de ng del componente del formulario
* @param _controlName Nombre del Control a usar en el formulario
* @param uploadFileService Servicio de carga de archivos
* @param cdRef Referencia a componente de observable para saber si ha cambiado el valor del componente
* @param snackBar Componente usado para abrir un recuadro modal
*/
constructor(
@Optional() @Self() ngControl: NgControl,
@Optional() private _controlName: FormControlName,
private uploadFileService: UploadFileService,
private cdRef: ChangeDetectorRef,
private snackBar: MatSnackBar,
) {
if (ngControl) {
ngControl.valueAccessor = this;
}
}
/** Método encargado de inicializar el componente */
ngOnInit() {
if (this._controlName) {
this.control = this._controlName.control;
}
this.clearInputHiden();
this.activeRequired(this.control);
this.acceptValid(this.control);
this.addTotalFiles(this.control);
this.addSizeFile(this.control);
this.addErrors();
this.setMensajeErrorTipoArchivo();
}
/** Método que permite saber cual es el archivo seleccionado */
selectFile() {
this.onTouch();
this.inputFile.nativeElement.click();
}
/**
* Método encargado de asignar la bandera de activo al control
* indicado
*
* @param control Control al cual se le asignará la bandera de requerida
*/
activeRequired(control: FormControl) {
if (control.validator != undefined) {
const validator = control.validator({} as AbstractControl);
if (validator && validator.required) {
this.required = true;
}
}
}
/**
* Método que permite validar el tipo de archivo con los permitidos
*
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
acceptValid(control: FormControl) {
if (this.accept) {
let validate = this.setValidateFile(control.validator, InputFileAcceptsValidator(this.accept));
control.setValidators(validate);
}
}
/**
* Método encargado de adicionar los errores identificados
* en el validator a la sección de errores del campo del formulario
*/
addErrors() {
if (this.errors.length > 0) {
this.errors.map(item => {
this.basicErrors.push(item);
});
}
}
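/**
* Método que indica si se debe presentar la opción de adjuntar archivos,
* según el estado del control y la cantidad máxima de archivos permitida
*/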
validateShowAttachFile(): boolean {
if (this.control.disabled) {
return false;
}
if (this.maxUpload === 0 ) {
return true;
}
if ( this.files && ( this.maxUpload > this.files.length) ) {
return true;
} else {
return false;
}
}
/**
* Método encargado de adicionar el total de archivos al componente indicado
*
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
addTotalFiles(control: FormControl) {
let total = this.files ? this.files.length : 0;
if (this.maxUpload > 0) {
let validate = this.setValidateFile(control.validator, InputFileMaxValidator(this.maxUpload, total));
control.setValidators(validate);
}
if (this.minUpload > 0) {
if (this.action === 'edit') {
total = 0;
}
let validate = this.setValidateFile(control.validator, InputFileMinValidator(this.minUpload, total));
control.setValidators(validate);
}
}
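/** Método encargado de reiniciar las validaciones del componente en modo edición */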
resetForm() {
this.action = 'edit';
this.ngOnInit();
}
/**
* Método encargado de adicionar el tamaño de archivo al control indicado
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
addSizeFile(control) {
let validate = this.setValidateFile(control.validator, InputFileSize(this.sizeFile));
control.setValidators(validate);
}
/**
* Método encargado de realizar la validación del archivo cargado por el usuario
* @param existeValidate Indica si el archivo ya existe o es nuevo
* @param validate Indica si el archivo es de tipo válido
*/
setValidateFile(existeValidate, validate) {
if (existeValidate) {
return [existeValidate, validate];
} else {
return [validate];
}
}
/**
* Método que permite saber si el control al cual se le asigna el archivo
* es válido o no
*/
validControl(): boolean {
if | l.disabled) {
return false;
}
if (this.control.errors) {
if (Object.keys(this.control.errors).length > 0) {
return false;
}
}
if (!this.autoUpdate) {
return false;
}
return true;
}
/**
* Método encargado de adicionar los archivos cargados por el cliente al
* listado de archivos del modelo
*
* @param event Evento con los archivos cargados por el usuario
*/
agregarFiles(event) {
this.onTouch();
this.status = 'adding';
this.filesProcessed = 0;
this.filesToProcess = event.target.files.length;
this.control.setValue(event.target.files);
if (!this.validControl() && this.minUpload === 0) {
return;
}
this.files = [];
for (let file in event.target.files) {
if (typeof event.target.files[file] == 'object') {
this.files.push(event.target.files[file]);
this.upload(file, this.files[file]);
}
}
}
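/** Método encargado de asignar el listado de archivos recibido y notificar el cambio al formulario */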
setFiles(files: any) {
this.files = files;
this.response();
this.status = 'rewrite';
this.detectChange();
this.clearInputHiden();
}
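/** Método encargado de restablecer las condiciones del formulario y reiniciar el componente en modo edición */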
resetFormConditions() {
this.onTouch();
this.response();
this.status = 'reseting';
this.detectChange();
this.action = 'edit';
this.ngOnInit();
}
/**
* Método que permite la asignación de los errores de tipos de archivo
* a la sección de errores del formulario
*/
setMensajeErrorTipoArchivo() {
this.errorTipoArchivos = this.constants.typeFiles;
if (this.accept) {
let formatos = [] ;
const tipos = this.accept.split(',');
if (tipos) {
tipos.forEach(item => {
item = item.trim();
const index = this.constants.formatoArchivos.findIndex(formato => formato.mimeType.toLowerCase() === item.toLowerCase());
if (index > -1) {
if (formatos.findIndex(f => f === this.constants.formatoArchivos[index].nombreTipoArchivo) === -1) {
formatos.push(this.constants.formatoArchivos[index].nombreTipoArchivo);
}
}
});
}
if ( formatos.length > 0) {
let posicion = 0;
formatos.forEach(formato => {
posicion++;
if (formatos.length === posicion) {
this.errorTipoArchivos += formato + '. ';
} else {
this.errorTipoArchivos += formato + ', ';
}
});
}
}
}
/**
* Método encargado de actualizar el modelo de archivos con los archivos
* ingresados por el usuario
*
* @param key LLave del archivo modificado
* @param file Archivo modificado
*/
upload(key, file: File): void {
this.files[key].success = true;
this.uploadFileService.uploadFile(file).subscribe(event => {
if (event.type === HttpEventType.UploadProgress) {
this.status = 'loading';
this.files[key].progress = Math.round((event.loaded / event.total) * 100);
this.detectChange();
} else if (event.type === HttpEventType.Response) {
this.files[key] = event.body;
this.files[key].success = true;
this.filesProcessed ++;
if (this.filesProcessed === this.filesToProcess) {
this.response();
this.status = 'completed';
this.detectChange();
}
}
}, error => {
this.files[key].success = false;
this.snackBar.open(this.constants.errorSubirArchivo, 'X', {
duration: 5000,
panelClass: ['error-snackbar']
});
this.status = 'error';
this.detectChange();
});
}
/**
* Método que permite detectar los cambios del modelo de
* archivos enviados por el usuario
*/
detectChange() {
try {
this.cdRef.detectChanges();
} catch (error) { }
}
/** Gestión de la petición realizada por el usuario */
response() {
if (!this.control.disabled) {
this.onTouch();
if (this.multiple) {
setTimeout(() => {
this.onChange(this.files);
}, 100);
} else {
this.onChange(this.files[0]);
}
}
}
/**
* Método encargado de presentar en el formulario el nombre del archivo cargado
*
* @param file Archivo del cual se va a presentar la información
*/
showNameFile(file): string {
if (!file) {
return '';
}
if (file.name) {
return file.name;
}
if (file.nombre) {
return file.nombre;
}
return '';
}
/** Método encargado de eliminar el ultimo archivo cargado por el usuario */
eliminar() {
this.files = [];
this.control.setValue('');
this.clearInputHiden();
this.control.updateValueAndValidity();
this.onTouch();
this.ngOnInit();
}
/**
* Método encargado de eliminar el indice del archivo de la colección
* de archivos cargados
* @param key Llave del archivo a eliminar
*/
eliminarElemento(key) {
this.onTouch();
this.files.splice(key, 1);
this.response();
this.status = 'deleted';
this.detectChange();
this.clearInputHiden();
}
/** Método encargado de limpiar el archivo cargado por el usuario del modelo */
clearInputHiden() {
this.inputFile.nativeElement.value = '';
}
/**
* Método encargado de obtener el valor de archivo del componente
* que fué adjuntado
*/
getValue() {
return this.constants.adjuntarFile;
}
/**
* Método encargado de establecer el valor digitado por el usuario
* a la variable del modelo del componente
*
* @param value valor digitado por el usuario en el campo del formulario
**/
writeValue(value: any) {
if (typeof value == 'undefined' || value == null || !value) {
this.files = [];
} else if (Array.isArray(value)) {
this.files = value;
} else if (typeof value == 'object' && value) {
if (value['length'] > 0) {
this.files = [value[0]];
} else {
this.files = [value];
}
} else {
this.files = value ? value : [];
}
}
/**
* Método encargado de registar la funcion ingresada al onchange
* del componente
*
* @param fn Funcion con la que se definirá la acción onchange
* del control del formulario
**/
registerOnChange(fn){
this.onChange = fn;
}
/**
* Método encargado de registar la funcion ingresada al ontouched
* del componente
*
* @param fn Funcion con la que se definirá la acción ontouched
* del control del formulario
**/
registerOnTouched(fn) {
this.onTouch = fn;
}
/** Método encargado de establecer el estado de deshabilitado del
* campo del formulario en el componente
*
* @param isDisabled Valor que indica si el campo se encuentra en estado
* dehabilitado
**/
setDisabledState(isDisabled: boolean): void { }
}
| (this.contro | identifier_name |
sigma-form-upload-file.component.ts | import { Component, Input, OnInit, ChangeDetectionStrategy, Optional, Self, ViewChild, ViewEncapsulation, ChangeDetectorRef, ElementRef } from '@angular/core';
import { ControlValueAccessor, FormControl, NgControl, FormControlName, AbstractControl } from '@angular/forms';
import { CONST_SHARED } from '../../constantes-shared';
import { MatFormFieldControl, MatInput, MatSnackBar } from '@angular/material';
import { InputFileMaxValidator, InputFileMinValidator, InputFileAcceptsValidator, InputFileSize } from '../../form/input.file';
import { UploadFileService } from '../../services/upload.file.service';
import { HttpEventType } from '@angular/common/http';
import { WebcamImage } from 'ngx-webcam';
/**
* Componente usado para estandarizar el campo de carga de archivos
* en todos los formularios del sistema
*/
@Component({
// tslint:disable-next-line: component-selector
selector: 'sigma-form-upload-file',
templateUrl: './sigma-form-upload-file.component.html',
styleUrls: ['./sigma-form-upload-file.component.scss'],
providers: [
{
provide: MatFormFieldControl,
useExisting: SigmaFormUploadFileComponent
}
],
changeDetection: ChangeDetectionStrategy.OnPush,
encapsulation: ViewEncapsulation.None
})
export class SigmaFormUploadFileComponent implements OnInit, ControlValueAccessor {
/** Constantes a usar en el componente */
constants = CONST_SHARED;
/** Archivos seleccionados por el usuario */
files = [];
/** Control de formulario al que se asocia el campo */
control: FormControl = new FormControl();
/** Bandera de control para saber si el campo es requerido en el formulario o no */
required = false;
/** Función encargada de la validación de elementos del campo del formulario */
validateFn: Function;
/** Mensaje que indica el error de tipo de archivo */
errorTipoArchivos = '';
/** Listado de errores personalizados que puede presentar el componente */
basicErrors = [
{ name: 'required', message: this.constants.campoRequerido },
];
/** Objeto con valores a procesar actualizados por el usuario */
object = Object;
/** Listado de errores personalizados permitidos por el componente */
@Input('errors') errors: any[] = [];
/** Propiedad Placeholder asociado al campo del formulario */
@Input('placeholder') placeholder: string = '';
/** Cadena de texto con los tipos de archivos aceptados por el componente */
@Input('accept') accept: string = '*';
/** Bandera que indica si el componente admite multiples archivos */
@Input('multiple') multiple: boolean = false;
/** Cantidad maxima de archivos a cargar */
@Input('maxUpload') maxUpload: number = 0;
/** Cantidad mínima de archivos a cargar */
@Input('minUpload') minUpload: number = 0;
/** Bandera que permite identificar si se actualiza la vista del archivo al realizar el cargue */
@Input('autoUpdate') autoUpdate: boolean = true;
/** Bandera que indica si se presentará al usuario la vista previa del archivo */
@Input('showFile') showFile: boolean = false;
/** Tamaño máximo permitido para el cargue del archivo en MBs */
@Input('sizeFile') sizeFile: number = 10;
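/** Acción que se está ejecutando sobre el formulario ('create' o 'edit') */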
@Input('action') action: string = 'create';
status = '';
public filesProcessed = 0;
public filesToProcess = 0;
/** Entrada de tipo de componente que define el campo en el formulario */
@ViewChild('input') input: MatInput;
/** Entrada de tipo de componente que define el campo archivo en el formulario */
@ViewChild('inputFile') inputFile: ElementRef;
/** Definición del método que es llamado al momento de cambiar el dato del
* campo del formulario */
onChange = (_: any) => { }
/** Definición del método que es llamado al momento de realizar acción sobre el
* campo del formulario */
onTouch = () => { }
/**
* Método encargado de construir una instancia de componente
*
* @param ngControl Control de tipo de ng del componente del formulario
* @param _controlName Nombre del Control a usar en el formulario
* @param uploadFileService Servicio de carga de archivos
* @param cdRef Referencia a componente de observable para saber si ha cambiado el valor del componente
* @param snackBar Componente usado para abrir un recuadro modal
*/
constructor(
@Optional() @Self() ngControl: NgControl,
@Optional() private _controlName: FormControlName,
private uploadFileService: UploadFileService,
private cdRef: ChangeDetectorRef,
private snackBar: MatSnackBar,
) {
if (ngControl) {
ngControl.valueAccessor = this;
}
}
/** Método encargado de inicializar el componente */
ngOnInit() {
if (this._controlName) {
this.control = this._controlName.control;
}
this.clearInputHiden();
this.activeRequired(this.control);
this.acceptValid(this.control);
this.addTotalFiles(this.control);
this.addSizeFile(this.control);
this.addErrors();
this.setMensajeErrorTipoArchivo();
}
/** Método que permite saber cual es el archivo seleccionado */
selectFile() {
this.onTouch();
this.inputFile.nativeElement.click();
}
/**
* Método encargado de asignar la bandera de activo al control
* indicado
*
* @param control Control al cual se le asignará la bandera de requerida
*/
activeRequired(control: FormControl) {
if (control.validator != undefined) {
const validator = control.validator({} as AbstractControl);
if (validator && validator.required) {
this.required = true;
}
}
}
/**
* Método que permite validar el tipo de archivo con los permitidos
*
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
acceptValid(control: FormControl) {
if (this.accept) {
let validate = this.setValidateFile(control.validator, InputFileAcceptsValidator(this.accept));
control.setValidators(validate);
}
}
/**
* Método encargado de adicionar los errores identificados
* en el validator a la sección de errores del campo del formulario
*/
addErrors() {
if (this.errors.length > 0) {
this.errors.map(item => {
this.basicErrors.push(item);
});
}
}
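/**
* Método que indica si se debe presentar la opción de adjuntar archivos,
* según el estado del control y la cantidad máxima de archivos permitida
*/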
validateShowAttachFile(): boolean {
if (this.control.disabled) {
return false;
}
if (this.maxUpload === 0 ) {
return true;
}
if ( this.files && ( this.maxUpload > this.files.length) ) {
return true;
} else {
return false;
}
}
/**
* Método encargado de adicionar el total de archivos al componente indicado
*
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
addTotalFiles(control: FormControl) {
let total = this.files ? this.files.length : 0;
if (this.maxUpload > 0) {
let validate = this.setValidateFile(control.validator, InputFileMaxValidator(this.maxUpload, total));
control.setValidators(validate);
}
if (this.minUpload > 0) {
if (this.action === 'edit') {
total = 0;
}
let validate = this.setValidateFile(control.validator, InputFileMinValidator(this.minUpload, total));
control.setValidators(validate);
}
}
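/** Método encargado de reiniciar las validaciones del componente en modo edición */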
resetForm() {
this.action = 'edit';
this.ngOnInit();
}
/**
* Método encargado de adicionar el tamaño de archivo al control indicado
* @param control Control de formulario al cual se le asociará el mensaje de falla o éxito
*/
addSizeFile(control) {
let validate = this.setValidateFile(control.validator, InputFileSize(this.sizeFile));
control.setValidators(validate);
}
/**
* Método encargado de realizar la validación del archivo cargado por el usuario
* @param existeValidate Indica si el archivo ya existe o es nuevo
* @param validate Indica si el archivo es de tipo válido
*/
setValidateFile(existeValidate, validate) {
if (existeValidate) {
return [existeValidate, validate];
} else {
return [validate];
}
}
/**
* Método que permite saber si el control al cual se le asigna el archivo
* es válido o no
*/
validControl(): boolean {
if (this.control.disabled) {
return false;
}
if (this.control.errors) {
if (Object.keys(this.control.errors).length > 0) {
return false;
}
}
if (!this.autoUpdate) {
return false;
}
return true;
}
/**
* Método encargado de adicionar los archivos cargados por el cliente al
* listado de archivos del modelo
*
* @param event Evento con los archivos cargados por el usuario
*/
agregarFiles(event) {
this.onTouch();
this.status = 'adding';
this.filesProcessed = 0;
this.filesToProcess = event.target.files.length;
this.control.setValue(event.target.files);
if (!this.validControl() && this.minUpload === 0) {
return;
}
this.files = [];
for (let file in event.target.files) {
if (typeof event.target.files[file] == 'object') {
this.files.push(event.target.files[file]);
this.upload(file, this.files[file]);
}
}
}
setFiles(files: any) {
this.files = files;
this | is.onTouch();
this.response();
this.status = 'reseting';
this.detectChange();
this.action = 'edit';
this.ngOnInit();
}
/**
* Método que permite la asignación de los errores de tipos de archivo
* a la sección de errores del formulario
*/
setMensajeErrorTipoArchivo() {
this.errorTipoArchivos = this.constants.typeFiles;
if (this.accept) {
let formatos = [] ;
const tipos = this.accept.split(',');
if (tipos) {
tipos.forEach(item => {
item = item.trim();
const index = this.constants.formatoArchivos.findIndex(formato => formato.mimeType.toLowerCase() === item.toLowerCase());
if (index > -1) {
if (formatos.findIndex(f => f === this.constants.formatoArchivos[index].nombreTipoArchivo) === -1) {
formatos.push(this.constants.formatoArchivos[index].nombreTipoArchivo);
}
}
});
}
if ( formatos.length > 0) {
let posicion = 0;
formatos.forEach(formato => {
posicion++;
if (formatos.length === posicion) {
this.errorTipoArchivos += formato + '. ';
} else {
this.errorTipoArchivos += formato + ', ';
}
});
}
}
}
/**
* Método encargado de actualizar el modelo de archivos con los archivos
* ingresados por el usuario
*
* @param key LLave del archivo modificado
* @param file Archivo modificado
*/
upload(key, file: File): void {
this.files[key].success = true;
this.uploadFileService.uploadFile(file).subscribe(event => {
if (event.type === HttpEventType.UploadProgress) {
this.status = 'loading';
this.files[key].progress = Math.round((event.loaded / event.total) * 100);
this.detectChange();
} else if (event.type === HttpEventType.Response) {
this.files[key] = event.body;
this.files[key].success = true;
this.filesProcessed ++;
if (this.filesProcessed === this.filesToProcess) {
this.response();
this.status = 'completed';
this.detectChange();
}
}
}, error => {
this.files[key].success = false;
this.snackBar.open(this.constants.errorSubirArchivo, 'X', {
duration: 5000,
panelClass: ['error-snackbar']
});
this.status = 'error';
this.detectChange();
});
}
/**
* Método que permite detectar los cambios del modelo de
* archivos enviados por el usuario
*/
detectChange() {
try {
this.cdRef.detectChanges();
} catch (error) { }
}
/** Gestión de la petición realizada por el usuario */
response() {
if (!this.control.disabled) {
this.onTouch();
if (this.multiple) {
setTimeout(() => {
this.onChange(this.files);
}, 100);
} else {
this.onChange(this.files[0]);
}
}
}
/**
* Método encargado de presentar en el formulario el nombre del archivo cargado
*
* @param file Archivo del cual se va a presentar la información
*/
showNameFile(file): string {
if (!file) {
return '';
}
if (file.name) {
return file.name;
}
if (file.nombre) {
return file.nombre;
}
return '';
}
/** Método encargado de eliminar el ultimo archivo cargado por el usuario */
eliminar() {
this.files = [];
this.control.setValue('');
this.clearInputHiden();
this.control.updateValueAndValidity();
this.onTouch();
this.ngOnInit();
}
/**
* Método encargado de eliminar el indice del archivo de la colección
* de archivos cargados
* @param key Llave del archivo a eliminar
*/
eliminarElemento(key) {
this.onTouch();
this.files.splice(key, 1);
this.response();
this.status = 'deleted';
this.detectChange();
this.clearInputHiden();
}
/** Método encargado de limpiar el archivo cargado por el usuario del modelo */
clearInputHiden() {
this.inputFile.nativeElement.value = '';
}
/**
* Método encargado de obtener el valor de archivo del componente
* que fué adjuntado
*/
getValue() {
return this.constants.adjuntarFile;
}
/**
* Método encargado de establecer el valor digitado por el usuario
* a la variable del modelo del componente
*
* @param value valor digitado por el usuario en el campo del formulario
**/
writeValue(value: any) {
if (typeof value == 'undefined' || value == null || !value) {
this.files = [];
} else if (Array.isArray(value)) {
this.files = value;
} else if (typeof value == 'object' && value) {
if (value['length'] > 0) {
this.files = [value[0]];
} else {
this.files = [value];
}
} else {
this.files = value ? value : [];
}
}
/**
* Método encargado de registar la funcion ingresada al onchange
* del componente
*
* @param fn Funcion con la que se definirá la acción onchange
* del control del formulario
**/
registerOnChange(fn){
this.onChange = fn;
}
/**
* Método encargado de registar la funcion ingresada al ontouched
* del componente
*
* @param fn Funcion con la que se definirá la acción ontouched
* del control del formulario
**/
registerOnTouched(fn) {
this.onTouch = fn;
}
/** Método encargado de establecer el estado de deshabilitado del
* campo del formulario en el componente
*
* @param isDisabled Valor que indica si el campo se encuentra en estado
* dehabilitado
**/
setDisabledState(isDisabled: boolean): void { }
}
| .response();
this.status = 'rewrite';
this.detectChange();
this.clearInputHiden();
}
resetFormConditions() {
th | identifier_body |
types.rs | use javascriptcore_sys::*;
use std::convert::TryFrom; | macro_rules! retain_release {
($name:ident, $ffi_ref:ty, $retain_fn:tt, $drop_fn:tt) => {
impl Drop for $name {
fn drop(&mut self) {
unsafe { $drop_fn(self.0) };
}
}
impl Clone for $name {
fn clone(&self) -> $name {
let x = unsafe { $retain_fn(self.0) };
$name(x)
}
}
impl Deref for $name {
type Target = $ffi_ref;
fn deref(&self) -> &$ffi_ref {
&self.0
}
}
};
}
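// Marker impls asserting that the JSC wrapper types below can be sent and shared
// across threads; the underlying JavaScriptCore refs are relied upon to be thread-safe.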
unsafe impl Send for GlobalContext {}
unsafe impl Sync for GlobalContext {}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
unsafe impl Send for String {}
unsafe impl Sync for String {}
unsafe impl Send for Object {}
unsafe impl Sync for Object {}
unsafe impl Send for ContextGroup {}
unsafe impl Sync for ContextGroup {}
unsafe impl Send for Value {}
unsafe impl Sync for Value {}
#[derive(Copy, Clone, Debug)]
pub struct Context(pub(crate) JSContextRef);
pub struct ContextGroup(pub(crate) JSContextGroupRef);
pub struct GlobalContext(pub(crate) JSGlobalContextRef);
pub struct Object(pub(crate) Context, pub(crate) JSObjectRef);
pub struct String(pub(crate) JSStringRef);
use std::fmt;
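// Debug for Object walks the object's own property names through the JSC API and
// prints each property together with its stringified value.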
impl fmt::Debug for Object {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut s = f.debug_struct("Object");
unsafe {
let array = JSObjectCopyPropertyNames(*self.0, self.1);
let size = JSPropertyNameArrayGetCount(array);
for i in 0..size {
let js_ref = JSPropertyNameArrayGetNameAtIndex(array, i);
let prop_name = std::string::String::from(&String(js_ref));
let prop_value = Value::from(
self.0,
JSObjectGetPropertyAtIndex(*self.0, self.1, i as u32, null_mut()),
);
s.field(&prop_name, &format!("{:?}", prop_value));
}
}
s.finish()
}
}
impl fmt::Debug for Exception {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Exception")
.field("stack", &self.stack())
.field("message", &self.message())
.finish()
}
}
impl fmt::Display for Exception {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Message: {}", &self.message())?;
writeln!(f, "Stack:")?;
write!(f, "{}", self.stack())
}
}
#[derive(Debug, Copy, Clone)]
pub enum ValueType {
Undefined,
Null,
Boolean,
Number,
String,
Object,
Symbol,
}
#[derive(Debug)]
pub struct Value(
pub(crate) JSValueRef,
pub(crate) ValueType,
pub(crate) Context,
);
pub trait ContextType {
unsafe fn as_ptr(&self) -> JSContextRef;
fn undefined(&self) -> Value {
let ptr = unsafe { self.as_ptr() };
let value = unsafe { JSValueMakeUndefined(ptr) };
Value(value, ValueType::Undefined, Context(ptr))
}
}
impl ContextType for GlobalContext {
unsafe fn as_ptr(&self) -> JSContextRef {
self.0
}
}
impl ContextType for Context {
unsafe fn as_ptr(&self) -> JSContextRef {
self.0
}
}
impl Deref for Context {
type Target = JSContextRef;
fn deref(&self) -> &JSContextRef {
&self.0
}
}
impl Deref for Object {
type Target = JSObjectRef;
fn deref(&self) -> &JSObjectRef {
&self.1
}
}
retain_release!(
ContextGroup,
JSContextGroupRef,
JSContextGroupRetain,
JSContextGroupRelease
);
retain_release!(
GlobalContext,
JSGlobalContextRef,
JSGlobalContextRetain,
JSGlobalContextRelease
);
retain_release!(String, JSStringRef, JSStringRetain, JSStringRelease);
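// A ContextGroup owns a JSContextGroupRef; global contexts created in the same group
// can share JavaScript objects with one another.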
impl ContextGroup {
pub fn new() -> ContextGroup {
let ptr = unsafe { JSContextGroupCreate() };
ContextGroup(ptr)
}
pub fn create_global_context(&self) -> GlobalContext {
let ptr = unsafe { JSGlobalContextCreateInGroup(self.0, null_mut()) };
GlobalContext(ptr)
}
}
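// Wrapper around a thrown JavaScript error object, exposing its `stack` and `message` properties.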
pub struct Exception(Object);
impl Exception {
pub fn stack(&self) -> std::string::String {
let stack_val = self
.0
.get_property(&String::new("stack").unwrap())
.expect("no `stack` property found");
let stack_str = String::try_from(&stack_val).expect("no string property found for `stack`");
std::string::String::from(&stack_str)
}
pub fn message(&self) -> std::string::String {
let message_val = self
.0
.get_property(&String::new("message").unwrap())
.expect("no `message` property found");
let message_str =
String::try_from(&message_val).expect("no string property found for `message`");
std::string::String::from(&message_str)
}
}
impl GlobalContext {
pub fn global_object(&self) -> Object {
let ptr = unsafe { JSContextGetGlobalObject(self.0) };
Object(Context(self.0), ptr)
}
pub fn evaluate_script_sync(&self, script: &String) -> Result<Value, Exception> {
let mut exception = null();
let ret = unsafe {
JSEvaluateScript(self.0, **script, null_mut(), null_mut(), 0, &mut exception)
};
if exception.is_null() {
Ok(Value::from(Context(self.0), ret))
} else {
let value = Value::from(Context(self.0), exception);
let obj = Object::try_from(&value).unwrap();
Err(Exception(obj))
}
}
pub async fn evaluate_script<'a>(&'a self, script: &'a String) -> Result<Value, Exception> {
self.evaluate_script_sync(script)
}
pub fn add_function(
&self,
name: &str,
callback: JsCallback,
) -> Result<(), Box<dyn std::error::Error>> {
let name = String::new(name).unwrap();
let obj = self.global_object();
let fn_obj = obj.make_function_with_callback(&name, callback);
obj.set_property(&name, Value::from(Context(self.0), *fn_obj));
Ok(())
}
}
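// Rust-side signature for functions exposed to JavaScript. `callback_trampoline` adapts
// this signature to the raw C callback ABI: it recovers the Rust fn pointer from the
// JSObject's private data, converts the raw arguments, and routes errors to `exception`.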
type JsCallback =
fn(Context, /*thisObject*/ Object, /*arguments*/ Vec<Value>) -> Result<Value, String>;
extern "C" fn callback_trampoline(
ctx: JSContextRef,
function: JSObjectRef,
this_object: JSObjectRef,
argument_count: usize,
arguments: *const JSValueRef,
exception: *mut JSValueRef,
) -> JSValueRef {
let callback = unsafe {
std::mem::transmute::<*mut ::std::os::raw::c_void, JsCallback>(JSObjectGetPrivate(function))
};
let ctx = Context(ctx);
let args = unsafe {
std::slice::from_raw_parts(arguments, argument_count)
.into_iter()
.map(|v| Value::from(ctx, *v))
.collect::<Vec<_>>()
};
match callback(ctx, Object(ctx, this_object), args) {
Ok(v) => v.0,
Err(e) => unsafe {
*exception = e.to_js_value(&ctx);
JSValueMakeUndefined(ctx.0)
},
}
}
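// Maps the raw JSType discriminant returned by JSValueGetType to the ValueType enum.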
impl ValueType {
unsafe fn from(ctx: Context, value_ref: JSValueRef) -> ValueType {
let raw_ty = JSValueGetType(ctx.0, value_ref);
match raw_ty {
0 => ValueType::Undefined,
1 => ValueType::Null,
2 => ValueType::Boolean,
3 => ValueType::Number,
4 => ValueType::String,
5 => ValueType::Object,
6 => ValueType::Symbol,
_ => unreachable!(),
}
}
}
impl Value {
fn from(ctx: Context, value_ref: JSValueRef) -> Value {
Value(value_ref, unsafe { ValueType::from(ctx, value_ref) }, ctx)
}
pub fn to_string(&self) -> std::string::String {
match self.js_type() {
ValueType::String => {
let js_str = String::try_from(self).expect("string");
std::string::String::from(&js_str)
}
ValueType::Number => {
let n = f64::try_from(self).expect("f64");
format!("{}", n)
}
ValueType::Boolean => {
let v = bool::try_from(self).expect("bool");
format!("{}", v)
}
ValueType::Null => "null".into(),
ValueType::Undefined => "undefined".into(),
ValueType::Symbol => "Symbol(...)".into(),
ValueType::Object => {
let obj = Object::try_from(self).expect("object");
format!("{:?}", obj)
}
}
}
}
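// Class definition whose callAsFunction slot points at the C trampoline above.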
fn rust_function_defn(name: &String) -> JSClassDefinition {
JSClassDefinition {
version: 0,
attributes: 0,
className: **name as *const _,
parentClass: null_mut(),
staticValues: null(),
staticFunctions: null(),
initialize: None,
finalize: None,
hasProperty: None,
getProperty: None,
setProperty: None,
deleteProperty: None,
getPropertyNames: None,
callAsFunction: Some(callback_trampoline),
callAsConstructor: None,
hasInstance: None,
convertToType: None,
}
}
impl Value {
pub fn js_type(&self) -> ValueType {
self.1
}
}
impl Object {
pub fn make_function_with_callback(&self, name: &String, callback: JsCallback) -> Object {
let cls = unsafe { JSClassCreate(&rust_function_defn(name)) };
let ptr = unsafe { JSObjectMake(*self.0, cls, callback as _) };
if unsafe { JSObjectGetPrivate(ptr) } == null_mut() {
panic!("No private");
}
Object(self.0, ptr)
}
pub fn set_property(&self, name: &String, value: Value) {
unsafe { JSObjectSetProperty(*self.0, self.1, **name, value.0, 0, null_mut()) };
}
pub fn get_property(&self, name: &String) -> Result<Value, Value> {
let mut exception = null();
let ret = unsafe { JSObjectGetProperty(*self.0, self.1, **name, &mut exception) };
if exception == null() {
Ok(Value::from(self.0, ret))
} else {
Err(Value::from(self.0, exception))
}
}
pub fn to_js_value(&self) -> Value {
Value(self.1, ValueType::Object, self.0)
}
}
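// UTF-8 conversions between Rust strings and JSStringRef values.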
impl String {
pub fn new(s: &str) -> Result<String, Box<dyn std::error::Error>> {
let s = CString::new(s)?;
let v = unsafe { JSStringCreateWithUTF8CString(s.as_ptr() as *const i8) };
Ok(String(v))
}
pub fn to_js_value(&self, ctx: &Context) -> JSValueRef {
unsafe { JSValueMakeString(**ctx, self.0) }
}
} | use std::ffi::CString;
use std::ops::Deref;
use std::ptr::{null, null_mut};
| random_line_split |
types.rs | use javascriptcore_sys::*;
use std::convert::TryFrom;
use std::ffi::CString;
use std::ops::Deref;
use std::ptr::{null, null_mut};
macro_rules! retain_release {
($name:ident, $ffi_ref:ty, $retain_fn:tt, $drop_fn:tt) => {
impl Drop for $name {
fn drop(&mut self) {
unsafe { $drop_fn(self.0) };
}
}
impl Clone for $name {
fn clone(&self) -> $name {
let x = unsafe { $retain_fn(self.0) };
$name(x)
}
}
impl Deref for $name {
type Target = $ffi_ref;
fn deref(&self) -> &$ffi_ref {
&self.0
}
}
};
}
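// Marker impls asserting that the JSC wrapper types below can be sent and shared
// across threads; the underlying JavaScriptCore refs are relied upon to be thread-safe.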
unsafe impl Send for GlobalContext {}
unsafe impl Sync for GlobalContext {}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
unsafe impl Send for String {}
unsafe impl Sync for String {}
unsafe impl Send for Object {}
unsafe impl Sync for Object {}
unsafe impl Send for ContextGroup {}
unsafe impl Sync for ContextGroup {}
unsafe impl Send for Value {}
unsafe impl Sync for Value {}
#[derive(Copy, Clone, Debug)]
pub struct Context(pub(crate) JSContextRef);
pub struct ContextGroup(pub(crate) JSContextGroupRef);
pub struct GlobalContext(pub(crate) JSGlobalContextRef);
pub struct Object(pub(crate) Context, pub(crate) JSObjectRef);
pub struct String(pub(crate) JSStringRef);
use std::fmt;
impl fmt::Debug for Object {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl fmt::Debug for Exception {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Exception")
.field("stack", &self.stack())
.field("message", &self.message())
.finish()
}
}
impl fmt::Display for Exception {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Message: {}", &self.message())?;
writeln!(f, "Stack:")?;
write!(f, "{}", self.stack())
}
}
#[derive(Debug, Copy, Clone)]
pub enum ValueType {
Undefined,
Null,
Boolean,
Number,
String,
Object,
Symbol,
}
#[derive(Debug)]
pub struct Value(
pub(crate) JSValueRef,
pub(crate) ValueType,
pub(crate) Context,
);
pub trait ContextType {
unsafe fn as_ptr(&self) -> JSContextRef;
fn undefined(&self) -> Value {
let ptr = unsafe { self.as_ptr() };
let value = unsafe { JSValueMakeUndefined(ptr) };
Value(value, ValueType::Undefined, Context(ptr))
}
}
impl ContextType for GlobalContext {
unsafe fn as_ptr(&self) -> JSContextRef {
self.0
}
}
impl ContextType for Context {
unsafe fn as_ptr(&self) -> JSContextRef {
self.0
}
}
impl Deref for Context {
type Target = JSContextRef;
fn deref(&self) -> &JSContextRef {
&self.0
}
}
impl Deref for Object {
type Target = JSObjectRef;
fn deref(&self) -> &JSObjectRef {
&self.1
}
}
retain_release!(
ContextGroup,
JSContextGroupRef,
JSContextGroupRetain,
JSContextGroupRelease
);
retain_release!(
GlobalContext,
JSGlobalContextRef,
JSGlobalContextRetain,
JSGlobalContextRelease
);
retain_release!(String, JSStringRef, JSStringRetain, JSStringRelease);
impl ContextGroup {
pub fn new() -> ContextGroup {
let ptr = unsafe { JSContextGroupCreate() };
ContextGroup(ptr)
}
pub fn create_global_context(&self) -> GlobalContext {
let ptr = unsafe { JSGlobalContextCreateInGroup(self.0, null_mut()) };
GlobalContext(ptr)
}
}
pub struct Exception(Object);
impl Exception {
pub fn stack(&self) -> std::string::String {
let stack_val = self
.0
.get_property(&String::new("stack").unwrap())
.expect("no `stack` property found");
let stack_str = String::try_from(&stack_val).expect("no string property found for `stack`");
std::string::String::from(&stack_str)
}
pub fn message(&self) -> std::string::String {
let message_val = self
.0
.get_property(&String::new("message").unwrap())
.expect("no `message` property found");
let message_str =
String::try_from(&message_val).expect("no string property found for `message`");
std::string::String::from(&message_str)
}
}
impl GlobalContext {
pub fn global_object(&self) -> Object {
let ptr = unsafe { JSContextGetGlobalObject(self.0) };
Object(Context(self.0), ptr)
}
pub fn evaluate_script_sync(&self, script: &String) -> Result<Value, Exception> {
let mut exception = null();
let ret = unsafe {
JSEvaluateScript(self.0, **script, null_mut(), null_mut(), 0, &mut exception)
};
if exception == null_mut() {
Ok(Value::from(Context(self.0), ret))
} else {
let value = Value::from(Context(self.0), exception);
let obj = Object::try_from(&value).unwrap();
Err(Exception(obj))
}
}
pub async fn evaluate_script<'a>(&'a self, script: &'a String) -> Result<Value, Exception> {
self.evaluate_script_sync(script)
}
pub fn add_function(
&self,
name: &str,
callback: JsCallback,
) -> Result<(), Box<dyn std::error::Error>> {
let name = String::new(name).unwrap();
let obj = self.global_object();
let fn_obj = obj.make_function_with_callback(&name, callback);
obj.set_property(&name, Value::from(Context(self.0), *fn_obj));
Ok(())
}
}
type JsCallback =
fn(Context, /*thisObject*/ Object, /*arguments*/ Vec<Value>) -> Result<Value, String>;
extern "C" fn callback_trampoline(
ctx: JSContextRef,
function: JSObjectRef,
this_object: JSObjectRef,
argument_count: usize,
arguments: *const JSValueRef,
exception: *mut JSValueRef,
) -> JSValueRef {
let callback = unsafe {
std::mem::transmute::<*mut ::std::os::raw::c_void, JsCallback>(JSObjectGetPrivate(function))
};
let ctx = Context(ctx);
let args = unsafe {
std::slice::from_raw_parts(arguments, argument_count)
.into_iter()
.map(|v| Value::from(ctx, *v))
.collect::<Vec<_>>()
};
match callback(ctx, Object(ctx, this_object), args) {
Ok(v) => v.0,
Err(e) => unsafe {
*exception = e.to_js_value(&ctx);
JSValueMakeUndefined(ctx.0)
},
}
}
impl ValueType {
unsafe fn from(ctx: Context, value_ref: JSValueRef) -> ValueType {
let raw_ty = JSValueGetType(ctx.0, value_ref);
match raw_ty {
0 => ValueType::Undefined,
1 => ValueType::Null,
2 => ValueType::Boolean,
3 => ValueType::Number,
4 => ValueType::String,
5 => ValueType::Object,
6 => ValueType::Symbol,
_ => unreachable!(),
}
}
}
impl Value {
fn from(ctx: Context, value_ref: JSValueRef) -> Value {
Value(value_ref, unsafe { ValueType::from(ctx, value_ref) }, ctx)
}
pub fn to_string(&self) -> std::string::String {
match self.js_type() {
ValueType::String => {
let js_str = String::try_from(self).expect("string");
std::string::String::from(&js_str)
}
ValueType::Number => {
let n = f64::try_from(self).expect("f64");
format!("{}", n)
}
ValueType::Boolean => {
let v = bool::try_from(self).expect("bool");
format!("{}", v)
}
ValueType::Null => "null".into(),
ValueType::Undefined => "undefined".into(),
ValueType::Symbol => "Symbol(...)".into(),
ValueType::Object => {
let obj = Object::try_from(self).expect("object");
format!("{:?}", obj)
}
}
}
}
fn rust_function_defn(name: &String) -> JSClassDefinition {
JSClassDefinition {
version: 0,
attributes: 0,
className: **name as *const _,
parentClass: null_mut(),
staticValues: null(),
staticFunctions: null(),
initialize: None,
finalize: None,
hasProperty: None,
getProperty: None,
setProperty: None,
deleteProperty: None,
getPropertyNames: None,
callAsFunction: Some(callback_trampoline),
callAsConstructor: None,
hasInstance: None,
convertToType: None,
}
}
impl Value {
pub fn js_type(&self) -> ValueType {
self.1
}
}
impl Object {
pub fn make_function_with_callback(&self, name: &String, callback: JsCallback) -> Object {
let cls = unsafe { JSClassCreate(&rust_function_defn(name)) };
let ptr = unsafe { JSObjectMake(*self.0, cls, callback as _) };
if unsafe { JSObjectGetPrivate(ptr) } == null_mut() {
panic!("No private");
}
Object(self.0, ptr)
}
pub fn set_property(&self, name: &String, value: Value) {
unsafe { JSObjectSetProperty(*self.0, self.1, **name, value.0, 0, null_mut()) };
}
pub fn get_property(&self, name: &String) -> Result<Value, Value> {
let mut exception = null();
let ret = unsafe { JSObjectGetProperty(*self.0, self.1, **name, &mut exception) };
if exception == null() {
Ok(Value::from(self.0, ret))
} else {
Err(Value::from(self.0, exception))
}
}
pub fn to_js_value(&self) -> Value {
Value(self.1, ValueType::Object, self.0)
}
}
impl String {
pub fn new(s: &str) -> Result<String, Box<dyn std::error::Error>> {
let s = CString::new(s)?;
let v = unsafe { JSStringCreateWithUTF8CString(s.as_ptr() as *const i8) };
Ok(String(v))
}
pub fn to_js_value(&self, ctx: &Context) -> JSValueRef {
unsafe { JSValueMakeString(**ctx, self.0) }
}
}
| {
let mut s = f.debug_struct("Object");
unsafe {
let array = JSObjectCopyPropertyNames(*self.0, self.1);
let size = JSPropertyNameArrayGetCount(array);
for i in 0..size {
let js_ref = JSPropertyNameArrayGetNameAtIndex(array, i);
let prop_name = std::string::String::from(&String(js_ref));
let prop_value = Value::from(
self.0,
JSObjectGetPropertyAtIndex(*self.0, self.1, i as u32, null_mut()),
);
s.field(&prop_name, &format!("{:?}", prop_value));
}
}
s.finish()
} | identifier_body |
types.rs | use javascriptcore_sys::*;
use std::convert::TryFrom;
use std::ffi::CString;
use std::ops::Deref;
use std::ptr::{null, null_mut};
macro_rules! retain_release {
($name:ident, $ffi_ref:ty, $retain_fn:tt, $drop_fn:tt) => {
impl Drop for $name {
fn drop(&mut self) {
unsafe { $drop_fn(self.0) };
}
}
impl Clone for $name {
fn clone(&self) -> $name {
let x = unsafe { $retain_fn(self.0) };
$name(x)
}
}
impl Deref for $name {
type Target = $ffi_ref;
fn deref(&self) -> &$ffi_ref {
&self.0
}
}
};
}
unsafe impl Send for GlobalContext {}
unsafe impl Sync for GlobalContext {}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
unsafe impl Send for String {}
unsafe impl Sync for String {}
unsafe impl Send for Object {}
unsafe impl Sync for Object {}
unsafe impl Send for ContextGroup {}
unsafe impl Sync for ContextGroup {}
unsafe impl Send for Value {}
unsafe impl Sync for Value {}
#[derive(Copy, Clone, Debug)]
pub struct Context(pub(crate) JSContextRef);
pub struct ContextGroup(pub(crate) JSContextGroupRef);
pub struct GlobalContext(pub(crate) JSGlobalContextRef);
pub struct Object(pub(crate) Context, pub(crate) JSObjectRef);
pub struct String(pub(crate) JSStringRef);
use std::fmt;
impl fmt::Debug for Object {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut s = f.debug_struct("Object");
unsafe {
let array = JSObjectCopyPropertyNames(*self.0, self.1);
let size = JSPropertyNameArrayGetCount(array);
for i in 0..size {
let js_ref = JSPropertyNameArrayGetNameAtIndex(array, i);
let prop_name = std::string::String::from(&String(js_ref));
let prop_value = Value::from(
self.0,
JSObjectGetPropertyAtIndex(*self.0, self.1, i as u32, null_mut()),
);
s.field(&prop_name, &format!("{:?}", prop_value));
}
}
s.finish()
}
}
impl fmt::Debug for Exception {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Exception")
.field("stack", &self.stack())
.field("message", &self.message())
.finish()
}
}
impl fmt::Display for Exception {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Message: {}", &self.message())?;
writeln!(f, "Stack:")?;
write!(f, "{}", self.stack())
}
}
#[derive(Debug, Copy, Clone)]
pub enum ValueType {
Undefined,
Null,
Boolean,
Number,
String,
Object,
Symbol,
}
#[derive(Debug)]
pub struct Value(
pub(crate) JSValueRef,
pub(crate) ValueType,
pub(crate) Context,
);
pub trait ContextType {
unsafe fn as_ptr(&self) -> JSContextRef;
fn undefined(&self) -> Value {
let ptr = unsafe { self.as_ptr() };
let value = unsafe { JSValueMakeUndefined(ptr) };
Value(value, ValueType::Undefined, Context(ptr))
}
}
impl ContextType for GlobalContext {
unsafe fn as_ptr(&self) -> JSContextRef {
self.0
}
}
impl ContextType for Context {
unsafe fn as_ptr(&self) -> JSContextRef {
self.0
}
}
impl Deref for Context {
type Target = JSContextRef;
fn deref(&self) -> &JSContextRef {
&self.0
}
}
impl Deref for Object {
type Target = JSObjectRef;
fn deref(&self) -> &JSObjectRef {
&self.1
}
}
retain_release!(
ContextGroup,
JSContextGroupRef,
JSContextGroupRetain,
JSContextGroupRelease
);
retain_release!(
GlobalContext,
JSGlobalContextRef,
JSGlobalContextRetain,
JSGlobalContextRelease
);
retain_release!(String, JSStringRef, JSStringRetain, JSStringRelease);
impl ContextGroup {
pub fn new() -> ContextGroup {
let ptr = unsafe { JSContextGroupCreate() };
ContextGroup(ptr)
}
pub fn create_global_context(&self) -> GlobalContext {
let ptr = unsafe { JSGlobalContextCreateInGroup(self.0, null_mut()) };
GlobalContext(ptr)
}
}
pub struct Exception(Object);
impl Exception {
pub fn stack(&self) -> std::string::String {
let stack_val = self
.0
.get_property(&String::new("stack").unwrap())
.expect("no `stack` property found");
let stack_str = String::try_from(&stack_val).expect("no string property found for `stack`");
std::string::String::from(&stack_str)
}
pub fn message(&self) -> std::string::String {
let message_val = self
.0
.get_property(&String::new("message").unwrap())
.expect("no `message` property found");
let message_str =
String::try_from(&message_val).expect("no string property found for `message`");
std::string::String::from(&message_str)
}
}
impl GlobalContext {
pub fn global_object(&self) -> Object {
let ptr = unsafe { JSContextGetGlobalObject(self.0) };
Object(Context(self.0), ptr)
}
pub fn evaluate_script_sync(&self, script: &String) -> Result<Value, Exception> {
let mut exception = null();
let ret = unsafe {
JSEvaluateScript(self.0, **script, null_mut(), null_mut(), 0, &mut exception)
};
if exception == null_mut() {
Ok(Value::from(Context(self.0), ret))
} else {
let value = Value::from(Context(self.0), exception);
let obj = Object::try_from(&value).unwrap();
Err(Exception(obj))
}
}
pub async fn | <'a>(&'a self, script: &'a String) -> Result<Value, Exception> {
self.evaluate_script_sync(script)
}
pub fn add_function(
&self,
name: &str,
callback: JsCallback,
) -> Result<(), Box<dyn std::error::Error>> {
let name = String::new(name).unwrap();
let obj = self.global_object();
let fn_obj = obj.make_function_with_callback(&name, callback);
obj.set_property(&name, Value::from(Context(self.0), *fn_obj));
Ok(())
}
}
type JsCallback =
fn(Context, /*thisObject*/ Object, /*arguments*/ Vec<Value>) -> Result<Value, String>;
extern "C" fn callback_trampoline(
ctx: JSContextRef,
function: JSObjectRef,
this_object: JSObjectRef,
argument_count: usize,
arguments: *const JSValueRef,
exception: *mut JSValueRef,
) -> JSValueRef {
let callback = unsafe {
std::mem::transmute::<*mut ::std::os::raw::c_void, JsCallback>(JSObjectGetPrivate(function))
};
let ctx = Context(ctx);
let args = unsafe {
std::slice::from_raw_parts(arguments, argument_count)
.into_iter()
.map(|v| Value::from(ctx, *v))
.collect::<Vec<_>>()
};
match callback(ctx, Object(ctx, this_object), args) {
Ok(v) => v.0,
Err(e) => unsafe {
*exception = e.to_js_value(&ctx);
JSValueMakeUndefined(ctx.0)
},
}
}
impl ValueType {
unsafe fn from(ctx: Context, value_ref: JSValueRef) -> ValueType {
let raw_ty = JSValueGetType(ctx.0, value_ref);
match raw_ty {
0 => ValueType::Undefined,
1 => ValueType::Null,
2 => ValueType::Boolean,
3 => ValueType::Number,
4 => ValueType::String,
5 => ValueType::Object,
6 => ValueType::Symbol,
_ => unreachable!(),
}
}
}
impl Value {
fn from(ctx: Context, value_ref: JSValueRef) -> Value {
Value(value_ref, unsafe { ValueType::from(ctx, value_ref) }, ctx)
}
pub fn to_string(&self) -> std::string::String {
match self.js_type() {
ValueType::String => {
let js_str = String::try_from(self).expect("string");
std::string::String::from(&js_str)
}
ValueType::Number => {
let n = f64::try_from(self).expect("f64");
format!("{}", n)
}
ValueType::Boolean => {
let v = bool::try_from(self).expect("bool");
format!("{}", v)
}
ValueType::Null => "null".into(),
ValueType::Undefined => "undefined".into(),
ValueType::Symbol => "Symbol(...)".into(),
ValueType::Object => {
let obj = Object::try_from(self).expect("object");
format!("{:?}", obj)
}
}
}
}
fn rust_function_defn(name: &String) -> JSClassDefinition {
JSClassDefinition {
version: 0,
attributes: 0,
className: **name as *const _,
parentClass: null_mut(),
staticValues: null(),
staticFunctions: null(),
initialize: None,
finalize: None,
hasProperty: None,
getProperty: None,
setProperty: None,
deleteProperty: None,
getPropertyNames: None,
callAsFunction: Some(callback_trampoline),
callAsConstructor: None,
hasInstance: None,
convertToType: None,
}
}
impl Value {
pub fn js_type(&self) -> ValueType {
self.1
}
}
impl Object {
pub fn make_function_with_callback(&self, name: &String, callback: JsCallback) -> Object {
let cls = unsafe { JSClassCreate(&rust_function_defn(name)) };
let ptr = unsafe { JSObjectMake(*self.0, cls, callback as _) };
if unsafe { JSObjectGetPrivate(ptr) } == null_mut() {
panic!("No private");
}
Object(self.0, ptr)
}
pub fn set_property(&self, name: &String, value: Value) {
unsafe { JSObjectSetProperty(*self.0, self.1, **name, value.0, 0, null_mut()) };
}
pub fn get_property(&self, name: &String) -> Result<Value, Value> {
let mut exception = null();
let ret = unsafe { JSObjectGetProperty(*self.0, self.1, **name, &mut exception) };
if exception == null() {
Ok(Value::from(self.0, ret))
} else {
Err(Value::from(self.0, exception))
}
}
pub fn to_js_value(&self) -> Value {
Value(self.1, ValueType::Object, self.0)
}
}
impl String {
pub fn new(s: &str) -> Result<String, Box<dyn std::error::Error>> {
let s = CString::new(s)?;
let v = unsafe { JSStringCreateWithUTF8CString(s.as_ptr() as *const i8) };
Ok(String(v))
}
pub fn to_js_value(&self, ctx: &Context) -> JSValueRef {
unsafe { JSValueMakeString(**ctx, self.0) }
}
}
| evaluate_script | identifier_name |
setting.go | package handle
import (
"encoding/json"
"fmt"
"itflow/cache"
"itflow/db"
"itflow/encrypt"
"itflow/internal/response"
"itflow/internal/role"
"itflow/internal/user"
"net/http"
"strings"
"time"
"github.com/hyahm/golog"
"github.com/hyahm/xmux"
)
func CreateUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
// nickname := xmux.GetInstance(r).Get("nickname").(string)
uid := xmux.GetInstance(r).Get("uid").(int64)
createTime := time.Now().Unix()
getuser := xmux.GetInstance(r).Data.(*user.GetAddUser)
if strings.Contains(getuser.Nickname, "@") {
w.Write(errorcode.Error("昵称不能包含@符号"))
return
}
enpassword := encrypt.PwdEncrypt(getuser.Password, cache.Salt)
var err error
db.Mconn.OpenDebug()
errorcode.Id, err = db.Mconn.Insert(`insert into user(nickname, password, email, createtime, createuid, realname, jid) values(
?,?,?,?,?,?,
(select id from jobs where name=?))`, getuser.Nickname,
enpassword, getuser.Email, createTime,
uid, getuser.RealName, getuser.Position)
golog.Info(db.Mconn.GetSql())
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
cache.CacheEmail.SendMail("成功创建用户",
fmt.Sprintf(`<html><body><h1>已成功创建用户<h1>登录网址:<a href="%s">%s</a></br>用户名: %s</br> 密码: %s</br>邮箱: %s</body></html>`, r.Referer(), r.Referer(), getuser.Nickname, getuser.Password, getuser.Email),
getuser.Email)
// // 验证组和职位不能为空
// if getuser.StatusGroup == "" || getuser.RoleGroup == "" || getuser.Position == "" {
// w.Write(errorcode.Error("验证组和职位不能为空"))
// return
// }
// //1,先要验证nickname 是否有重复的
// if _, ok := cache.CacheNickNameUid[getuser.Nickname]; ok {
// w.Write(errorcode.Error("nickname 重复"))
// return
// }
// //验证邮箱 是否有重复的
// var hasemail bool
// for _, v := range cache.CacheUidEmail {
// if v == getuser.Email {
// hasemail = true
// }
// }
// if hasemail {
// w.Write(errorcode.Error("email 重复"))
// return
// }
// ids := make([]string, 0)
// for k := range cache.CacheSidStatus {
// ids = append(ids, strconv.FormatInt(k.ToInt64(), 10))
// }
// var sgid int64
// var hassggroup bool
// for k, v := range cache.CacheSgidGroup {
// if v == getuser.StatusGroup {
// sgid = k
// hassggroup = true
// break
// }
// }
// var rid int64
// err := model.CheckRoleNameInGroup(getuser.RoleGroup, &rid)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// if !hassggroup {
// w.Write(errorcode.Error("没有找到权限"))
// return
// }
// // 获取级别,如果这个职位不存在,就返回错误
// var jid int64
// var ok bool
// if jid, ok = cache.CacheJobnameJid[getuser.Position]; !ok {
// w.Write(errorcode.Error("职位不存在"))
// return
// }
// // 增加用户
// user := model.User{
// NickName: getuser.Nickname,
// RealName: getuser.RealName,
// Password: enpassword,
// Email: getuser.Email,
// CreateId: uid,
// ShowStatus: cache.StoreLevelId(strings.Join(ids, ",")),
// BugGroupId: sgid,
// Roleid: rid,
// Jobid: jid,
// }
// err = user.Create()
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// //更新缓存
// send, _ := json.Marshal(errorcode)
w.Write(errorcode.Success())
return
}
func RemoveUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
id := r.FormValue("id")
// 判断是否有bug
var count int
err := db.Mconn.GetOne("select count(id) from bugs where uid=?", id).Scan(&count)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
if count > 0 {
golog.Error("uid:%v,has bugs,can not remove")
w.Write(errorcode.IsUse())
return
}
// 查看用户组是否存在此用户
userrows, err := db.Mconn.GetRows("select ids from usergroup")
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
var hasgroup bool
for userrows.Next() {
var ids string
userrows.Scan(&ids)
for _, v := range strings.Split(ids, ",") {
if v == id {
hasgroup = true
break
}
}
if hasgroup {
w.Write(errorcode.Error("还有group"))
return
}
}
userrows.Close()
_, err = db.Mconn.Update("delete from user where id=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func DisableUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
id := r.FormValue("id")
var err error
_, err = db.Mconn.Update("update user set disable=ABS(disable-1) where id=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
_, err = db.Mconn.Update("update bugs set dustbin=ABS(dustbin-1) where uid=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
// 显示自己能管理的权限,不显示自己的
func UserList(w http.ResponseWriter, r *http.Request) {
uid := xmux.GetInstance(r).Get("uid").(int64)
errorcode := &response.Response{}
uls := &user.UserList{}
if uid == cache.SUPERID {
getallsql := `select u.id,createtime,realname,nickname,email,disable,j.name from
user as u
join jobs as j
on u.jid = j.id and u.id<>?`
adminrows, err := db.Mconn.GetRows(getallsql, cache.SUPERID)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
for adminrows.Next() {
ul := &user.User{}
err = adminrows.Scan(&ul.Id, &ul.Createtime, &ul.Realname, &ul.Nickname, &ul.Email,
&ul.Disable, &ul.Position)
if err != nil {
golog.Info(err)
continue
}
uls.Userlist = append(uls.Userlist, ul)
}
adminrows.Close()
send, _ := json.Marshal(uls)
w.W | db.Mconn.GetRows(getallsql, uid)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
for adminrows.Next() {
ul := &user.User{}
err = adminrows.Scan(&ul.Id, &ul.Createtime, &ul.Realname, &ul.Nickname, &ul.Email,
&ul.Disable, &ul.RoleGroup, &ul.StatusGroup, &ul.Position)
if err != nil {
golog.Info(err)
continue
}
uls.Userlist = append(uls.Userlist, ul)
}
adminrows.Close()
send, _ := json.Marshal(uls)
w.Write(send)
return
}
}
func UserUpdate(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
uid := xmux.GetInstance(r).Get("uid").(int64)
if cache.SUPERID != uid {
w.Write(errorcode.ErrorNoPermission())
return
}
uls := xmux.GetInstance(r).Data.(*user.User)
// 0是系统管理员, 1是管理层, 2是普通用户
//switch level {
//case 0:
// var hasstatusgroup bool
// var rid int64
// var bsid int64
// err := model.CheckRoleNameInGroup(uls.RoleGroup, &rid)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// for k, v := range cache.CacheSgidGroup {
// if v == uls.StatusGroup {
// bsid = k
// hasstatusgroup = true
// break
// }
// }
// if _, ok := cache.CacheJobnameJid[uls.Position]; !ok {
// w.Write(errorcode.Error("没有找到职位"))
// return
// }
// if !hasstatusgroup {
// w.Write(errorcode.Error("没有找到status"))
// return
// }
if strings.Contains(uls.Nickname, "@") {
w.Write(errorcode.Error("昵称不能包含@符号"))
return
}
getallsql := `update user set
realname=?, nickname=?, email=?,
jid=(select coalesce(min(id),0) from jobs where name=?)
where id=?`
_, err := db.Mconn.Update(getallsql,
uls.Realname, uls.Nickname, uls.Email, uls.Position,
uls.Id,
)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func ChangePassword(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
getuser := xmux.GetInstance(r).Data.(*user.ChangePasswod)
uid := xmux.GetInstance(r).Get("uid").(int64)
getaritclesql := "select count(id) from user where id=? and password=?"
oldpassword := encrypt.PwdEncrypt(getuser.Oldpassword, cache.Salt)
var n int
err := db.Mconn.GetOne(getaritclesql, uid, oldpassword).Scan(&n)
if err != nil || n != 1 {
golog.Error(err)
w.Write(errorcode.ErrorNoPermission())
return
}
newpassword := encrypt.PwdEncrypt(getuser.Newpassword, cache.Salt)
chpwdsql := "update user set password=? where id=?"
_, err = db.Mconn.Update(chpwdsql, newpassword, uid)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func GetRoles(w http.ResponseWriter, r *http.Request) {
rl := &role.RespRoles{}
w.Write(rl.List())
return
}
// func GetThisRoles(w http.ResponseWriter, r *http.Request) {
// errorcode := &response.Response{}
// rl := &getroles{}
// id := r.FormValue("id")
// var rolestring string
// err := db.Mconn.GetOne("select rolestring from user where id=?", id).Scan(&rolestring)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ConnectMysqlFail())
// return
// }
// send, _ := json.Marshal(rl)
// w.Write(send)
// return
// }
type sendGroup struct {
Groups []string `json:"groups"`
Code int `json:"code"`
}
func GetGroup(w http.ResponseWriter, r *http.Request) {
sg := &sendGroup{}
send, _ := json.Marshal(sg)
w.Write(send)
return
}
type sty struct {
Ts map[int]string `json:"ts"`
Code int `json:"code"`
}
func GetTaskTyp(w http.ResponseWriter, r *http.Request) {
ts := &sty{
Ts: make(map[int]string, 0),
}
rows, err := db.Mconn.GetRows("select id,name from typ")
if err != nil {
w.Write([]byte(fmt.Sprintf(`{"code": 2, "msg": "%s"}`, err.Error())))
return
}
for rows.Next() {
var t string
var id int
err = rows.Scan(&id, &t)
if err != nil {
golog.Info(err)
continue
}
ts.Ts[id] = t
}
send, _ := json.Marshal(ts)
w.Write(send)
return
}
func ResetPwd(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
rp := xmux.GetInstance(r).Data.(*user.ResetPassword)
newpassword := encrypt.PwdEncrypt(rp.Password, cache.Salt)
updatepwdsql := "update user set password=? where id=?"
_, err := db.Mconn.Update(updatepwdsql, newpassword, rp.Id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
| rite(send)
return
} else {
getallsql := `select u.id,createtime,realname,nickname,email,disable, j.name from
user as u join jobs as j
on u.jid in (select id from jobs where hypo=(select jid from user where id=?))`
adminrows, err := | conditional_block |
setting.go | package handle
import (
"encoding/json"
"fmt"
"itflow/cache"
"itflow/db"
"itflow/encrypt"
"itflow/internal/response"
"itflow/internal/role"
"itflow/internal/user"
"net/http"
"strings"
"time"
"github.com/hyahm/golog"
"github.com/hyahm/xmux"
)
func CreateUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
// nickname := xmux.GetInstance(r).Get("nickname").(string)
uid := xmux.GetInstance(r).Get("uid").(int64)
createTime := time.Now().Unix()
getuser := xmux.GetInstance(r).Data.(*user.GetAddUser)
if strings.Contains(getuser.Nickname, "@") {
w.Write(errorcode.Error("昵称不能包含@符号"))
return
}
enpassword := encrypt.PwdEncrypt(getuser.Password, cache.Salt)
var err error
db.Mconn.OpenDebug()
errorcode.Id, err = db.Mconn.Insert(`insert into user(nickname, password, email, createtime, createuid, realname, jid) values(
?,?,?,?,?,?,
(select id from jobs where name=?))`, getuser.Nickname,
enpassword, getuser.Email, createTime,
uid, getuser.RealName, getuser.Position)
golog.Info(db.Mconn.GetSql())
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
cache.CacheEmail.SendMail("成功创建用户",
fmt.Sprintf(`<html><body><h1>已成功创建用户<h1>登录网址:<a href="%s">%s</a></br>用户名: %s</br> 密码: %s</br>邮箱: %s</body></html>`, r.Referer(), r.Referer(), getuser.Nickname, getuser.Password, getuser.Email),
getuser.Email)
// // 验证组和职位不能为空
// if getuser.StatusGroup == "" || getuser.RoleGroup == "" || getuser.Position == "" {
// w.Write(errorcode.Error("验证组和职位不能为空"))
// return
// }
// //1,先要验证nickname 是否有重复的
// if _, ok := cache.CacheNickNameUid[getuser.Nickname]; ok {
// w.Write(errorcode.Error("nickname 重复"))
// return
// }
// //验证邮箱 是否有重复的
// var hasemail bool
// for _, v := range cache.CacheUidEmail {
// if v == getuser.Email {
// hasemail = true
// }
// }
// if hasemail {
// w.Write(errorcode.Error("email 重复"))
// return
// }
// ids := make([]string, 0)
// for k := range cache.CacheSidStatus {
// ids = append(ids, strconv.FormatInt(k.ToInt64(), 10))
// }
// var sgid int64
// var hassggroup bool
// for k, v := range cache.CacheSgidGroup {
// if v == getuser.StatusGroup {
// sgid = k
// hassggroup = true
// break
// }
// }
// var rid int64
// err := model.CheckRoleNameInGroup(getuser.RoleGroup, &rid)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// if !hassggroup {
// w.Write(errorcode.Error("没有找到权限"))
// return
// }
// // 获取级别,如果这个职位不存在,就返回错误
// var jid int64
// var ok bool
// if jid, ok = cache.CacheJobnameJid[getuser.Position]; !ok {
// w.Write(errorcode.Error("职位不存在"))
// return
// }
// // 增加用户
// user := model.User{
// NickName: getuser.Nickname,
// RealName: getuser.RealName,
// Password: enpassword,
// Email: getuser.Email,
// CreateId: uid,
// ShowStatus: cache.StoreLevelId(strings.Join(ids, ",")),
// BugGroupId: sgid,
// Roleid: rid,
// Jobid: jid,
// }
// err = user.Create()
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// //更新缓存
// send, _ := json.Marshal(errorcode)
w.Write(errorcode.Success())
return
}
func RemoveUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
id := r.FormValue("id")
// 判断是否有bug
var count int
err := db.Mconn.GetOne("select count(id) from bugs where uid=?", id).Scan(&count)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
if count > 0 {
golog.Error("uid:%v,has bugs,can not remove")
w.Write(errorcode.IsUse())
return
}
// 查看用户组是否存在此用户
userrows, err := db.Mconn.GetRows("select ids from usergroup")
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
var hasgroup bool
for userrows.Next() {
var ids string
userrows.Scan(&ids)
for _, v := range strings.Split(ids, ",") {
if v == id {
hasgroup = true
break
}
}
if hasgroup {
w.Write(errorcode.Error("还有group"))
return
}
}
userrows.Close()
_, err = db.Mconn.Update("delete from user where id=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func DisableUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
id := r.FormValue("id")
var err error
_, err = db.Mconn.Update("update user set disable=ABS(disable-1) where id=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcod | r))
return
}
_, err = db.Mconn.Update("update bugs set dustbin=ABS(dustbin-1) where uid=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
// 显示自己能管理的权限,不显示自己的
func UserList(w http.ResponseWriter, r *http.Request) {
uid := xmux.GetInstance(r).Get("uid").(int64)
errorcode := &response.Response{}
uls := &user.UserList{}
if uid == cache.SUPERID {
getallsql := `select u.id,createtime,realname,nickname,email,disable,j.name from
user as u
join jobs as j
on u.jid = j.id and u.id<>?`
adminrows, err := db.Mconn.GetRows(getallsql, cache.SUPERID)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
for adminrows.Next() {
ul := &user.User{}
err = adminrows.Scan(&ul.Id, &ul.Createtime, &ul.Realname, &ul.Nickname, &ul.Email,
&ul.Disable, &ul.Position)
if err != nil {
golog.Info(err)
continue
}
uls.Userlist = append(uls.Userlist, ul)
}
adminrows.Close()
send, _ := json.Marshal(uls)
w.Write(send)
return
} else {
getallsql := `select u.id,createtime,realname,nickname,email,disable, j.name from
user as u join jobs as j
on u.jid in (select id from jobs where hypo=(select jid from user where id=?))`
adminrows, err := db.Mconn.GetRows(getallsql, uid)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
for adminrows.Next() {
ul := &user.User{}
err = adminrows.Scan(&ul.Id, &ul.Createtime, &ul.Realname, &ul.Nickname, &ul.Email,
&ul.Disable, &ul.RoleGroup, &ul.StatusGroup, &ul.Position)
if err != nil {
golog.Info(err)
continue
}
uls.Userlist = append(uls.Userlist, ul)
}
adminrows.Close()
send, _ := json.Marshal(uls)
w.Write(send)
return
}
}
func UserUpdate(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
uid := xmux.GetInstance(r).Get("uid").(int64)
if cache.SUPERID != uid {
w.Write(errorcode.ErrorNoPermission())
return
}
uls := xmux.GetInstance(r).Data.(*user.User)
// 0是系统管理员, 1是管理层, 2是普通用户
//switch level {
//case 0:
// var hasstatusgroup bool
// var rid int64
// var bsid int64
// err := model.CheckRoleNameInGroup(uls.RoleGroup, &rid)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// for k, v := range cache.CacheSgidGroup {
// if v == uls.StatusGroup {
// bsid = k
// hasstatusgroup = true
// break
// }
// }
// if _, ok := cache.CacheJobnameJid[uls.Position]; !ok {
// w.Write(errorcode.Error("没有找到职位"))
// return
// }
// if !hasstatusgroup {
// w.Write(errorcode.Error("没有找到status"))
// return
// }
if strings.Contains(uls.Nickname, "@") {
w.Write(errorcode.Error("昵称不能包含@符号"))
return
}
getallsql := `update user set
realname=?, nickname=?, email=?,
jid=(select coalesce(min(id),0) from jobs where name=?)
where id=?`
_, err := db.Mconn.Update(getallsql,
uls.Realname, uls.Nickname, uls.Email, uls.Position,
uls.Id,
)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func ChangePassword(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
getuser := xmux.GetInstance(r).Data.(*user.ChangePasswod)
uid := xmux.GetInstance(r).Get("uid").(int64)
getaritclesql := "select count(id) from user where id=? and password=?"
oldpassword := encrypt.PwdEncrypt(getuser.Oldpassword, cache.Salt)
var n int
err := db.Mconn.GetOne(getaritclesql, uid, oldpassword).Scan(&n)
if err != nil || n != 1 {
golog.Error(err)
w.Write(errorcode.ErrorNoPermission())
return
}
newpassword := encrypt.PwdEncrypt(getuser.Newpassword, cache.Salt)
chpwdsql := "update user set password=? where id=?"
_, err = db.Mconn.Update(chpwdsql, newpassword, uid)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func GetRoles(w http.ResponseWriter, r *http.Request) {
rl := &role.RespRoles{}
w.Write(rl.List())
return
}
// func GetThisRoles(w http.ResponseWriter, r *http.Request) {
// errorcode := &response.Response{}
// rl := &getroles{}
// id := r.FormValue("id")
// var rolestring string
// err := db.Mconn.GetOne("select rolestring from user where id=?", id).Scan(&rolestring)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ConnectMysqlFail())
// return
// }
// send, _ := json.Marshal(rl)
// w.Write(send)
// return
// }
type sendGroup struct {
Groups []string `json:"groups"`
Code int `json:"code"`
}
func GetGroup(w http.ResponseWriter, r *http.Request) {
sg := &sendGroup{}
send, _ := json.Marshal(sg)
w.Write(send)
return
}
type sty struct {
Ts map[int]string `json:"ts"`
Code int `json:"code"`
}
func GetTaskTyp(w http.ResponseWriter, r *http.Request) {
ts := &sty{
Ts: make(map[int]string, 0),
}
rows, err := db.Mconn.GetRows("select id,name from typ")
if err != nil {
w.Write([]byte(fmt.Sprintf(`{"code": 2, "msg": "%s"}`, err.Error())))
return
}
for rows.Next() {
var t string
var id int
err = rows.Scan(&id, &t)
if err != nil {
golog.Info(err)
continue
}
ts.Ts[id] = t
}
send, _ := json.Marshal(ts)
w.Write(send)
return
}
func ResetPwd(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
rp := xmux.GetInstance(r).Data.(*user.ResetPassword)
newpassword := encrypt.PwdEncrypt(rp.Password, cache.Salt)
updatepwdsql := "update user set password=? where id=?"
_, err := db.Mconn.Update(updatepwdsql, newpassword, rp.Id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
| e.ErrorE(er | identifier_name |
setting.go | package handle
import (
"encoding/json"
"fmt"
"itflow/cache"
"itflow/db"
"itflow/encrypt"
"itflow/internal/response"
"itflow/internal/role"
"itflow/internal/user"
"net/http"
"strings"
"time"
"github.com/hyahm/golog"
"github.com/hyahm/xmux"
)
func CreateUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
// nickname := xmux.GetInstance(r).Get("nickname").(string)
uid := xmux.GetInstance(r).Get("uid").(int64)
createTime := time.Now().Unix()
getuser := xmux.GetInstance(r).Data.(*user.GetAddUser)
if strings.Contains(getuser.Nickname, "@") {
w.Write(errorcode.Error("昵称不能包含@符号"))
return
}
enpassword := encrypt.PwdEncrypt(getuser.Password, cache.Salt)
var err error
db.Mconn.OpenDebug()
errorcode.Id, err = db.Mconn.Insert(`insert into user(nickname, password, email, createtime, createuid, realname, jid) values(
?,?,?,?,?,?,
(select id from jobs where name=?))`, getuser.Nickname,
enpassword, getuser.Email, createTime,
uid, getuser.RealName, getuser.Position)
golog.Info(db.Mconn.GetSql())
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
cache.CacheEmail.SendMail("成功创建用户",
fmt.Sprintf(`<html><body><h1>已成功创建用户<h1>登录网址:<a href="%s">%s</a></br>用户名: %s</br> 密码: %s</br>邮箱: %s</body></html>`, r.Referer(), r.Referer(), getuser.Nickname, getuser.Password, getuser.Email),
getuser.Email)
// // 验证组和职位不能为空
// if getuser.StatusGroup == "" || getuser.RoleGroup == "" || getuser.Position == "" {
// w.Write(errorcode.Error("验证组和职位不能为空"))
// return
// }
// //1,先要验证nickname 是否有重复的
// if _, ok := cache.CacheNickNameUid[getuser.Nickname]; ok {
// w.Write(errorcode.Error("nickname 重复"))
// return
// }
// //验证邮箱 是否有重复的
// var hasemail bool
// for _, v := range cache.CacheUidEmail {
// if v == getuser.Email {
// hasemail = true
// }
// }
// if hasemail {
// w.Write(errorcode.Error("email 重复"))
// return
// }
// ids := make([]string, 0)
// for k := range cache.CacheSidStatus {
// ids = append(ids, strconv.FormatInt(k.ToInt64(), 10))
// }
// var sgid int64
// var hassggroup bool
// for k, v := range cache.CacheSgidGroup {
// if v == getuser.StatusGroup {
// sgid = k
// hassggroup = true
// break
// }
// }
// var rid int64
// err := model.CheckRoleNameInGroup(getuser.RoleGroup, &rid)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// if !hassggroup {
// w.Write(errorcode.Error("没有找到权限"))
// return
// }
// // 获取级别,如果这个职位不存在,就返回错误
// var jid int64
// var ok bool
// if jid, ok = cache.CacheJobnameJid[getuser.Position]; !ok {
// w.Write(errorcode.Error("职位不存在"))
// return
// }
// // 增加用户
// user := model.User{
// NickName: getuser.Nickname,
// RealName: getuser.RealName,
// Password: enpassword,
// Email: getuser.Email,
// CreateId: uid,
// ShowStatus: cache.StoreLevelId(strings.Join(ids, ",")),
// BugGroupId: sgid,
// Roleid: rid,
// Jobid: jid,
// }
// err = user.Create()
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// //更新缓存
// send, _ := json.Marshal(errorcode)
w.Write(errorcode.Success())
return
}
func RemoveUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
id := r.FormValue("id")
// 判断是否有bug
var count int
err := db.Mconn.GetOne("select count(id) from bugs where uid=?", id).Scan(&count)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
if count > 0 {
golog.Error("uid:%v,has bugs,can not remove")
w.Write(errorcode.IsUse())
return
}
// 查看用户组是否存在此用户
userrows, err := db.Mconn.GetRows("select ids from usergroup")
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
var hasgroup bool
for userrows.Next() {
var ids string
userrows.Scan(&ids)
for _, v := range strings.Split(ids, ",") {
if v == id {
hasgroup = true
break
}
}
if hasgroup {
w.Write(errorcode.Error("还有group"))
return
}
}
userrows.Close()
_, err = db.Mconn.Update("delete from user where id=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func DisableUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
id := r.FormValue("id")
var err error
_, err = db.Mconn.Update("update user set disable=ABS(disable-1) where id=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
_, err = db.Mconn.Update("update bugs set dustbin=ABS(dustbin-1) where uid=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
// 显示自己能管理的权限,不显示自己的
func UserList(w http.ResponseWriter, r *http.Request) {
uid := xmux.GetInstance(r).Get("uid").(int64)
errorcode := &response.Response{}
uls := &user.UserList{}
if uid == cache.SUPERID {
getallsql := `select u.id,createtime,realname,nickname,email,disable,j.name from
user as u
join jobs as j
on u.jid = j.id and u.id<>?`
adminrows, err := db.Mconn.GetRows(getallsql, cache.SUPERID)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
for adminrows.Next() {
ul := &user.User{}
err = adminrows.Scan(&ul.Id, &ul.Createtime, &ul.Realname, &ul.Nickname, &ul.Email,
&ul.Disable, &ul.Position)
if err != nil {
golog.Info(err)
continue
}
uls.Userlist = append(uls.Userlist, ul)
}
adminrows.Close()
send, _ := json.Marshal(uls)
w.Write(send)
return
} else {
getallsql := `select u.id,createtime,realname,nickname,email,disable, j.name from
user as u join jobs as j
on u.jid in (select id from jobs where hypo=(select jid from user where id=?))`
adminrows, err := db.Mconn.GetRows(getallsql, uid)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
for adminrows.Next() {
ul := &user.User{}
err = adminrows.Scan(&ul.Id, &ul.Createtime, &ul.Realname, &ul.Nickname, &ul.Email,
&ul.Disable, &ul.RoleGroup, &ul.StatusGroup, &ul.Position)
if err != nil {
golog.Info(err)
continue
}
uls.Userlist = append(uls.Userlist, ul)
}
adminrows.Close()
send, _ := json.Marshal(uls)
w.Write(send)
return
}
}
func UserUpdate(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
uid := xmux.GetInstance(r).Get("uid").(int64)
if cache.SUPERID != uid {
w.Write(errorcode.ErrorNoPermission())
return
}
uls := xmux.GetInstance(r).Data.(*user.User)
// 0是系统管理员, 1是管理层, 2是普通用户
//switch level {
//case 0:
// var hasstatusgroup bool
// var rid int64
// var bsid int64
// err := model.CheckRoleNameInGroup(uls.RoleGroup, &rid)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// for k, v := range cache.CacheSgidGroup {
// if v == uls.StatusGroup {
// bsid = k
// hasstatusgroup = true
// break
// }
// }
// if _, ok := cache.CacheJobnameJid[uls.Position]; !ok {
// w.Write(errorcode.Error("没有找到职位"))
// return
// }
// if !hasstatusgroup {
// w.Write(errorcode.Error("没有找到status"))
// return
// }
if strings.Contains(uls.Nickname, "@") {
w.Write(errorcode.Error("昵称不能包含@符号"))
return
}
getallsql := `update user set
realname=?, nickname=?, email=?,
jid=(select coalesce(min(id),0) from jobs where name=?)
where id=?`
_, err := db.Mconn.Update(getallsql,
uls.Realname, uls.Nickname, uls.Email, uls.Position,
uls.Id,
)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func ChangePassword(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
getuser := xmux.GetInstance(r).Data.(*user.ChangePasswod)
uid := xmux.GetInstance(r).Get("uid").(int64)
getaritclesql := "select count(id) from user where id=? and password=?"
oldpassword := encrypt.PwdEncrypt(getuser.Oldpassword, cache.Salt)
var n int
err := db.Mconn.GetOne(getaritclesql, uid, oldpassword).Scan(&n)
if err != nil || n != 1 {
golog.Error(err)
w.Write(errorcode.ErrorNoPermission())
return
}
newpassword := encrypt.PwdEncrypt(getuser.Newpassword, cache.Salt)
chpwdsql := "update user set password=? where id=?"
_, err = db.Mconn.Update(chpwdsql, newpassword, uid)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func GetRoles(w http.ResponseWriter, r *http.Request) {
rl := &role.RespRoles{}
w.Write(rl.List())
return
}
// func GetThisRoles(w http.ResponseWriter, r *http.Request) {
// errorcode := &response.Response{}
// rl := &getroles{}
// id := r.FormValue("id")
// var rolestring string
// err := db.Mconn.GetOne("select rolestring from user where id=?", id).Scan(&rolestring)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ConnectMysqlFail())
// return
// }
// send, _ := json.Marshal(rl)
// w.Write(send)
// return
// }
type sendGroup struct {
Groups []string `json:"groups"`
Code int `json:"code"`
}
func GetGroup(w http.ResponseWriter, r *http.Request) {
sg := &sendGroup{}
send, _ := json.Marshal(sg)
w.Write(send)
return
}
type sty struct {
Ts map[int]string `json:"ts"`
Code int `json:"code"`
}
func GetTaskTyp(w http.ResponseWriter, r *http.Request) {
ts := &sty{
Ts: make(map[int]string, 0),
}
rows, err := db.Mconn.GetRows("select id,name from typ")
if err != nil {
w.Write([]byte(fmt.Sprintf(`{"code": 2, "msg": "%s"}`, err.Error())))
return
}
for rows.Next() {
var t string
var id int
err = rows.Scan(&id, &t)
if err != nil {
golog.Info(err)
continue
}
ts.Ts[id] = t
}
send, _ := json. | te(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
| Marshal(ts)
w.Write(send)
return
}
func ResetPwd(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
rp := xmux.GetInstance(r).Data.(*user.ResetPassword)
newpassword := encrypt.PwdEncrypt(rp.Password, cache.Salt)
updatepwdsql := "update user set password=? where id=?"
_, err := db.Mconn.Update(updatepwdsql, newpassword, rp.Id)
if err != nil {
golog.Error(err)
w.Wri | identifier_body |
setting.go | package handle
import (
"encoding/json"
"fmt"
"itflow/cache"
"itflow/db"
"itflow/encrypt"
"itflow/internal/response"
"itflow/internal/role" | "github.com/hyahm/golog"
"github.com/hyahm/xmux"
)
func CreateUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
// nickname := xmux.GetInstance(r).Get("nickname").(string)
uid := xmux.GetInstance(r).Get("uid").(int64)
createTime := time.Now().Unix()
getuser := xmux.GetInstance(r).Data.(*user.GetAddUser)
if strings.Contains(getuser.Nickname, "@") {
w.Write(errorcode.Error("昵称不能包含@符号"))
return
}
enpassword := encrypt.PwdEncrypt(getuser.Password, cache.Salt)
var err error
db.Mconn.OpenDebug()
errorcode.Id, err = db.Mconn.Insert(`insert into user(nickname, password, email, createtime, createuid, realname, jid) values(
?,?,?,?,?,?,
(select id from jobs where name=?))`, getuser.Nickname,
enpassword, getuser.Email, createTime,
uid, getuser.RealName, getuser.Position)
golog.Info(db.Mconn.GetSql())
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
cache.CacheEmail.SendMail("成功创建用户",
fmt.Sprintf(`<html><body><h1>已成功创建用户<h1>登录网址:<a href="%s">%s</a></br>用户名: %s</br> 密码: %s</br>邮箱: %s</body></html>`, r.Referer(), r.Referer(), getuser.Nickname, getuser.Password, getuser.Email),
getuser.Email)
// // 验证组和职位不能为空
// if getuser.StatusGroup == "" || getuser.RoleGroup == "" || getuser.Position == "" {
// w.Write(errorcode.Error("验证组和职位不能为空"))
// return
// }
// //1,先要验证nickname 是否有重复的
// if _, ok := cache.CacheNickNameUid[getuser.Nickname]; ok {
// w.Write(errorcode.Error("nickname 重复"))
// return
// }
// //验证邮箱 是否有重复的
// var hasemail bool
// for _, v := range cache.CacheUidEmail {
// if v == getuser.Email {
// hasemail = true
// }
// }
// if hasemail {
// w.Write(errorcode.Error("email 重复"))
// return
// }
// ids := make([]string, 0)
// for k := range cache.CacheSidStatus {
// ids = append(ids, strconv.FormatInt(k.ToInt64(), 10))
// }
// var sgid int64
// var hassggroup bool
// for k, v := range cache.CacheSgidGroup {
// if v == getuser.StatusGroup {
// sgid = k
// hassggroup = true
// break
// }
// }
// var rid int64
// err := model.CheckRoleNameInGroup(getuser.RoleGroup, &rid)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// if !hassggroup {
// w.Write(errorcode.Error("没有找到权限"))
// return
// }
// // 获取级别,如果这个职位不存在,就返回错误
// var jid int64
// var ok bool
// if jid, ok = cache.CacheJobnameJid[getuser.Position]; !ok {
// w.Write(errorcode.Error("职位不存在"))
// return
// }
// // 增加用户
// user := model.User{
// NickName: getuser.Nickname,
// RealName: getuser.RealName,
// Password: enpassword,
// Email: getuser.Email,
// CreateId: uid,
// ShowStatus: cache.StoreLevelId(strings.Join(ids, ",")),
// BugGroupId: sgid,
// Roleid: rid,
// Jobid: jid,
// }
// err = user.Create()
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// //更新缓存
// send, _ := json.Marshal(errorcode)
w.Write(errorcode.Success())
return
}
func RemoveUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
id := r.FormValue("id")
// 判断是否有bug
var count int
err := db.Mconn.GetOne("select count(id) from bugs where uid=?", id).Scan(&count)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
if count > 0 {
golog.Error("uid:%v,has bugs,can not remove")
w.Write(errorcode.IsUse())
return
}
// 查看用户组是否存在此用户
userrows, err := db.Mconn.GetRows("select ids from usergroup")
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
var hasgroup bool
for userrows.Next() {
var ids string
userrows.Scan(&ids)
for _, v := range strings.Split(ids, ",") {
if v == id {
hasgroup = true
break
}
}
if hasgroup {
w.Write(errorcode.Error("还有group"))
return
}
}
userrows.Close()
_, err = db.Mconn.Update("delete from user where id=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func DisableUser(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
id := r.FormValue("id")
var err error
_, err = db.Mconn.Update("update user set disable=ABS(disable-1) where id=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
_, err = db.Mconn.Update("update bugs set dustbin=ABS(dustbin-1) where uid=?", id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
// 显示自己能管理的权限,不显示自己的
func UserList(w http.ResponseWriter, r *http.Request) {
uid := xmux.GetInstance(r).Get("uid").(int64)
errorcode := &response.Response{}
uls := &user.UserList{}
if uid == cache.SUPERID {
getallsql := `select u.id,createtime,realname,nickname,email,disable,j.name from
user as u
join jobs as j
on u.jid = j.id and u.id<>?`
adminrows, err := db.Mconn.GetRows(getallsql, cache.SUPERID)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
for adminrows.Next() {
ul := &user.User{}
err = adminrows.Scan(&ul.Id, &ul.Createtime, &ul.Realname, &ul.Nickname, &ul.Email,
&ul.Disable, &ul.Position)
if err != nil {
golog.Info(err)
continue
}
uls.Userlist = append(uls.Userlist, ul)
}
adminrows.Close()
send, _ := json.Marshal(uls)
w.Write(send)
return
} else {
getallsql := `select u.id,createtime,realname,nickname,email,disable, j.name from
user as u join jobs as j
on u.jid in (select id from jobs where hypo=(select jid from user where id=?))`
adminrows, err := db.Mconn.GetRows(getallsql, uid)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
for adminrows.Next() {
ul := &user.User{}
err = adminrows.Scan(&ul.Id, &ul.Createtime, &ul.Realname, &ul.Nickname, &ul.Email,
&ul.Disable, &ul.RoleGroup, &ul.StatusGroup, &ul.Position)
if err != nil {
golog.Info(err)
continue
}
uls.Userlist = append(uls.Userlist, ul)
}
adminrows.Close()
send, _ := json.Marshal(uls)
w.Write(send)
return
}
}
func UserUpdate(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
uid := xmux.GetInstance(r).Get("uid").(int64)
if cache.SUPERID != uid {
w.Write(errorcode.ErrorNoPermission())
return
}
uls := xmux.GetInstance(r).Data.(*user.User)
// 0是系统管理员, 1是管理层, 2是普通用户
//switch level {
//case 0:
// var hasstatusgroup bool
// var rid int64
// var bsid int64
// err := model.CheckRoleNameInGroup(uls.RoleGroup, &rid)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ErrorE(err))
// return
// }
// for k, v := range cache.CacheSgidGroup {
// if v == uls.StatusGroup {
// bsid = k
// hasstatusgroup = true
// break
// }
// }
// if _, ok := cache.CacheJobnameJid[uls.Position]; !ok {
// w.Write(errorcode.Error("没有找到职位"))
// return
// }
// if !hasstatusgroup {
// w.Write(errorcode.Error("没有找到status"))
// return
// }
if strings.Contains(uls.Nickname, "@") {
w.Write(errorcode.Error("昵称不能包含@符号"))
return
}
getallsql := `update user set
realname=?, nickname=?, email=?,
jid=(select coalesce(min(id),0) from jobs where name=?)
where id=?`
_, err := db.Mconn.Update(getallsql,
uls.Realname, uls.Nickname, uls.Email, uls.Position,
uls.Id,
)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func ChangePassword(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
getuser := xmux.GetInstance(r).Data.(*user.ChangePasswod)
uid := xmux.GetInstance(r).Get("uid").(int64)
getaritclesql := "select count(id) from user where id=? and password=?"
oldpassword := encrypt.PwdEncrypt(getuser.Oldpassword, cache.Salt)
var n int
err := db.Mconn.GetOne(getaritclesql, uid, oldpassword).Scan(&n)
if err != nil || n != 1 {
golog.Error(err)
w.Write(errorcode.ErrorNoPermission())
return
}
newpassword := encrypt.PwdEncrypt(getuser.Newpassword, cache.Salt)
chpwdsql := "update user set password=? where id=?"
_, err = db.Mconn.Update(chpwdsql, newpassword, uid)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
}
func GetRoles(w http.ResponseWriter, r *http.Request) {
rl := &role.RespRoles{}
w.Write(rl.List())
return
}
// func GetThisRoles(w http.ResponseWriter, r *http.Request) {
// errorcode := &response.Response{}
// rl := &getroles{}
// id := r.FormValue("id")
// var rolestring string
// err := db.Mconn.GetOne("select rolestring from user where id=?", id).Scan(&rolestring)
// if err != nil {
// golog.Error(err)
// w.Write(errorcode.ConnectMysqlFail())
// return
// }
// send, _ := json.Marshal(rl)
// w.Write(send)
// return
// }
type sendGroup struct {
Groups []string `json:"groups"`
Code int `json:"code"`
}
func GetGroup(w http.ResponseWriter, r *http.Request) {
sg := &sendGroup{}
send, _ := json.Marshal(sg)
w.Write(send)
return
}
type sty struct {
Ts map[int]string `json:"ts"`
Code int `json:"code"`
}
func GetTaskTyp(w http.ResponseWriter, r *http.Request) {
ts := &sty{
Ts: make(map[int]string, 0),
}
rows, err := db.Mconn.GetRows("select id,name from typ")
if err != nil {
w.Write([]byte(fmt.Sprintf(`{"code": 2, "msg": "%s"}`, err.Error())))
return
}
for rows.Next() {
var t string
var id int
err = rows.Scan(&id, &t)
if err != nil {
golog.Info(err)
continue
}
ts.Ts[id] = t
}
send, _ := json.Marshal(ts)
w.Write(send)
return
}
func ResetPwd(w http.ResponseWriter, r *http.Request) {
errorcode := &response.Response{}
rp := xmux.GetInstance(r).Data.(*user.ResetPassword)
newpassword := encrypt.PwdEncrypt(rp.Password, cache.Salt)
updatepwdsql := "update user set password=? where id=?"
_, err := db.Mconn.Update(updatepwdsql, newpassword, rp.Id)
if err != nil {
golog.Error(err)
w.Write(errorcode.ErrorE(err))
return
}
send, _ := json.Marshal(errorcode)
w.Write(send)
return
} | "itflow/internal/user"
"net/http"
"strings"
"time"
| random_line_split |
util.js | // import {openLoadingCommon,closeLoadingCommon,notice,alert_util,alertNotice,openView,dataToForm,cleanForm} from './common'
import Vue from "vue";
// NOTE: jQuery ($) and CryptoJS are used throughout this module. They are
// assumed to be available as globals here; otherwise they would need to be
// imported explicitly, e.g.:
//   import $ from "jquery";
//   import CryptoJS from "crypto-js";
/**
 * Common utility methods.
 * @constructor
 */
var Utils;
Utils = {
    // Truncate a string to `length` characters, appending "……" when truncated.
    utilSubString: function (val, length) {
if (val.length <= length) {
return val
} else if (val.length > length) {
return val.substring(0, length) + "……";
}
},
    // Read the server time from the "Date" response header of a synchronous
    // request to the current URL (blocks until the request completes).
    getServerDate: function () {
        return new Date($.ajax({
            async: false
        }).getResponseHeader("Date"));
    },
    // Interval in milliseconds between the stored "in_time" (localStorage) and
    // the current server time; clamped to 0 if negative.
getInervalMilliseconds() {
let in_time = new Date(localStorage.getItem("in_time"));
let current_time = this.getServerDate();
let val = current_time - in_time;
if (val < 0) {
return 0;
} else {
return val;
}
},
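    // Illustrative usage (not from the original source): "in_time" is expected
    // to have been written to localStorage earlier, e.g. at login:
    //   localStorage.setItem("in_time", Utils.getServerDate());
    //   Utils.getInervalMilliseconds(); // elapsed milliseconds since "in_time"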
    /**
     * Initialize HTML5 drag-and-drop: draggable elements can be dropped onto
     * elements with class "dropzone".
     */
dragInit() {
let host_proxy = this;
var dragged;
/* 可拖动的目标元素会触发事件 */
document.addEventListener("drag", function (event) {}, false);
document.addEventListener("dragstart", function (event) {
// 保存拖动元素的引用(ref.)
dragged = event.target;
// 使其半透明
event.target.style.opacity = .5;
}, false);
document.addEventListener("dragend", function (event) {
// 重置透明度
event.target.style.opacity = "";
}, false);
/* 放下目标节点时触发事件 */
document.addEventListener("dragover", function (event) {
// 阻止默认动作
event.preventDefault();
}, false);
document.addEventListener("dragenter", function (event) {
// 当可拖动的元素进入可放置的目标高亮目标节点
if (event.target.className == "dropzone") {
event.target.style.background = "#F2F6FC";
}
}, false);
document.addEventListener("dragleave", function (event) {
// 当拖动元素离开可放置目标节点,重置其背景
if (event.target.className == "dropzone") {
event.target.style.background = "";
}
}, false);
document.addEventListener("drop", function (event) {
// 阻止默认动作(如打开一些元素的链接)
event.preventDefault();
// 移动拖动的元素到所选择的放置目标节点’
if (event.target.className == "dropzone") {
let name = $(event.target).attr("name");
if (name != 'home') {
// Check whether the target already holds a dropped element
let html = $(event.target).html();
if (html == null || html == "") {
event.target.style.background = "";
dragged.parentNode.removeChild(dragged);
event.target.appendChild(dragged);
} else {
alert("提示:只能添加一个标签")
//alert_util(host_proxy,'提示:已存在,无须再添加','warning')
}
} else {
event.target.style.background = "";
dragged.parentNode.removeChild(dragged);
event.target.appendChild(dragged);
}
}
}, false);
},
/**
* Copy the values of map1 into map2
* @param map1
* @param map2
*/
mapToMap: function (map1, map2) {
$.each(map2, function (key, value) {
map2[key] = map1[key];
})
},
/**
* Clear the map
* @param map
*/
cleanMap(map) {
$.each(map, function (key, value) {
if (key == 'is_enable') {
map[key] = true;
} else {
map[key] = "";
}
})
},
/**
* Clear the map
* @param map
*/
cleaarMap(map) {
$.each(map, function (key, value) {
map[key] = "";
})
},
/**
* Collect the ids selected via the checkboxes
* @param list
* @returns {Array}
*/
getIds(list) {
let ids = [];
$(list).each(function (i, item) {
if ($.inArray(item._id, ids) === -1) { // skip ids that were already collected
ids.push(item._id)
}
})
return ids;
},
/**
*
* @param num
* @param str
*/
isNoStr(num) {
let str = "";
if (num == "0") {
str = "否"
} else if (num == "1") {
str = "是"
}
return str;
},
/**
* Convert a sex code into its display label
* @param str
*/
changeSex(str) {
if (str == '0') {
return "男";
} else {
return "女";
}
},
auditFormat(row) {
if (row.is_audit == 0) {
return '待审核'
} else if (row.is_audit == 1) {
return '通过'
} else if (row.is_audit == 2) {
return '退回'
} else if (row.is_audit == 3) {
return '已保存'
} else {
return '已保存'
}
},
auditFormatOrderby(str) {
let map = {};
if (str == 0) {
map.order_by = "2";
map.status = '待审核';
} else if (str == 1) {
map.order_by = "3";
map.status = '通过';
} else if (str == 2) {
map.order_by = "4";
map.status = '退回';
} else if (str == 3) {
map.order_by = "1";
map.status = '已保存';
} else {
map.order_by = "5";
map.status = '其他';
}
return map;
},
// Encrypt
encrypt(word, keyStr) {
keyStr = keyStr ? keyStr : k;
var key = CryptoJS.enc.Utf8.parse(keyStr);
var srcs = CryptoJS.enc.Utf8.parse(word);
var encrypted = CryptoJS.AES.encrypt(srcs, key, {
iv: key,
mode: CryptoJS.mode.CBC,
padding: CryptoJS.pad.ZeroPadding
});
return CryptoJS.enc.Base64.stringify(encrypted.ciphertext);
},
// Decrypt
decrypt(word, keyStr) {
keyStr = keyStr ? keyStr : k;
var key = CryptoJS.enc.Utf8.parse(keyStr);
let base64 = CryptoJS.enc.Base64.parse(word);
let src = CryptoJS.enc.Base64.stringify(base64);
var decrypt = CryptoJS.AES.decrypt(src, key, {
iv: key,
mode: CryptoJS.mode.CBC,
padding: CryptoJS.pad.ZeroPadding
});
var decryptedStr = decrypt.toString(CryptoJS.enc.Utf8);
return decryptedStr.toString();
},
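// Hedged usage sketch: this assumes the global CryptoJS object and the
// module-level default key `k` are provided elsewhere (neither is defined in
// this file), and that the key string matches the AES key length in use.
//   const cipher = Utils.encrypt("hello", "0123456789abcdef");
//   const plain = Utils.decrypt(cipher, "0123456789abcdef"); // "hello"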
/* Sort an array of objects: array - the array, key - field to sort by, status=0 ascending, status=1 descending */
sortByKey(array, key, status) {
return array.sort(function (a, b) {
var x = a[key];
var y = b[key];
if (status === 0) {
return ((x < y) ? -1 : ((x > y) ? 1 : 0));
} else {
return ((x > y) ? -1 : ((x < y) ? 1 : 0));
}
})
},
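// Example: sortByKey([{age: 30}, {age: 18}], "age", 0) returns
// [{age: 18}, {age: 30}]; passing status = 1 reverses the order.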
loginType(row) {
if (row.login_type == "1") {
return '登录'
} else if (row.login_type == "2") {
return '异常'
} else if (row.login_type == "3") {
return '登出'
} else {
return '流程错误'
}
},
replace_all(html_str, name) {
let replace_str = "<span style='color: red'>" + name + "</span>"
let re = new RegExp(name, "g"); // build the regular expression
// The first argument is the content to match; the "g" flag replaces every occurrence (global).
var Newstr = html_str.replace(re, replace_str); // replace every match with the highlighted span
return Newstr;
},
/**
* Merge the matching entries of list1 into list2
* @param list1
* @param list2
*/
listToList: function (list1, list2) {
$.each(list2, function (index, item) {
$.each(list1, function (id, iten) {
if (item._id == iten._id) {
$.each(item, function (key, value) {
item[key] = iten[key];
})
}
})
})
return list2;
},
addCoreUrl() {
return "/kb-core/"
},
addApiUrl() {
return '/api'
},
getPicUrl() {
return "http://10.170.130.230:9002/core/static"
},
getUserInfo() {
let user = localStorage.getItem("user")
user = JSON.parse(user);
return user.id
},
addLoginUrl() {
return "/login/"
},
/**
* Convert a list into a map
* @param list the source collection
* @param key the field used as the map key
* @param value the field used as the map value
*/
listToMap(list, key, value) {
let map = {}
if (list != null) {
list.forEach(function (item, index) {
map[item[key]] = item[value]
})
}
return map
},
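// Example (hypothetical data): listToMap([{id: 1, name: "a"}, {id: 2, name: "b"}],
// "id", "name") returns {1: "a", 2: "b"}.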
/**
* Attach value mappings and keys to the option labels of multiple query conditions.
* @param arr the query condition array
* @param dictMap the data dictionary map
*/
initConditionData(arr, dictMap) {
for (let i = 0; i < arr.length; i++) {
var key = arr[i].dict || arr[i].key;
if (key && dictMap[key]) {
this.attachDataMap(arr[i], dictMap[key]);
}
}
},
/**
* Fetch the dictionary table as a map.
* @param callback function invoked once the dictionary data has been fetched
*
*/
getDictDataMap(callback) {
adapter.queryDictData({}, function (d) {
var tData = d.data || {};
var mData = tData.data || {};
var map = {};
for (var k in mData) {
map[k] = map[k] || {};
for (var i = 0; i < mData[k].length; i++) {
var v = mData[k][i].value;
map[k][v] = mData[k][i].name;
}
}
if (typeof (callback) === "function") {
callback(map);
}
console.log(tData);
});
},
/**
* Attach value mappings and keys to the option labels of a single query condition.
* @param obj the query condition object
* @param dict the data dictionary
*/
attachDataMap(obj, dict) {
console.log(dict)
var t = dict.length && obj.dataMap;
if (t) {
obj.items = [];
for (let i = 0; i < dict.length; i++) {
let item = dict[i]
let name = item.name;
if (name) {
obj.items.push(name);
obj.dataMap[name] = item.value;
}
}
}
},
/**
* Collect the query condition parameters currently entered by the user.
* @param arr the query condition array
*/
getConditionParam(arr) {
let param = {};
for (let i = 0; i < arr.length; i++) {
let key = arr[i].key;
var map = arr[i].dataMap || {};
if (key && !arr[i].flag) {
let value = arr[i].value;
if (value.constructor.name === "Array") {
var tArr = [];
for (let j = 0; j < value.length; j++) {
tArr.push(map[value[j]] || value[j]);
}
param[key] = tArr;
} else {
param[key] = map[value] || value;
}
}
}
var ret = this.transferConditionData(param);
for (var t in ret) {
if (ret[t] && typeof (ret[t]) === "object") {
ret[t] = JSON.stringify(ret[t]);
}
}
return ret
},
transferConditionData(param) {
var dsl = {};
for (let k in param) {
var t = this.produceDSL(k, param[k]);
var type = t.type;
var data = t.data;
dsl[type] = dsl[type] || {};
if (type === "exact_search" && data.value !== "" && data.value !== undefined) {
dsl[type][data.key] = data.value;
} else if (type === "in_search" && data.value.constructor.name === "Array" && data.value.length !== 0) {
dsl[type][data.key] = data.value;
} else if (type === "rang_search" && data.value.start && data.value.end) {
dsl[type][data.key] = data.value;
} else {
}
}
return dsl;
},
produceDSL(key, value) {
var ret = {
type: "exact_search",
data: {
key: key,
value: value
}
}
if (key === "publish_time") {
ret.type = "rang_search";
if (value.constructor.name === "Array") {
ret.data = {
key: "publish_time",
value: {
start: value[0],
end: value[1]
}
}
} else {
ret.data = {
key: "publish_time",
value: ""
}
}
} else if (value.constructor.name === "Array") {
ret.type = "in_search";
} else {}
return ret;
},
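// Worked example (hypothetical input): transferConditionData({
//   status: "1",
//   tags: ["a", "b"],
//   publish_time: ["2020-01-01", "2020-02-01"]
// }) produces {
//   exact_search: {status: "1"},
//   in_search: {tags: ["a", "b"]},
//   rang_search: {publish_time: {start: "2020-01-01", end: "2020-02-01"}}
// }.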
downloadItem(fileName, content) {
const blob = new Blob([content]);
const elink = document.createElement("a");
elink.download = fileName;
elink.style.display = "none";
elink.href = URL.createObjectURL(blob);
document.body.appendChild(elink);
elink.click();
URL.revokeObjectURL(elink.href); // release the object URL
document.body.removeChild(elink);
},
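// Example: downloadItem("notes.txt", "hello world") triggers a client-side
// download of a file named notes.txt containing the given text.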
checkConditions(dsl, tDSL) {
var ret = false;
if (!tDSL) {
return true;
}
for (var k in dsl) {
if (dsl[k] === "{}" || (dsl[k] && Object.keys(dsl[k]).length === 0)) {
delete dsl[k];
}
}
for (var k in tDSL) {
if (
dsl[k] === "{}" ||
(tDSL[k] && Object.keys(tDSL[k]).length === 0)
) {
delete tDSL[k];
}
}
for (var k in dsl) {
var v = dsl[k];
if (typeof v === "object") {
ret = !(JSON.stringify(v) === JSON.stringify(tDSL[k]));
} else {
ret = !(v === tDSL[k]);
}
if (ret) {
break;
}
}
return ret;
}
/* function getInervalHour(startDate, endDate) {
var ms = endDate.getTime() - startDate.getTime();
if (ms < 0) return 0;
return Math.floor(ms/1000/60/60);
}
console.log("登录时间:"+localStorage.getItem("in_time"))*/
};
export default Utils
Assembler.py | import sys
import os
""" CONSTANTS """
# A constant representing the width of a word in RAM
CMD_LEN = 16
# Predefined symbols. These are the constants that will be used in assembly
SP = 0
LCL = 1
ARG = 2
THIS = 3
THAT = 4
SCREEN = 16384
KBD = 24576
# The last number of RAM to be reserved
RAM_RESERVE_END = 16
# A constant representing the first place in RAM available for variables
VAR_FIRST_MEM = 16
""" Global variables"""
# A global variable representing the number of variables created in the
# supplied assembly code. When translating multiple files, this variable is
# set to 0 at the beginning of each translation process.
numOfVariables = 0
def translate_c_command(command):
command_array = command.split("=")
if len(command_array) == 1: # no destination
command_and_jump_array = command_array[0].split(";")
destination_command = ""
else: # if length = 2
destination_command = command_array[0]
command_and_jump_array = command_array[1].split(";")
if len(command_and_jump_array) == 1: # no jump
jump_command = ""
compute_command = command_and_jump_array[0]
else: # if length = 2
compute_command = command_and_jump_array[0]
jump_command = command_and_jump_array[1]
compute_bin = compute_command_to_bin(compute_command)
destination_bin = destination_command_to_bin(destination_command)
jump_bin = jump_command_to_bin(jump_command)
return compute_bin+destination_bin+jump_bin
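# Example: translate_c_command("MD=D+1") returns "1110011111011000"
# (comp "D+1" -> "1110011111", dest "MD" -> "011", no jump -> "000").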
def compute_command_to_bin(compute_command):
if "*" in compute_command:
return mul_command_to_bin(compute_command)
elif ">>" in compute_command or "<<" in compute_command:
return shift_command_to_bin(compute_command)
elif "M" in compute_command:
return m_command_to_bin(compute_command)
else:
return a_command_to_bin(compute_command)
def mul_command_to_bin(compute_command):
if compute_command == "D*A":
return "1000000000"
elif compute_command == "D*M":
return "1001000000"
def shift_command_to_bin(compute_command):
if compute_command == "D<<":
return "1010110000"
elif compute_command == "A<<":
return "1010100000"
elif compute_command == "M<<":
return "1011100000"
elif compute_command == "D>>":
return "1010010000"
elif compute_command == "A>>":
return "1010000000"
elif compute_command == "M>>":
return "1011000000"
def m_command_to_bin(compute_command):
prefix = "1111"
# replacing the M in the M command to an A,
# this way we can use the A command func
compute_command = compute_command.replace("M", "A")
return a_command_to_bin(compute_command, prefix)
def a_command_to_bin(compute_command, prefix="1110"):
# shared A and M commands
if compute_command == "A":
suffix = "110000"
elif compute_command == "!A":
suffix = "110001"
elif compute_command == "-A":
suffix = "110011"
elif compute_command == "A+1":
suffix = "110111"
elif compute_command == "A-1":
suffix = "110010"
elif compute_command == "D+A":
suffix = "000010"
elif compute_command == "D-A":
suffix = "010011"
elif compute_command == "A-D":
suffix = "000111"
elif compute_command == "D&A":
suffix = "000000"
elif compute_command == "D|A":
suffix = "010101"
# A only commands
elif compute_command == "0":
suffix = "101010"
elif compute_command == "1":
suffix = "111111"
elif compute_command == "-1":
suffix = "111010"
elif compute_command == "D":
suffix = "001100"
elif compute_command == "!D":
suffix = "001101"
elif compute_command == "-D":
suffix = "001111"
elif compute_command == "D+1":
suffix = "011111"
elif compute_command == "D-1":
suffix = "001110"
return prefix+suffix
def destination_command_to_bin(destination_command):
left_bit, middle_bit, right_bit = "0", "0", "0"
if "M" in destination_command:
right_bit = "1"
if "D" in destination_command:
middle_bit = "1"
if "A" in destination_command:
left_bit = "1"
return left_bit + middle_bit + right_bit
def jump_command_to_bin(jump_command):
left_bit, middle_bit, right_bit = "0", "0", "0"
if jump_command == "JMP":
return "111"
if jump_command == "JNE":
return "101"
if "G" in jump_command:
right_bit = "1"
if "E" in jump_command:
middle_bit = "1"
if "L" in jump_command:
left_bit = "1"
return left_bit + middle_bit + right_bit
def translate_to_binary(command):
"""
A function that translates a number into binary and formats it to fit
the machine code language word length (16 bit)
:param command: an integer to transform into hack binary
:return: hack binary code
"""
int_command = int(command)
binary_command = bin(int_command)[2:]
missing_bits = CMD_LEN - len(binary_command)
cmd_prefix = missing_bits * str(0)
binary_command = str(cmd_prefix) + str(binary_command)
return binary_command + "\n"
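# Example: translate_to_binary("21") returns "0000000000010101\n",
# i.e. the A-instruction @21 encoded as a 16-bit word.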
def translate_a_command(marker_dictionary, cmd):
"""
A function that gets an assembly command and translates it into machine
code, using a supplied marker dictionary.
This function is designed to update the supplied marker dictionary.
:param marker_dictionary: a dictionary of pointers
:param cmd: an assembly command line
:return: the machine code binary respective to the supplied assembly line
"""
if cmd.isdigit():
return translate_to_binary(cmd)
else:
if cmd in marker_dictionary:
return translate_to_binary(marker_dictionary[cmd])
else:
global numOfVariables
marker_dictionary[cmd] = VAR_FIRST_MEM + numOfVariables
numOfVariables += 1
return translate_to_binary(VAR_FIRST_MEM + numOfVariables - 1)
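# Example: the first occurrence of "@counter" binds the new symbol to RAM
# address 16 (VAR_FIRST_MEM), so the emitted word is "0000000000010000\n".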
def write_cmd(hack_file, marker_dictionary, cmd):
"""
This function writes a translated assembly command as hack machine code into
the supplied .hack file. The function uses helper functions to translate
code according to the type of code.
:param hack_file: a .hack file (destination for hack machine code)
:param marker_dictionary: a dictionary of pointers
:param cmd: a command to translate and write into hack_file
:return: None.
"""
if cmd[0] == '@':
hack_file.write(translate_a_command(marker_dictionary, cmd[1:]))
else:
hack_file.write(translate_c_command(cmd) + "\n")
def load_constants():
"""
A function that creates a dictionary containing all the hack assembly
constants and their respective binary values, including I/O and reserved
RAM locations
:return: the created dictionary
"""
marker_dictionary = dict()
marker_dictionary["SP"] = SP
marker_dictionary["LCL"] = LCL
marker_dictionary["ARG"] = ARG
marker_dictionary["THIS"] = THIS
marker_dictionary["THAT"] = THAT
marker_dictionary["SCREEN"] = SCREEN
marker_dictionary["KBD"] = KBD
for i in range(0, RAM_RESERVE_END):
marker_dictionary["R"+str(i)] = i
return marker_dictionary
def pre_process_asm_file(assembly_file):
|
def assemble_file(assembly_file_name, hack_file_name):
"""
A function that receives names of an .asm file and a .hack file.
The function will create the specified .hack file, and using helper
functions will write to it hack machine code, line by line, respective to
the supplied assembly code.
:param assembly_file_name: a name of an .asm file to translate to machine.
:param hack_file_name: the name of the output file to write machine code to.
:return: None
"""
global numOfVariables
numOfVariables = 0
assembly_file = open(assembly_file_name)
command_list, marker_dictionary = pre_process_asm_file(assembly_file)
hack_file = open(hack_file_name, 'w')
for command in command_list:
write_cmd(hack_file, marker_dictionary, command)
# print(marker_dictionary) - useful for troubleshooting and understanding
def assemble_files():
"""
This function works on the arguments supplied by the user. The arguments are
either an .asm file or a directory. If given a directory, the function
will operate on each of the .asm files contained within it, if any exist.
The file(s) name(s) will be sent to assemble_file func, which in turn,
and by helper functions, will translate (each) .asm file to a respective
.hack file - a hack computer binary file.
When a folder is supplied, all .hack files will be stored in that folder.
:return: None.
"""
path = os.path.expanduser(sys.argv[1])
if os.path.isdir(path):
file_root = path + "/"
for file in os.listdir(path):
filename = os.path.splitext(file)
if filename[1] == ".asm":
hack_file_name = file_root + filename[0] + ".hack"
assemble_file(file_root + file, hack_file_name)
else:
filename = os.path.splitext(path)
hack_file_name = filename[0] + ".hack"
assemble_file(path, hack_file_name)
assemble_files()
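# Worked example (hypothetical files): given Add.asm containing
#     @2
#     D=A
#     @3
#     D=D+A
#     @0
#     M=D
# running `python Assembler.py Add.asm` is expected to produce Add.hack with
#     0000000000000010
#     1110110000010000
#     0000000000000011
#     1110000010010000
#     0000000000000000
#     1110001100001000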
Assembler.py | import sys
import os
""" CONSTANTS """
# A constant representing the width of a word in RAM
CMD_LEN = 16
# Predefined symbols. These are the constants that will be used in assembly
SP = 0
LCL = 1
ARG = 2
THIS = 3
THAT = 4
| RAM_RESERVE_END = 16
# A constant representing the first place in RAM available for variables
VAR_FIRST_MEM = 16
""" Global variables"""
# A global variable representing the number of variables created in the
# supplied assembly code. When translating multiple files, this variable is
# set to 0 at the beginning of each translation process.
numOfVariables = 0
def translate_c_command(command):
command_array = command.split("=")
if len(command_array) == 1: # no destination
command_and_jump_array = command_array[0].split(";")
destination_command = ""
else: # if length = 2
destination_command = command_array[0]
command_and_jump_array = command_array[1].split(";")
if len(command_and_jump_array) == 1: # no jump
jump_command = ""
compute_command = command_and_jump_array[0]
else: # if length = 2
compute_command = command_and_jump_array[0]
jump_command = command_and_jump_array[1]
compute_bin = compute_command_to_bin(compute_command)
destination_bin = destination_command_to_bin(destination_command)
jump_bin = jump_command_to_bin(jump_command)
return compute_bin+destination_bin+jump_bin
def compute_command_to_bin(compute_command):
if "*" in compute_command:
return mul_command_to_bin(compute_command)
elif ">>" in compute_command or "<<" in compute_command:
return shift_command_to_bin(compute_command)
elif "M" in compute_command:
return m_command_to_bin(compute_command)
else:
return a_command_to_bin(compute_command)
def mul_command_to_bin(compute_command):
if compute_command == "D*A":
return "1000000000"
elif compute_command == "D*M":
return "1001000000"
def shift_command_to_bin(compute_command):
if compute_command == "D<<":
return "1010110000"
elif compute_command == "A<<":
return "1010100000"
elif compute_command == "M<<":
return "1011100000"
elif compute_command == "D>>":
return "1010010000"
elif compute_command == "A>>":
return "1010000000"
elif compute_command == "M>>":
return "1011000000"
def m_command_to_bin(compute_command):
prefix = "1111"
# replacing the M in the M command to an A,
# this way we can use the A command func
compute_command = compute_command.replace("M", "A")
return a_command_to_bin(compute_command, prefix)
def a_command_to_bin(compute_command, prefix="1110"):
# shared A and M commands
if compute_command == "A":
suffix = "110000"
elif compute_command == "!A":
suffix = "110001"
elif compute_command == "-A":
suffix = "110011"
elif compute_command == "A+1":
suffix = "110111"
elif compute_command == "A-1":
suffix = "110010"
elif compute_command == "D+A":
suffix = "000010"
elif compute_command == "D-A":
suffix = "010011"
elif compute_command == "A-D":
suffix = "000111"
elif compute_command == "D&A":
suffix = "000000"
elif compute_command == "D|A":
suffix = "010101"
# A only commands
elif compute_command == "0":
suffix = "101010"
elif compute_command == "1":
suffix = "111111"
elif compute_command == "-1":
suffix = "111010"
elif compute_command == "D":
suffix = "001100"
elif compute_command == "!D":
suffix = "001101"
elif compute_command == "-D":
suffix = "001111"
elif compute_command == "D+1":
suffix = "011111"
elif compute_command == "D-1":
suffix = "001110"
return prefix+suffix
def destination_command_to_bin(destination_command):
left_bit, middle_bit, right_bit = "0", "0", "0"
if "M" in destination_command:
right_bit = "1"
if "D" in destination_command:
middle_bit = "1"
if "A" in destination_command:
left_bit = "1"
return left_bit + middle_bit + right_bit
def jump_command_to_bin(jump_command):
left_bit, middle_bit, right_bit = "0", "0", "0"
if jump_command == "JMP":
return "111"
if jump_command == "JNE":
return "101"
if "G" in jump_command:
right_bit = "1"
if "E" in jump_command:
middle_bit = "1"
if "L" in jump_command:
left_bit = "1"
return left_bit + middle_bit + right_bit
def translate_to_binary(command):
"""
A function that translates a number into binary, and formatting it to fit
the machine code language word length (16 bit)
:param command: an integer to transform into hack binary
:return: hack binary code
"""
int_command = int(command)
binary_command = bin(int_command)[2:]
missing_bits = CMD_LEN - len(binary_command)
cmd_prefix = missing_bits * str(0)
binary_command = str(cmd_prefix) + str(binary_command)
return binary_command + "\n"
def translate_a_command(marker_dictionary, cmd):
"""
A function that gets a assembly command and translated it into machine
code, using a supplied marker dictionary.
This function is designed to update the supplied marker dictionary.
:param marker_dictionary: a dictionary of pointers
:param cmd: an assembly command line
:return: the machine code binary respective to the supplied assembly line
"""
if cmd.isdigit():
return translate_to_binary(cmd)
else:
if cmd in marker_dictionary:
return translate_to_binary(marker_dictionary[cmd])
else:
global numOfVariables
marker_dictionary[cmd] = VAR_FIRST_MEM + numOfVariables
numOfVariables += 1
return translate_to_binary(VAR_FIRST_MEM + numOfVariables - 1)
def write_cmd(hack_file, marker_dictionary, cmd):
"""
This function writes a translated assembly name as hack machine code into
the supplied .hack file. The function uses helper functions to translate
code according to the type of code.
:param hack_file: a .hack file (destination for hack machine code)
:param marker_dictionary: a dictionary of pointers
:param cmd: a command to translate and write into hack_file
:return: None.
"""
if cmd[0] == '@':
hack_file.write(translate_a_command(marker_dictionary, cmd[1:]))
else:
hack_file.write(translate_c_command(cmd) + "\n")
def load_constants():
"""
A function that creates a dictionary containing all the hack assembly
constants and their respective binary values, including I/O and reserved
RAM locations
:return: the created dictionary
"""
marker_dictionary = dict()
marker_dictionary["SP"] = SP
marker_dictionary["LCL"] = LCL
marker_dictionary["ARG"] = ARG
marker_dictionary["THIS"] = THIS
marker_dictionary["THAT"] = THAT
marker_dictionary["SCREEN"] = SCREEN
marker_dictionary["KBD"] = KBD
for i in range(0, RAM_RESERVE_END):
marker_dictionary["R"+str(i)] = i
return marker_dictionary
def pre_process_asm_file(assembly_file):
"""
This function process an assembly file before it's translation to machine
code. It creates a dictionary, and places into it all markers in the code,
and assigns each one of them it's location in code, allowing to use it as
a reference in future. While doing so, it deletes each marker's
declaration.
The function also clears all whitespaces and comments from the code.
Any line which is not a comment, empty, or a marker declaration is
inserted to a list of ordered commands, later used for creating a hack
machine code binary.
:param assembly_file: an .asm file
:return: the created dictionary and commands list.
"""
line_counter = 0
marker_dictionary = load_constants()
commands_list = list()
for command in assembly_file.readlines():
command = command.split("/")[0] # getting rid of comments
command = "".join(command.split()) # getting rid of whitespaces
if command:
if command.startswith('('):
marker_dictionary[command[1:-1]] = line_counter
continue
commands_list.append(command)
line_counter += 1
return commands_list, marker_dictionary
def assemble_file(assembly_file_name, hack_file_name):
"""
A function that receives names of an .asm file and a .hack file.
The function will create the specified .hack file, and using helper
functions will write to it hack machine code, line by line, respective to
the supplied assembly code.
:param assembly_file_name: a name of an .asm file to translate to machine.
:param hack_file_name: a name of a source file to write machine code to.
:return: None
"""
global numOfVariables
numOfVariables = 0
assembly_file = open(assembly_file_name)
command_list, marker_dictionary = pre_process_asm_file(assembly_file)
hack_file = open(hack_file_name, 'w')
for command in command_list:
write_cmd(hack_file, marker_dictionary, command)
# print(marker_dictionary) - useful for troubleshooting and understanding
def assemble_files():
"""
This function works on supplied arguments by the user. The arguments are
either an .asm file or a directory. If given a directory, the function
will operate on each of the .asm files contained within it, if any exist.
The file(s) name(s) will be sent to assemble_file func, which in turn,
and by helper functions, will translate (each) .asm file to a respective
.hack file - a hack computer binary file.
When a folder is supplied, all .hack files will be stored in that folder.
:return: None.
"""
path = os.path.expanduser(sys.argv[1])
if os.path.isdir(path):
file_root = path + "/"
for file in os.listdir(path):
filename = os.path.splitext(file)
if filename[1] == ".asm":
hack_file_name = file_root + filename[0] + ".hack"
assemble_file(file_root + file, hack_file_name)
else:
filename = os.path.splitext(path)
hack_file_name = filename[0] + ".hack"
assemble_file(path, hack_file_name)
assemble_files() | SCREEN = 16384
KBD = 24576
# The last number of RAM to be reserved
| random_line_split |
Assembler.py | import sys
import os
""" CONSTANTS """
# A constant representing the width of a word in RAM
CMD_LEN = 16
# Predefined symbols. These are the constants that will be used in assembly
SP = 0
LCL = 1
ARG = 2
THIS = 3
THAT = 4
SCREEN = 16384
KBD = 24576
# The last number of RAM to be reserved
RAM_RESERVE_END = 16
# A constant representing the first place in RAM available for variables
VAR_FIRST_MEM = 16
""" Global variables"""
# A global variable representing the number of variables created in the
# supplied assembly code. When translating multiple files, this variable is
# set to 0 at the beginning of each translation process.
numOfVariables = 0
def translate_c_command(command):
command_array = command.split("=")
if len(command_array) == 1: # no destination
command_and_jump_array = command_array[0].split(";")
destination_command = ""
else: # if length = 2
destination_command = command_array[0]
command_and_jump_array = command_array[1].split(";")
if len(command_and_jump_array) == 1: # no jump
jump_command = ""
compute_command = command_and_jump_array[0]
else: # if length = 2
compute_command = command_and_jump_array[0]
jump_command = command_and_jump_array[1]
compute_bin = compute_command_to_bin(compute_command)
destination_bin = destination_command_to_bin(destination_command)
jump_bin = jump_command_to_bin(jump_command)
return compute_bin+destination_bin+jump_bin
def compute_command_to_bin(compute_command):
if "*" in compute_command:
return mul_command_to_bin(compute_command)
elif ">>" in compute_command or "<<" in compute_command:
return shift_command_to_bin(compute_command)
elif "M" in compute_command:
return m_command_to_bin(compute_command)
else:
return a_command_to_bin(compute_command)
def mul_command_to_bin(compute_command):
if compute_command == "D*A":
return "1000000000"
elif compute_command == "D*M":
return "1001000000"
def shift_command_to_bin(compute_command):
if compute_command == "D<<":
return "1010110000"
elif compute_command == "A<<":
return "1010100000"
elif compute_command == "M<<":
return "1011100000"
elif compute_command == "D>>":
return "1010010000"
elif compute_command == "A>>":
return "1010000000"
elif compute_command == "M>>":
return "1011000000"
def m_command_to_bin(compute_command):
prefix = "1111"
# replacing the M in the M command to an A,
# this way we can use the A command func
compute_command = compute_command.replace("M", "A")
return a_command_to_bin(compute_command, prefix)
def a_command_to_bin(compute_command, prefix="1110"):
# shared A and M commands
if compute_command == "A":
suffix = "110000"
elif compute_command == "!A":
suffix = "110001"
elif compute_command == "-A":
suffix = "110011"
elif compute_command == "A+1":
suffix = "110111"
elif compute_command == "A-1":
suffix = "110010"
elif compute_command == "D+A":
suffix = "000010"
elif compute_command == "D-A":
suffix = "010011"
elif compute_command == "A-D":
suffix = "000111"
elif compute_command == "D&A":
suffix = "000000"
elif compute_command == "D|A":
|
# A only commands
elif compute_command == "0":
suffix = "101010"
elif compute_command == "1":
suffix = "111111"
elif compute_command == "-1":
suffix = "111010"
elif compute_command == "D":
suffix = "001100"
elif compute_command == "!D":
suffix = "001101"
elif compute_command == "-D":
suffix = "001111"
elif compute_command == "D+1":
suffix = "011111"
elif compute_command == "D-1":
suffix = "001110"
return prefix+suffix
def destination_command_to_bin(destination_command):
left_bit, middle_bit, right_bit = "0", "0", "0"
if "M" in destination_command:
right_bit = "1"
if "D" in destination_command:
middle_bit = "1"
if "A" in destination_command:
left_bit = "1"
return left_bit + middle_bit + right_bit
def jump_command_to_bin(jump_command):
left_bit, middle_bit, right_bit = "0", "0", "0"
if jump_command == "JMP":
return "111"
if jump_command == "JNE":
return "101"
if "G" in jump_command:
right_bit = "1"
if "E" in jump_command:
middle_bit = "1"
if "L" in jump_command:
left_bit = "1"
return left_bit + middle_bit + right_bit
def translate_to_binary(command):
"""
A function that translates a number into binary and formats it to fit
the machine-code word length (16 bits).
:param command: an integer to transform into hack binary
:return: hack binary code
"""
int_command = int(command)
binary_command = bin(int_command)[2:]
missing_bits = CMD_LEN - len(binary_command)
cmd_prefix = missing_bits * str(0)
binary_command = str(cmd_prefix) + str(binary_command)
return binary_command + "\n"
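# Example (illustrative, not in the original source): translate_to_binary("21")
# returns "0000000000010101\n" - bin(21) is "10101", left-padded with zeros to the
# 16-bit word length defined by CMD_LEN.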
def translate_a_command(marker_dictionary, cmd):
"""
A function that gets an assembly command and translates it into machine
code, using a supplied marker dictionary.
This function is designed to update the supplied marker dictionary.
:param marker_dictionary: a dictionary of pointers
:param cmd: an assembly command line
:return: the machine code binary respective to the supplied assembly line
"""
if cmd.isdigit():
return translate_to_binary(cmd)
else:
if cmd in marker_dictionary:
return translate_to_binary(marker_dictionary[cmd])
else:
global numOfVariables
marker_dictionary[cmd] = VAR_FIRST_MEM + numOfVariables
numOfVariables += 1
return translate_to_binary(VAR_FIRST_MEM + numOfVariables - 1)
def write_cmd(hack_file, marker_dictionary, cmd):
"""
This function writes a translated assembly command as hack machine code into
the supplied .hack file. The function uses helper functions to translate
code according to the type of code.
:param hack_file: a .hack file (destination for hack machine code)
:param marker_dictionary: a dictionary of pointers
:param cmd: a command to translate and write into hack_file
:return: None.
"""
if cmd[0] == '@':
hack_file.write(translate_a_command(marker_dictionary, cmd[1:]))
else:
hack_file.write(translate_c_command(cmd) + "\n")
def load_constants():
"""
A function that creates a dictionary containing all the hack assembly
constants and their respective binary values, including I/O and reserved
RAM locations
:return: the created dictionary
"""
marker_dictionary = dict()
marker_dictionary["SP"] = SP
marker_dictionary["LCL"] = LCL
marker_dictionary["ARG"] = ARG
marker_dictionary["THIS"] = THIS
marker_dictionary["THAT"] = THAT
marker_dictionary["SCREEN"] = SCREEN
marker_dictionary["KBD"] = KBD
for i in range(0, RAM_RESERVE_END):
marker_dictionary["R"+str(i)] = i
return marker_dictionary
def pre_process_asm_file(assembly_file):
"""
This function processes an assembly file before its translation to machine
code. It creates a dictionary and places into it all markers in the code,
assigning each one its location in the code so that it can later be used
as a reference. While doing so, it deletes each marker's
declaration.
The function also clears all whitespaces and comments from the code.
Any line which is not a comment, empty, or a marker declaration is
inserted to a list of ordered commands, later used for creating a hack
machine code binary.
:param assembly_file: an .asm file
:return: the created dictionary and commands list.
"""
line_counter = 0
marker_dictionary = load_constants()
commands_list = list()
for command in assembly_file.readlines():
command = command.split("/")[0] # getting rid of comments
command = "".join(command.split()) # getting rid of whitespaces
if command:
if command.startswith('('):
marker_dictionary[command[1:-1]] = line_counter
continue
commands_list.append(command)
line_counter += 1
return commands_list, marker_dictionary
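# Sketch of this pre-processing pass on a made-up snippet (indices are the
# resulting instruction positions):
# @i -> kept as instruction 0
# M=1 -> kept as instruction 1
# (LOOP) -> marker_dictionary["LOOP"] = 2, declaration dropped
# @LOOP -> kept as instruction 2, later resolved through the dictionary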
def assemble_file(assembly_file_name, hack_file_name):
"""
A function that receives names of an .asm file and a .hack file.
The function will create the specified .hack file, and using helper
functions will write hack machine code to it, line by line, corresponding to
the supplied assembly code.
:param assembly_file_name: the name of an .asm file to translate to machine code.
:param hack_file_name: the name of the destination .hack file to write machine code to.
:return: None
"""
global numOfVariables
numOfVariables = 0
assembly_file = open(assembly_file_name)
command_list, marker_dictionary = pre_process_asm_file(assembly_file)
hack_file = open(hack_file_name, 'w')
for command in command_list:
write_cmd(hack_file, marker_dictionary, command)
# print(marker_dictionary) - useful for troubleshooting and understanding
def assemble_files():
"""
This function works on the arguments supplied by the user. The arguments are
either an .asm file or a directory. If given a directory, the function
will operate on each of the .asm files contained within it, if any exist.
Each file name is passed to the assemble_file function, which in turn,
using helper functions, translates the .asm file into a corresponding
.hack file - a hack computer binary file.
When a folder is supplied, all .hack files will be stored in that folder.
:return: None.
"""
path = os.path.expanduser(sys.argv[1])
if os.path.isdir(path):
file_root = path + "/"
for file in os.listdir(path):
filename = os.path.splitext(file)
if filename[1] == ".asm":
hack_file_name = file_root + filename[0] + ".hack"
assemble_file(file_root + file, hack_file_name)
else:
filename = os.path.splitext(path)
hack_file_name = filename[0] + ".hack"
assemble_file(path, hack_file_name)
assemble_files()
| suffix = "010101" | conditional_block |
Assembler.py | import sys
import os
""" CONSTANTS """
# A constant representing the width of a word in RAM
CMD_LEN = 16
# Predefined symbols. These are the constants that will be used in assembly
SP = 0
LCL = 1
ARG = 2
THIS = 3
THAT = 4
SCREEN = 16384
KBD = 24576
# Number of reserved RAM registers (R0-R15, used as an exclusive range end)
RAM_RESERVE_END = 16
# A constant representing the first place in RAM available for variables
VAR_FIRST_MEM = 16
""" Global variables"""
# A global variable representing the number of variables created in the
# supplied assembly code. When translating multiple files, this variable is
# set to 0 at the beginning of each translation process.
numOfVariables = 0
def translate_c_command(command):
command_array = command.split("=")
if len(command_array) == 1: # no destination
command_and_jump_array = command_array[0].split(";")
destination_command = ""
else: # if length = 2
destination_command = command_array[0]
command_and_jump_array = command_array[1].split(";")
if len(command_and_jump_array) == 1: # no jump
jump_command = ""
compute_command = command_and_jump_array[0]
else: # if length = 2
compute_command = command_and_jump_array[0]
jump_command = command_and_jump_array[1]
compute_bin = compute_command_to_bin(compute_command)
destination_bin = destination_command_to_bin(destination_command)
jump_bin = jump_command_to_bin(jump_command)
return compute_bin+destination_bin+jump_bin
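# Illustrative worked example (not part of the original code): for the C-command
# "D=D+A;JGT" the split above yields dest "D", comp "D+A" and jump "JGT"; the helper
# functions below encode them as "1110000010", "010" and "001", so the emitted word
# is "1110000010010001" (comp bits, then dest bits, then jump bits).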
def compute_command_to_bin(compute_command):
if "*" in compute_command:
return mul_command_to_bin(compute_command)
elif ">>" in compute_command or "<<" in compute_command:
return shift_command_to_bin(compute_command)
elif "M" in compute_command:
return m_command_to_bin(compute_command)
else:
return a_command_to_bin(compute_command)
def mul_command_to_bin(compute_command):
if compute_command == "D*A":
return "1000000000"
elif compute_command == "D*M":
return "1001000000"
def shift_command_to_bin(compute_command):
if compute_command == "D<<":
return "1010110000"
elif compute_command == "A<<":
return "1010100000"
elif compute_command == "M<<":
return "1011100000"
elif compute_command == "D>>":
return "1010010000"
elif compute_command == "A>>":
return "1010000000"
elif compute_command == "M>>":
return "1011000000"
def m_command_to_bin(compute_command):
prefix = "1111"
# replacing the M in the M command to an A,
# this way we can use the A command func
compute_command = compute_command.replace("M", "A")
return a_command_to_bin(compute_command, prefix)
def a_command_to_bin(compute_command, prefix="1110"):
# shared A and M commands
if compute_command == "A":
suffix = "110000"
elif compute_command == "!A":
suffix = "110001"
elif compute_command == "-A":
suffix = "110011"
elif compute_command == "A+1":
suffix = "110111"
elif compute_command == "A-1":
suffix = "110010"
elif compute_command == "D+A":
suffix = "000010"
elif compute_command == "D-A":
suffix = "010011"
elif compute_command == "A-D":
suffix = "000111"
elif compute_command == "D&A":
suffix = "000000"
elif compute_command == "D|A":
suffix = "010101"
# A only commands
elif compute_command == "0":
suffix = "101010"
elif compute_command == "1":
suffix = "111111"
elif compute_command == "-1":
suffix = "111010"
elif compute_command == "D":
suffix = "001100"
elif compute_command == "!D":
suffix = "001101"
elif compute_command == "-D":
suffix = "001111"
elif compute_command == "D+1":
suffix = "011111"
elif compute_command == "D-1":
suffix = "001110"
return prefix+suffix
def destination_command_to_bin(destination_command):
left_bit, middle_bit, right_bit = "0", "0", "0"
if "M" in destination_command:
right_bit = "1"
if "D" in destination_command:
middle_bit = "1"
if "A" in destination_command:
left_bit = "1"
return left_bit + middle_bit + right_bit
def jump_command_to_bin(jump_command):
left_bit, middle_bit, right_bit = "0", "0", "0"
if jump_command == "JMP":
return "111"
if jump_command == "JNE":
return "101"
if "G" in jump_command:
right_bit = "1"
if "E" in jump_command:
middle_bit = "1"
if "L" in jump_command:
left_bit = "1"
return left_bit + middle_bit + right_bit
def translate_to_binary(command):
"""
A function that translates a number into binary and formats it to fit
the machine-code word length (16 bits).
:param command: an integer to transform into hack binary
:return: hack binary code
"""
int_command = int(command)
binary_command = bin(int_command)[2:]
missing_bits = CMD_LEN - len(binary_command)
cmd_prefix = missing_bits * str(0)
binary_command = str(cmd_prefix) + str(binary_command)
return binary_command + "\n"
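# Example (illustrative, not in the original source): translate_to_binary("21")
# returns "0000000000010101\n" - bin(21) is "10101", left-padded with zeros to the
# 16-bit word length defined by CMD_LEN.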
def translate_a_command(marker_dictionary, cmd):
"""
A function that gets an assembly command and translates it into machine
code, using a supplied marker dictionary.
This function is designed to update the supplied marker dictionary.
:param marker_dictionary: a dictionary of pointers
:param cmd: an assembly command line
:return: the machine code binary respective to the supplied assembly line
"""
if cmd.isdigit():
return translate_to_binary(cmd)
else:
if cmd in marker_dictionary:
return translate_to_binary(marker_dictionary[cmd])
else:
global numOfVariables
marker_dictionary[cmd] = VAR_FIRST_MEM + numOfVariables
numOfVariables += 1
return translate_to_binary(VAR_FIRST_MEM + numOfVariables - 1)
def | (hack_file, marker_dictionary, cmd):
"""
This function writes a translated assembly command as hack machine code into
the supplied .hack file. The function uses helper functions to translate
code according to the type of code.
:param hack_file: a .hack file (destination for hack machine code)
:param marker_dictionary: a dictionary of pointers
:param cmd: a command to translate and write into hack_file
:return: None.
"""
if cmd[0] == '@':
hack_file.write(translate_a_command(marker_dictionary, cmd[1:]))
else:
hack_file.write(translate_c_command(cmd) + "\n")
def load_constants():
"""
A function that creates a dictionary containing all the hack assembly
constants and their respective binary values, including I/O and reserved
RAM locations
:return: the created dictionary
"""
marker_dictionary = dict()
marker_dictionary["SP"] = SP
marker_dictionary["LCL"] = LCL
marker_dictionary["ARG"] = ARG
marker_dictionary["THIS"] = THIS
marker_dictionary["THAT"] = THAT
marker_dictionary["SCREEN"] = SCREEN
marker_dictionary["KBD"] = KBD
for i in range(0, RAM_RESERVE_END):
marker_dictionary["R"+str(i)] = i
return marker_dictionary
def pre_process_asm_file(assembly_file):
"""
This function processes an assembly file before its translation to machine
code. It creates a dictionary and places into it all markers in the code,
assigning each one its location in the code so that it can later be used
as a reference. While doing so, it deletes each marker's
declaration.
The function also clears all whitespaces and comments from the code.
Any line which is not a comment, empty, or a marker declaration is
inserted to a list of ordered commands, later used for creating a hack
machine code binary.
:param assembly_file: an .asm file
:return: the created dictionary and commands list.
"""
line_counter = 0
marker_dictionary = load_constants()
commands_list = list()
for command in assembly_file.readlines():
command = command.split("/")[0] # getting rid of comments
command = "".join(command.split()) # getting rid of whitespaces
if command:
if command.startswith('('):
marker_dictionary[command[1:-1]] = line_counter
continue
commands_list.append(command)
line_counter += 1
return commands_list, marker_dictionary
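# Sketch of this pre-processing pass on a made-up snippet (indices are the
# resulting instruction positions):
# @i -> kept as instruction 0
# M=1 -> kept as instruction 1
# (LOOP) -> marker_dictionary["LOOP"] = 2, declaration dropped
# @LOOP -> kept as instruction 2, later resolved through the dictionary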
def assemble_file(assembly_file_name, hack_file_name):
"""
A function that receives names of an .asm file and a .hack file.
The function will create the specified .hack file, and using helper
functions will write hack machine code to it, line by line, corresponding to
the supplied assembly code.
:param assembly_file_name: the name of an .asm file to translate to machine code.
:param hack_file_name: the name of the destination .hack file to write machine code to.
:return: None
"""
global numOfVariables
numOfVariables = 0
assembly_file = open(assembly_file_name)
command_list, marker_dictionary = pre_process_asm_file(assembly_file)
hack_file = open(hack_file_name, 'w')
for command in command_list:
write_cmd(hack_file, marker_dictionary, command)
# print(marker_dictionary) - useful for troubleshooting and understanding
def assemble_files():
"""
This function works on the arguments supplied by the user. The arguments are
either an .asm file or a directory. If given a directory, the function
will operate on each of the .asm files contained within it, if any exist.
Each file name is passed to the assemble_file function, which in turn,
using helper functions, translates the .asm file into a corresponding
.hack file - a hack computer binary file.
When a folder is supplied, all .hack files will be stored in that folder.
:return: None.
"""
path = os.path.expanduser(sys.argv[1])
if os.path.isdir(path):
file_root = path + "/"
for file in os.listdir(path):
filename = os.path.splitext(file)
if filename[1] == ".asm":
hack_file_name = file_root + filename[0] + ".hack"
assemble_file(file_root + file, hack_file_name)
else:
filename = os.path.splitext(path)
hack_file_name = filename[0] + ".hack"
assemble_file(path, hack_file_name)
assemble_files()
| write_cmd | identifier_name |
unqfy.js | const picklify = require('picklify') // for loading/saving unqfy
const fs = require('fs') // for loading/saving unqfy
const lyricFinderModule = require('../musicMatch') // contains the MusicMatch request
const populatorModule = require('../spotify') // contains the Spotify request function
const LyricFinder = require('../musicMatch').module.LyricFinder
const artistObserver = require('./ObserverArtist');
const albumObserver = require('./ObserverAlbum');
const trackObserver = require('./ObserverTrack');
const ArtistNotFound = require('./exceptions/ArtistNotFound')
const { Artist, Album, Track, User, Playlist, Listening } = require('./entities/all') // this is needed for the persistence framework
const {ArtistCreation, TrackCreation, UserCreation} = require('./entities-creation/all') // Method objects
const PlaylistGenerator = require('./PlaylistGenerator.js')
const EntitiesRepository = require('./entities-repositories/EntitiesRepository')
class UNQfy {
constructor(
entitiesRepository = new EntitiesRepository(),
listOfObserbers
)
{
this._entitiesRepository = entitiesRepository
this._nextId = 1
this.lyricsProvider = new LyricFinder();
this.artistObs = new artistObserver();
this.albumObs = new albumObserver();
this.trackObs = new trackObserver();
}
_generateUniqueId() { return this._nextId++ }
get playlists() { return this._entitiesRepository.playlists }
get artists() { return this._entitiesRepository.artists }
get albums() { return this._entitiesRepository.albums }
get tracks() { return this._entitiesRepository.tracks }
get | () { return this._nextId }
/////////////////////
addUser({name, email}) {
const newUser = new UserCreation(this, {name, email}).handle()
this._entitiesRepository.add('user', newUser)
return newUser
}
verifyId(id){
return this._nextId >= id
}
registerListening(userId, trackId) {
const user = this.getUserById(userId)
const track = this.getTrackById(trackId)
const album = this._getAlbumContaining(track)
const artist = this._getAuthorOfAlbum(album)
const newListening = new Listening({listener: user, artist, album, track})
user.addToHistory(newListening)
artist.registerOthersListeningsOfHisArt(newListening)
}
_getAlbumContaining(aTrack) {
return this._entitiesRepository.find('album', album => album.hasTrack(aTrack))
}
createPlaylistFor(userId, playlistName, genresToInclude, maxDuration) {
const newPlaylist = this.createPlaylist(playlistName, genresToInclude, maxDuration)
const user = this.getUserById(userId)
user.registerPlaylist(newPlaylist)
return newPlaylist
}
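// Rough usage sketch (illustrative only; the names and values are invented, and it
// assumes the created entities expose an `id` property, as the getters below suggest):
// const unqfy = new UNQfy()
// const user = unqfy.addUser({ name: 'Ana', email: '[email protected]' })
// const artist = unqfy.addArtist({ name: 'Soda Stereo', country: 'Argentina' })
// const album = unqfy.addAlbum(artist.id, { name: 'Signos', year: 1986 })
// unqfy.addTrack(album.id, { name: 'Persiana Americana', duration: 290, genres: ['rock'] })
// unqfy.createPlaylistFor(user.id, 'Rock AR', ['rock'], 3600)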
/* ARTIST */
addArtist({name, country}) {
const newArtist = new ArtistCreation(this, {name, country}).handle()
this._entitiesRepository.add('artist', newArtist);
this.artistObs.update(newArtist.name);
return newArtist
}
removeArtist(artistId) {
const artist = this.getArtistById(artistId)
this._removeFromAllPlaylists(artist.allTracks)
this._entitiesRepository.removeBy('artist' , {prop: 'id', value: artistId})
}
existsArtistWithId(id) {
return this._entitiesRepository.someHas('artist', {prop: 'id', value: id})
}
existsArtistWithName(name){
return this._entitiesRepository.someHas('artist', {prop: 'name', value: name})
}
existSomeoneCalled(aName) {
return this._entitiesRepository.someHas('artist', {prop: 'name', value: aName}) ||
this._entitiesRepository.someHas('user' , {prop: 'name', value: aName})
}
/* ALBUM */
addAlbum(artistId, {name, year}) {
const newAlbum = new Album({ id: this._generateUniqueId(), ...{name, year} })
const artist = this.getArtistById(artistId)
artist.addAlbumByForce(newAlbum);
this.albumObs.update(artist.id, artist.name, newAlbum.name)
return newAlbum
}
verifyAlbum(artistId, name){
return this.getArtistById(artistId);
}
removeAlbum(albumId) {
const album = this.getAlbumById(albumId)
const artist = this._getAuthorOfAlbum(album)
this._removeFromAllPlaylists(album.tracks)
artist.removeAlbum(album)
}
/* TRACK */
addTrack(albumId, {name, duration, genres}) {
const lyricsProvider = this.lyricsProvider;
const newTrack = new TrackCreation(this, {name, duration, genres, lyricsProvider}).handle()
const album = this.getAlbumById(albumId);
const artist = this._getAuthorOfAlbum(album);
artist.addTrackTo(album, newTrack);
this.trackObs.update(album.name, newTrack.name)
return newTrack
}
removeTrack(trackId) {
const track = this.getTrackById(trackId)
const artist = this.getAuthorOfTrack(track)
this._removeFromAllPlaylists([track])
artist.removeTrack(track)
}
/* PLAYLIST */
createPlaylist(name, genresToInclude, maxDuration) {
const newPlaylist = new PlaylistGenerator().generate(this._generateUniqueId(), name, genresToInclude, maxDuration, this.tracks)
this._entitiesRepository.add('playlist', newPlaylist)
return newPlaylist
}
removePlaylist(playlistId) {
this._entitiesRepository.removeBy('playlist', {prop: 'id', value: playlistId})
}
_removeFromAllPlaylists(tracks) {
this._entitiesRepository.forEach('playlist', playlist => playlist.removeAll(tracks))
}
/** SEARCHES **/
searchByName(aName) {return this._entitiesRepository.filterAll(entity => new RegExp(`\\b${aName}\\b`, 'i').test(entity.name))}
searchByNamePartial(aPartialName) { return this._entitiesRepository.filterAll(entity => new RegExp(aPartialName, 'i').test(entity.name)) }
getArtistById(id) { return this._entitiesRepository.findBy('artist' , {prop: 'id', value: id}) }
getAlbumById(id) { return this._entitiesRepository.findBy('album' , {prop: 'id', value: id}) }
getTrackById(id) { return this._entitiesRepository.findBy('track' , {prop: 'id', value: id}) }
getPlaylistById(id) { return this._entitiesRepository.findBy('playlist', {prop: 'id', value: id}) }
getUserById(id) { return this._entitiesRepository.findBy('user' , {prop: 'id', value: id}) }
getArtistByName(aName) { return this._entitiesRepository.findBy('artist', { prop: 'name', value: aName }) }
getUserByName(aName) { return this._entitiesRepository.findBy('user', { prop: 'name', value: aName }) }
getUserByEmail(aEmail) { return this._entitiesRepository.findBy('user', { prop: 'email', value: aEmail }) }
getTracksMatchingGenres(genres) {
return this._entitiesRepository.filter('track', track => track.matchSomeGenreFrom(genres))
}
getTracksMatchingArtist(artist) {
return artist.allTracks
}
getTracksMatchingArtistName(artistName) {
return this.getArtistByName(artistName).allTracks
}
_getAuthorOfAlbum(anAlbum) {
return this._entitiesRepository.find('artist', artist => artist.isTheAuthorOfAlbum(anAlbum))
}
isAuthorOfAlbum(id, name){
const artist = this.getArtistById(id)
const album = this.searchByName(name).albums[0]
return artist.isTheAuthorOfAlbum(album)
}
getAuthorOfTrack(aTrack) {
return this._entitiesRepository.find('artist', artist => artist.isTheAuthorOfTrack(aTrack))
}
findBy(entityName, {prop, value}) {
return this._entitiesRepository.findBy(entityName, {prop, value})
}
filterAllBy({prop, value}) {
return this._entitiesRepository.filterAllBy({prop, value})}
getPlaylistByQuery(query){
const durationLT = query.durationLT === undefined
const durationGT = query.durationGT === undefined
const name = query.name === undefined
var playlist = this.playlists.filter(playlist =>
((durationLT)||(playlist.duration <= query.durationLT))
&&((durationGT)||(playlist.duration >= query.durationGT))
&&(( name )||(new RegExp(query.name, 'i').test(playlist.name))))
return playlist
}
/** PERSISTENCE **/
save(filename) {
const listenersBkp = this.listeners
this.listeners = []
const serializedData = picklify.picklify(this)
this.listeners = listenersBkp
fs.writeFileSync(filename, JSON.stringify(serializedData, null, 2))
}
load(filename) {
const serializedData = fs.readFileSync(filename, {encoding: 'utf-8'})
// TO BE COMPLETED BY THE STUDENT: add to the list every class that needs to be instantiated
const classes = [UNQfy, Artist, Album, Track, Playlist, EntitiesRepository, LyricFinder];
return picklify.unpicklify(JSON.parse(serializedData), classes)
}
/* VISADO 2 */
getAlbumsForArtist(artistName) {
if(this.existsArtistWithName(artistName)){
this.populateAlbumsForArtist(artistName);
//return this.getArtistByName(artistName).albums
}else{
throw new ArtistNotFound(artistName)
}
}
updateArtist(artistId, artistData) {
const artist = this.getArtistById(artistId)
artist.update(artistData)
return artist
}
updateAlbum(albumId, { year }) {
const album = this.getAlbumById(albumId)
album.update({year})
return album
}
async populateAlbumsForArtist(artistName) {
const artist = this.getArtistByName(artistName);
const x = ((new populatorModule.module.Populator()).populateResult(artistName)
.then( response => {return response.items})
.then( items => items.forEach(elem => {
this.addAlbum(artist.id, {name: elem.name, year: elem.release_date})
})).then(res => this.save('./backend.json'))
).catch(error => {console.log(error)})
}
getLyiricsFor(artistname, trackName){
let tracks = this.getTracksMatchingArtistName(artistname).filter(elem => elem.name == trackName);
if(tracks.length != 0){
let firstMatch = tracks[0]
if(firstMatch.lyrics == ''){
this.lyricsProvider.getLyrics(artistname, trackName, firstMatch, this)
return 'sorry, try again later'
}
else{
return firstMatch.lyrics
}
}
}
}
// TO BE COMPLETED BY THE STUDENT: export every class that needs to be used from a client module
module.exports = {
UNQfy,
}
| id | identifier_name |
unqfy.js | const picklify = require('picklify') // for loading/saving unqfy
const fs = require('fs') // for loading/saving unqfy
const lyricFinderModule = require('../musicMatch') // contains the MusicMatch request
const populatorModule = require('../spotify') // contains the Spotify request function
const LyricFinder = require('../musicMatch').module.LyricFinder
const artistObserver = require('./ObserverArtist');
const albumObserver = require('./ObserverAlbum');
const trackObserver = require('./ObserverTrack');
const ArtistNotFound = require('./exceptions/ArtistNotFound')
const { Artist, Album, Track, User, Playlist, Listening } = require('./entities/all') // this is needed for the persistence framework
const {ArtistCreation, TrackCreation, UserCreation} = require('./entities-creation/all') // Method objects
const PlaylistGenerator = require('./PlaylistGenerator.js')
const EntitiesRepository = require('./entities-repositories/EntitiesRepository')
class UNQfy {
constructor(
entitiesRepository = new EntitiesRepository(),
listOfObserbers
)
{
this._entitiesRepository = entitiesRepository
this._nextId = 1
this.lyricsProvider = new LyricFinder();
this.artistObs = new artistObserver();
this.albumObs = new albumObserver();
this.trackObs = new trackObserver();
}
_generateUniqueId() { return this._nextId++ }
get playlists() { return this._entitiesRepository.playlists }
get artists() { return this._entitiesRepository.artists }
get albums() { return this._entitiesRepository.albums }
get tracks() { return this._entitiesRepository.tracks }
get id() { return this._nextId }
/////////////////////
addUser({name, email}) {
const newUser = new UserCreation(this, {name, email}).handle()
this._entitiesRepository.add('user', newUser)
return newUser
}
verifyId(id){
return this._nextId >= id
}
registerListening(userId, trackId) {
const user = this.getUserById(userId)
const track = this.getTrackById(trackId)
const album = this._getAlbumContaining(track)
const artist = this._getAuthorOfAlbum(album)
const newListening = new Listening({listener: user, artist, album, track})
user.addToHistory(newListening)
artist.registerOthersListeningsOfHisArt(newListening)
}
_getAlbumContaining(aTrack) {
return this._entitiesRepository.find('album', album => album.hasTrack(aTrack))
}
createPlaylistFor(userId, playlistName, genresToInclude, maxDuration) {
const newPlaylist = this.createPlaylist(playlistName, genresToInclude, maxDuration)
const user = this.getUserById(userId)
user.registerPlaylist(newPlaylist)
return newPlaylist
}
/* ARTIST */
addArtist({name, country}) {
const newArtist = new ArtistCreation(this, {name, country}).handle()
this._entitiesRepository.add('artist', newArtist);
this.artistObs.update(newArtist.name);
return newArtist
}
removeArtist(artistId) {
const artist = this.getArtistById(artistId)
this._removeFromAllPlaylists(artist.allTracks)
this._entitiesRepository.removeBy('artist' , {prop: 'id', value: artistId})
}
existsArtistWithId(id) {
return this._entitiesRepository.someHas('artist', {prop: 'id', value: id})
}
existsArtistWithName(name){
return this._entitiesRepository.someHas('artist', {prop: 'name', value: name})
}
existSomeoneCalled(aName) {
return this._entitiesRepository.someHas('artist', {prop: 'name', value: aName}) ||
this._entitiesRepository.someHas('user' , {prop: 'name', value: aName})
}
/* ALBUM */
addAlbum(artistId, {name, year}) {
const newAlbum = new Album({ id: this._generateUniqueId(), ...{name, year} })
const artist = this.getArtistById(artistId)
artist.addAlbumByForce(newAlbum);
this.albumObs.update(artist.id, artist.name, newAlbum.name)
return newAlbum
}
verifyAlbum(artistId, name){
return this.getArtistById(artistId);
}
removeAlbum(albumId) {
const album = this.getAlbumById(albumId)
const artist = this._getAuthorOfAlbum(album)
this._removeFromAllPlaylists(album.tracks)
artist.removeAlbum(album)
}
/* TRACK */
addTrack(albumId, {name, duration, genres}) {
const lyricsProvider = this.lyricsProvider;
const newTrack = new TrackCreation(this, {name, duration, genres, lyricsProvider}).handle()
const album = this.getAlbumById(albumId);
const artist = this._getAuthorOfAlbum(album);
artist.addTrackTo(album, newTrack);
this.trackObs.update(album.name, newTrack.name)
return newTrack
}
removeTrack(trackId) {
const track = this.getTrackById(trackId)
const artist = this.getAuthorOfTrack(track)
this._removeFromAllPlaylists([track])
artist.removeTrack(track)
}
/* PLAYLIST */
createPlaylist(name, genresToInclude, maxDuration) {
const newPlaylist = new PlaylistGenerator().generate(this._generateUniqueId(), name, genresToInclude, maxDuration, this.tracks)
this._entitiesRepository.add('playlist', newPlaylist)
return newPlaylist
}
removePlaylist(playlistId) {
this._entitiesRepository.removeBy('playlist', {prop: 'id', value: playlistId})
}
_removeFromAllPlaylists(tracks) {
this._entitiesRepository.forEach('playlist', playlist => playlist.removeAll(tracks))
}
/** SEARCHES **/
searchByName(aName) {return this._entitiesRepository.filterAll(entity => new RegExp(`\\b${aName}\\b`, 'i').test(entity.name))}
searchByNamePartial(aPartialName) { return this._entitiesRepository.filterAll(entity => new RegExp(aPartialName, 'i').test(entity.name)) }
getArtistById(id) { return this._entitiesRepository.findBy('artist' , {prop: 'id', value: id}) }
getAlbumById(id) { return this._entitiesRepository.findBy('album' , {prop: 'id', value: id}) }
getTrackById(id) |
getPlaylistById(id) { return this._entitiesRepository.findBy('playlist', {prop: 'id', value: id}) }
getUserById(id) { return this._entitiesRepository.findBy('user' , {prop: 'id', value: id}) }
getArtistByName(aName) { return this._entitiesRepository.findBy('artist', { prop: 'name', value: aName }) }
getUserByName(aName) { return this._entitiesRepository.findBy('user', { prop: 'name', value: aName }) }
getUserByEmail(aEmail) { return this._entitiesRepository.findBy('user', { prop: 'email', value: aEmail }) }
getTracksMatchingGenres(genres) {
return this._entitiesRepository.filter('track', track => track.matchSomeGenreFrom(genres))
}
getTracksMatchingArtist(artist) {
return artist.allTracks
}
getTracksMatchingArtistName(artistName) {
return this.getArtistByName(artistName).allTracks
}
_getAuthorOfAlbum(anAlbum) {
return this._entitiesRepository.find('artist', artist => artist.isTheAuthorOfAlbum(anAlbum))
}
isAuthorOfAlbum(id, name){
const artist = this.getArtistById(id)
const album = this.searchByName(name).albums[0]
return artist.isTheAuthorOfAlbum(album)
}
getAuthorOfTrack(aTrack) {
return this._entitiesRepository.find('artist', artist => artist.isTheAuthorOfTrack(aTrack))
}
findBy(entityName, {prop, value}) {
return this._entitiesRepository.findBy(entityName, {prop, value})
}
filterAllBy({prop, value}) {
return this._entitiesRepository.filterAllBy({prop, value})}
getPlaylistByQuery(query){
const durationLT = query.durationLT === undefined
const durationGT = query.durationGT === undefined
const name = query.name === undefined
var playlist = this.playlists.filter(playlist =>
((durationLT)||(playlist.duration <= query.durationLT))
&&((durationGT)||(playlist.duration >= query.durationGT))
&&(( name )||(new RegExp(query.name, 'i').test(playlist.name))))
return playlist
}
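// Illustrative query (assumed shape, derived from the checks above): any subset of
// durationGT, durationLT and name may be present, e.g.
// unqfy.getPlaylistByQuery({ durationGT: 600, durationLT: 3600, name: 'rock' })
// keeps the playlists lasting between 600 and 3600 seconds whose name matches /rock/i.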
/** PERSISTENCE **/
save(filename) {
const listenersBkp = this.listeners
this.listeners = []
const serializedData = picklify.picklify(this)
this.listeners = listenersBkp
fs.writeFileSync(filename, JSON.stringify(serializedData, null, 2))
}
load(filename) {
const serializedData = fs.readFileSync(filename, {encoding: 'utf-8'})
// TO BE COMPLETED BY THE STUDENT: add to the list every class that needs to be instantiated
const classes = [UNQfy, Artist, Album, Track, Playlist, EntitiesRepository, LyricFinder];
return picklify.unpicklify(JSON.parse(serializedData), classes)
}
/* VISADO 2 */
getAlbumsForArtist(artistName) {
if(this.existsArtistWithName(artistName)){
this.populateAlbumsForArtist(artistName);
//return this.getArtistByName(artistName).albums
}else{
throw new ArtistNotFound(artistName)
}
}
updateArtist(artistId, artistData) {
const artist = this.getArtistById(artistId)
artist.update(artistData)
return artist
}
updateAlbum(albumId, { year }) {
const album = this.getAlbumById(albumId)
album.update({year})
return album
}
async populateAlbumsForArtist(artistName) {
const artist = this.getArtistByName(artistName);
const x = ((new populatorModule.module.Populator()).populateResult(artistName)
.then( response => {return response.items})
.then( items => items.forEach(elem => {
this.addAlbum(artist.id, {name: elem.name, year: elem.release_date})
})).then(res => this.save('./backend.json'))
).catch(error => {console.log(error)})
}
getLyiricsFor(artistname, trackName){
let tracks = this.getTracksMatchingArtistName(artistname).filter(elem => elem.name == trackName);
if(tracks.length != 0){
let firstMatch = tracks[0]
if(firstMatch.lyrics == ''){
this.lyricsProvider.getLyrics(artistname, trackName, firstMatch, this)
return 'sorry, try again later'
}
else{
return firstMatch.lyrics
}
}
}
}
// TO BE COMPLETED BY THE STUDENT: export every class that needs to be used from a client module
module.exports = {
UNQfy,
}
| { return this._entitiesRepository.findBy('track' , {prop: 'id', value: id}) } | identifier_body |
unqfy.js | const picklify = require('picklify') // for loading/saving unqfy
const fs = require('fs') // for loading/saving unqfy
const lyricFinderModule = require('../musicMatch') // contains the MusicMatch request
const populatorModule = require('../spotify') // contains the Spotify request function
const LyricFinder = require('../musicMatch').module.LyricFinder
const artistObserver = require('./ObserverArtist');
const albumObserver = require('./ObserverAlbum');
const trackObserver = require('./ObserverTrack');
const ArtistNotFound = require('./exceptions/ArtistNotFound')
const { Artist, Album, Track, User, Playlist, Listening } = require('./entities/all') // this is needed for the persistence framework
const {ArtistCreation, TrackCreation, UserCreation} = require('./entities-creation/all') // Method objects
const PlaylistGenerator = require('./PlaylistGenerator.js')
const EntitiesRepository = require('./entities-repositories/EntitiesRepository')
class UNQfy {
constructor(
entitiesRepository = new EntitiesRepository(),
listOfObserbers
)
{
this._entitiesRepository = entitiesRepository
this._nextId = 1
this.lyricsProvider = new LyricFinder();
this.artistObs = new artistObserver();
this.albumObs = new albumObserver();
this.trackObs = new trackObserver();
}
_generateUniqueId() { return this._nextId++ }
get playlists() { return this._entitiesRepository.playlists }
get artists() { return this._entitiesRepository.artists }
get albums() { return this._entitiesRepository.albums }
get tracks() { return this._entitiesRepository.tracks }
get id() { return this._nextId }
/////////////////////
addUser({name, email}) {
const newUser = new UserCreation(this, {name, email}).handle()
this._entitiesRepository.add('user', newUser)
return newUser
}
verifyId(id){
return this._nextId >= id
}
registerListening(userId, trackId) {
const user = this.getUserById(userId)
const track = this.getTrackById(trackId)
const album = this._getAlbumContaining(track)
const artist = this._getAuthorOfAlbum(album)
const newListening = new Listening({listener: user, artist, album, track})
user.addToHistory(newListening)
artist.registerOthersListeningsOfHisArt(newListening)
}
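// Hypothetical call (the ids are made up): unqfy.registerListening(3, 17) looks up
// user 3 and track 17, finds the containing album and its author, and stores the
// resulting Listening both in the user's history and in the artist's register.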
_getAlbumContaining(aTrack) {
return this._entitiesRepository.find('album', album => album.hasTrack(aTrack))
}
createPlaylistFor(userId, playlistName, genresToInclude, maxDuration) {
const newPlaylist = this.createPlaylist(playlistName, genresToInclude, maxDuration)
const user = this.getUserById(userId)
user.registerPlaylist(newPlaylist)
return newPlaylist
}
/* ARTIST */
addArtist({name, country}) {
const newArtist = new ArtistCreation(this, {name, country}).handle()
this._entitiesRepository.add('artist', newArtist);
this.artistObs.update(newArtist.name);
return newArtist
}
removeArtist(artistId) {
const artist = this.getArtistById(artistId)
this._removeFromAllPlaylists(artist.allTracks)
this._entitiesRepository.removeBy('artist' , {prop: 'id', value: artistId})
}
existsArtistWithId(id) {
return this._entitiesRepository.someHas('artist', {prop: 'id', value: id})
}
existsArtistWithName(name){
return this._entitiesRepository.someHas('artist', {prop: 'name', value: name})
}
existSomeoneCalled(aName) {
return this._entitiesRepository.someHas('artist', {prop: 'name', value: aName}) ||
this._entitiesRepository.someHas('user' , {prop: 'name', value: aName})
}
/* ALBUM */
addAlbum(artistId, {name, year}) {
const newAlbum = new Album({ id: this._generateUniqueId(), ...{name, year} })
const artist = this.getArtistById(artistId)
artist.addAlbumByForce(newAlbum);
this.albumObs.update(artist.id, artist.name, newAlbum.name)
return newAlbum
}
verifyAlbum(artistId, name){
return this.getArtistById(artistId);
}
| const artist = this._getAuthorOfAlbum(album)
this._removeFromAllPlaylists(album.tracks)
artist.removeAlbum(album)
}
/* TRACK */
addTrack(albumId, {name, duration, genres}) {
const lyricsProvider = this.lyricsProvider;
const newTrack = new TrackCreation(this, {name, duration, genres, lyricsProvider}).handle()
const album = this.getAlbumById(albumId);
const artist = this._getAuthorOfAlbum(album);
artist.addTrackTo(album, newTrack);
this.trackObs.update(album.name, newTrack.name)
return newTrack
}
removeTrack(trackId) {
const track = this.getTrackById(trackId)
const artist = this.getAuthorOfTrack(track)
this._removeFromAllPlaylists([track])
artist.removeTrack(track)
}
/* PLAYLIST */
createPlaylist(name, genresToInclude, maxDuration) {
const newPlaylist = new PlaylistGenerator().generate(this._generateUniqueId(), name, genresToInclude, maxDuration, this.tracks)
this._entitiesRepository.add('playlist', newPlaylist)
return newPlaylist
}
removePlaylist(playlistId) {
this._entitiesRepository.removeBy('playlist', {prop: 'id', value: playlistId})
}
_removeFromAllPlaylists(tracks) {
this._entitiesRepository.forEach('playlist', playlist => playlist.removeAll(tracks))
}
/** SEARCHES **/
searchByName(aName) {return this._entitiesRepository.filterAll(entity => new RegExp(`\\b${aName}\\b`, 'i').test(entity.name))}
searchByNamePartial(aPartialName) { return this._entitiesRepository.filterAll(entity => new RegExp(aPartialName, 'i').test(entity.name)) }
getArtistById(id) { return this._entitiesRepository.findBy('artist' , {prop: 'id', value: id}) }
getAlbumById(id) { return this._entitiesRepository.findBy('album' , {prop: 'id', value: id}) }
getTrackById(id) { return this._entitiesRepository.findBy('track' , {prop: 'id', value: id}) }
getPlaylistById(id) { return this._entitiesRepository.findBy('playlist', {prop: 'id', value: id}) }
getUserById(id) { return this._entitiesRepository.findBy('user' , {prop: 'id', value: id}) }
getArtistByName(aName) { return this._entitiesRepository.findBy('artist', { prop: 'name', value: aName }) }
getUserByName(aName) { return this._entitiesRepository.findBy('user', { prop: 'name', value: aName }) }
getUserByEmail(aEmail) { return this._entitiesRepository.findBy('user', { prop: 'email', value: aEmail }) }
getTracksMatchingGenres(genres) {
return this._entitiesRepository.filter('track', track => track.matchSomeGenreFrom(genres))
}
getTracksMatchingArtist(artist) {
return artist.allTracks
}
getTracksMatchingArtistName(artistName) {
return this.getArtistByName(artistName).allTracks
}
_getAuthorOfAlbum(anAlbum) {
return this._entitiesRepository.find('artist', artist => artist.isTheAuthorOfAlbum(anAlbum))
}
isAuthorOfAlbum(id, name){
const artist = this.getArtistById(id)
const album = this.searchByName(name).albums[0]
return artist.isTheAuthorOfAlbum(album)
}
getAuthorOfTrack(aTrack) {
return this._entitiesRepository.find('artist', artist => artist.isTheAuthorOfTrack(aTrack))
}
findBy(entityName, {prop, value}) {
return this._entitiesRepository.findBy(entityName, {prop, value})
}
filterAllBy({prop, value}) {
return this._entitiesRepository.filterAllBy({prop, value})}
getPlaylistByQuery(query){
const durationLT = query.durationLT === undefined
const durationGT = query.durationGT === undefined
const name = query.name === undefined
var playlist = this.playlists.filter(playlist =>
((durationLT)||(playlist.duration <= query.durationLT))
&&((durationGT)||(playlist.duration >= query.durationGT))
&&(( name )||(new RegExp(query.name, 'i').test(playlist.name))))
return playlist
}
/** PERSISTENCE **/
save(filename) {
const listenersBkp = this.listeners
this.listeners = []
const serializedData = picklify.picklify(this)
this.listeners = listenersBkp
fs.writeFileSync(filename, JSON.stringify(serializedData, null, 2))
}
load(filename) {
const serializedData = fs.readFileSync(filename, {encoding: 'utf-8'})
// TO BE COMPLETED BY THE STUDENT: add to the list every class that needs to be instantiated
const classes = [UNQfy, Artist, Album, Track, Playlist, EntitiesRepository, LyricFinder];
return picklify.unpicklify(JSON.parse(serializedData), classes)
}
/* VISADO 2 */
getAlbumsForArtist(artistName) {
if(this.existsArtistWithName(artistName)){
this.populateAlbumsForArtist(artistName);
//return this.getArtistByName(artistName).albums
}else{
throw new ArtistNotFound(artistName)
}
}
updateArtist(artistId, artistData) {
const artist = this.getArtistById(artistId)
artist.update(artistData)
return artist
}
updateAlbum(albumId, { year }) {
const album = this.getAlbumById(albumId)
album.update({year})
return album
}
async populateAlbumsForArtist(artistName) {
const artist = this.getArtistByName(artistName);
const x = ((new populatorModule.module.Populator()).populateResult(artistName)
.then( response => {return response.items})
.then( items => items.forEach(elem => {
this.addAlbum(artist.id, {name: elem.name, year: elem.release_date})
})).then(res => this.save('./backend.json'))
).catch(error => {console.log(error)})
}
getLyiricsFor(artistname, trackName){
let tracks = this.getTracksMatchingArtistName(artistname).filter(elem => elem.name == trackName);
if(tracks.length != 0){
let firstMatch = tracks[0]
if(firstMatch.lyrics == ''){
this.lyricsProvider.getLyrics(artistname, trackName, firstMatch, this)
return 'sorry, try again later'
}
else{
return firstMatch.lyrics
}
}
}
}
// TO BE COMPLETED BY THE STUDENT: export every class that needs to be used from a client module
module.exports = {
UNQfy,
} | removeAlbum(albumId) {
const album = this.getAlbumById(albumId) | random_line_split |
unqfy.js | const picklify = require('picklify') // for loading/saving unqfy
const fs = require('fs') // for loading/saving unqfy
const lyricFinderModule = require('../musicMatch') // contains the MusicMatch request
const populatorModule = require('../spotify') // contains the Spotify request function
const LyricFinder = require('../musicMatch').module.LyricFinder
const artistObserver = require('./ObserverArtist');
const albumObserver = require('./ObserverAlbum');
const trackObserver = require('./ObserverTrack');
const ArtistNotFound = require('./exceptions/ArtistNotFound')
const { Artist, Album, Track, User, Playlist, Listening } = require('./entities/all') // this is needed for the persistence framework
const {ArtistCreation, TrackCreation, UserCreation} = require('./entities-creation/all') // Method objects
const PlaylistGenerator = require('./PlaylistGenerator.js')
const EntitiesRepository = require('./entities-repositories/EntitiesRepository')
class UNQfy {
constructor(
entitiesRepository = new EntitiesRepository(),
listOfObserbers
)
{
this._entitiesRepository = entitiesRepository
this._nextId = 1
this.lyricsProvider = new LyricFinder();
this.artistObs = new artistObserver();
this.albumObs = new albumObserver();
this.trackObs = new trackObserver();
}
_generateUniqueId() { return this._nextId++ }
get playlists() { return this._entitiesRepository.playlists }
get artists() { return this._entitiesRepository.artists }
get albums() { return this._entitiesRepository.albums }
get tracks() { return this._entitiesRepository.tracks }
get id() { return this._nextId }
/////////////////////
addUser({name, email}) {
const newUser = new UserCreation(this, {name, email}).handle()
this._entitiesRepository.add('user', newUser)
return newUser
}
verifyId(id){
return this._nextId >= id
}
registerListening(userId, trackId) {
const user = this.getUserById(userId)
const track = this.getTrackById(trackId)
const album = this._getAlbumContaining(track)
const artist = this._getAuthorOfAlbum(album)
const newListening = new Listening({listener: user, artist, album, track})
user.addToHistory(newListening)
artist.registerOthersListeningsOfHisArt(newListening)
}
_getAlbumContaining(aTrack) {
return this._entitiesRepository.find('album', album => album.hasTrack(aTrack))
}
createPlaylistFor(userId, playlistName, genresToInclude, maxDuration) {
const newPlaylist = this.createPlaylist(playlistName, genresToInclude, maxDuration)
const user = this.getUserById(userId)
user.registerPlaylist(newPlaylist)
return newPlaylist
}
/* ARTIST */
addArtist({name, country}) {
const newArtist = new ArtistCreation(this, {name, country}).handle()
this._entitiesRepository.add('artist', newArtist);
this.artistObs.update(newArtist.name);
return newArtist
}
removeArtist(artistId) {
const artist = this.getArtistById(artistId)
this._removeFromAllPlaylists(artist.allTracks)
this._entitiesRepository.removeBy('artist' , {prop: 'id', value: artistId})
}
existsArtistWithId(id) {
return this._entitiesRepository.someHas('artist', {prop: 'id', value: id})
}
existsArtistWithName(name){
return this._entitiesRepository.someHas('artist', {prop: 'name', value: name})
}
existSomeoneCalled(aName) {
return this._entitiesRepository.someHas('artist', {prop: 'name', value: aName}) ||
this._entitiesRepository.someHas('user' , {prop: 'name', value: aName})
}
/* ALBUM */
addAlbum(artistId, {name, year}) {
const newAlbum = new Album({ id: this._generateUniqueId(), ...{name, year} })
const artist = this.getArtistById(artistId)
artist.addAlbumByForce(newAlbum);
this.albumObs.update(artist.id, artist.name, newAlbum.name)
return newAlbum
}
verifyAlbum(artistId, name){
return this.getArtistById(artistId);
}
removeAlbum(albumId) {
const album = this.getAlbumById(albumId)
const artist = this._getAuthorOfAlbum(album)
this._removeFromAllPlaylists(album.tracks)
artist.removeAlbum(album)
}
/* TRACK */
addTrack(albumId, {name, duration, genres}) {
const lyricsProvider = this.lyricsProvider;
const newTrack = new TrackCreation(this, {name, duration, genres, lyricsProvider}).handle()
const album = this.getAlbumById(albumId);
const artist = this._getAuthorOfAlbum(album);
artist.addTrackTo(album, newTrack);
this.trackObs.update(album.name, newTrack.name)
return newTrack
}
removeTrack(trackId) {
const track = this.getTrackById(trackId)
const artist = this.getAuthorOfTrack(track)
this._removeFromAllPlaylists([track])
artist.removeTrack(track)
}
/* PLAYLIST */
createPlaylist(name, genresToInclude, maxDuration) {
const newPlaylist = new PlaylistGenerator().generate(this._generateUniqueId(), name, genresToInclude, maxDuration, this.tracks)
this._entitiesRepository.add('playlist', newPlaylist)
return newPlaylist
}
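// Minimal sketch (illustrative values; PlaylistGenerator's exact selection strategy is
// not shown here, so the duration-budget reading is an assumption):
// const p = unqfy.createPlaylist('Workout', ['electronic', 'pop'], 2400)
// presumably fills the playlist with matching tracks up to roughly 2400 seconds.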
removePlaylist(playlistId) {
this._entitiesRepository.removeBy('playlist', {prop: 'id', value: playlistId})
}
_removeFromAllPlaylists(tracks) {
this._entitiesRepository.forEach('playlist', playlist => playlist.removeAll(tracks))
}
/** SEARCHES **/
searchByName(aName) {return this._entitiesRepository.filterAll(entity => new RegExp(`\\b${aName}\\b`, 'i').test(entity.name))}
searchByNamePartial(aPartialName) { return this._entitiesRepository.filterAll(entity => new RegExp(aPartialName, 'i').test(entity.name)) }
getArtistById(id) { return this._entitiesRepository.findBy('artist' , {prop: 'id', value: id}) }
getAlbumById(id) { return this._entitiesRepository.findBy('album' , {prop: 'id', value: id}) }
getTrackById(id) { return this._entitiesRepository.findBy('track' , {prop: 'id', value: id}) }
getPlaylistById(id) { return this._entitiesRepository.findBy('playlist', {prop: 'id', value: id}) }
getUserById(id) { return this._entitiesRepository.findBy('user' , {prop: 'id', value: id}) }
getArtistByName(aName) { return this._entitiesRepository.findBy('artist', { prop: 'name', value: aName }) }
getUserByName(aName) { return this._entitiesRepository.findBy('user', { prop: 'name', value: aName }) }
getUserByEmail(aEmail) { return this._entitiesRepository.findBy('user', { prop: 'email', value: aEmail }) }
getTracksMatchingGenres(genres) {
return this._entitiesRepository.filter('track', track => track.matchSomeGenreFrom(genres))
}
getTracksMatchingArtist(artist) {
return artist.allTracks
}
getTracksMatchingArtistName(artistName) {
return this.getArtistByName(artistName).allTracks
}
_getAuthorOfAlbum(anAlbum) {
return this._entitiesRepository.find('artist', artist => artist.isTheAuthorOfAlbum(anAlbum))
}
isAuthorOfAlbum(id, name){
const artist = this.getArtistById(id)
const album = this.searchByName(name).albums[0]
return artist.isTheAuthorOfAlbum(album)
}
getAuthorOfTrack(aTrack) {
return this._entitiesRepository.find('artist', artist => artist.isTheAuthorOfTrack(aTrack))
}
findBy(entityName, {prop, value}) {
return this._entitiesRepository.findBy(entityName, {prop, value})
}
filterAllBy({prop, value}) {
return this._entitiesRepository.filterAllBy({prop, value})}
getPlaylistByQuery(query){
const durationLT = query.durationLT === undefined
const durationGT = query.durationGT === undefined
const name = query.name === undefined
var playlist = this.playlists.filter(playlist =>
((durationLT)||(playlist.duration <= query.durationLT))
&&((durationGT)||(playlist.duration >= query.durationGT))
&&(( name )||(new RegExp(query.name, 'i').test(playlist.name))))
return playlist
}
/** PERSISTENCE **/
save(filename) {
const listenersBkp = this.listeners
this.listeners = []
const serializedData = picklify.picklify(this)
this.listeners = listenersBkp
fs.writeFileSync(filename, JSON.stringify(serializedData, null, 2))
}
load(filename) {
const serializedData = fs.readFileSync(filename, {encoding: 'utf-8'})
// TO BE COMPLETED BY THE STUDENT: add to the list every class that needs to be instantiated
const classes = [UNQfy, Artist, Album, Track, Playlist, EntitiesRepository, LyricFinder];
return picklify.unpicklify(JSON.parse(serializedData), classes)
}
/* VISADO 2 */
getAlbumsForArtist(artistName) {
if(this.existsArtistWithName(artistName)){
this.populateAlbumsForArtist(artistName);
//return this.getArtistByName(artistName).albums
}else |
}
updateArtist(artistId, artistData) {
const artist = this.getArtistById(artistId)
artist.update(artistData)
return artist
}
updateAlbum(albumId, { year }) {
const album = this.getAlbumById(albumId)
album.update({year})
return album
}
async populateAlbumsForArtist(artistName) {
const artist = this.getArtistByName(artistName);
const x = ((new populatorModule.module.Populator()).populateResult(artistName)
.then( response => {return response.items})
.then( items => items.forEach(elem => {
this.addAlbum(artist.id, {name: elem.name, year: elem.release_date})
})).then(res => this.save('./backend.json'))
).catch(error => {console.log(error)})
}
getLyiricsFor(artistname, trackName){
let tracks = this.getTracksMatchingArtistName(artistname).filter(elem => elem.name == trackName);
if(tracks.length != 0){
let firstMatch = tracks[0]
if(firstMatch.lyrics == ''){
this.lyricsProvider.getLyrics(artistname, trackName, firstMatch, this)
return 'sorry, try again later'
}
else{
return firstMatch.lyrics
}
}
}
}
// TO BE COMPLETED BY THE STUDENT: export every class that needs to be used from a client module
module.exports = {
UNQfy,
}
| {
throw new ArtistNotFound(artistName)
} | conditional_block |
tag.rs | //! A dictionary-based tagger. The raw format is tuples of the form `(word, lemma, part-of-speech)`
//! where each word typically has multiple entries with different part-of-speech tags.
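//!
//! As an illustrative sketch (the concrete word and tag are made up, and the real tag
//! set depends on the language), one line of such a dump would be the tab-separated
//! record `went<TAB>go<TAB>VBD`: the inflected word, its lemma, and its part-of-speech tag.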
use crate::types::*;
use bimap::BiMap;
use fs_err::File;
use fst::{IntoStreamer, Map, Streamer};
use indexmap::IndexMap;
use log::error;
use serde::{Deserialize, Serialize};
use std::io::BufRead;
use std::{borrow::Cow, iter::once};
use std::{collections::HashSet, path::Path};
#[derive(Serialize, Deserialize)]
struct TaggerFields {
tag_fst: Vec<u8>,
word_store_fst: Vec<u8>,
tag_store: BiMap<String, PosIdInt>,
}
impl From<Tagger> for TaggerFields {
fn from(tagger: Tagger) -> Self {
let mut tag_fst_items = Vec::new();
for (word_id, map) in tagger.tags.iter() {
let mut i = 0u8;
let word = tagger.str_for_word_id(word_id);
for (inflect_id, pos_ids) in map.iter() {
for pos_id in pos_ids {
assert!(i < 255);
i += 1;
let key: Vec<u8> = word.as_bytes().iter().chain(once(&i)).copied().collect();
let pos_bytes = pos_id.0.to_be_bytes();
let inflect_bytes = inflect_id.0.to_be_bytes();
let value = u64::from_be_bytes([
inflect_bytes[0],
inflect_bytes[1],
inflect_bytes[2],
inflect_bytes[3],
0,
0,
pos_bytes[0],
pos_bytes[1],
]);
tag_fst_items.push((key, value));
}
}
}
tag_fst_items.sort_by(|(a, _), (b, _)| a.cmp(b));
let mut word_store_items: Vec<_> = tagger
.word_store
.iter()
.map(|(key, value)| (key.clone(), value.0 as u64))
.collect();
word_store_items.sort_by(|(a, _), (b, _)| a.cmp(b));
let tag_fst = Map::from_iter(tag_fst_items)
.unwrap()
.into_fst()
.as_bytes()
.to_vec();
let word_store_fst = Map::from_iter(word_store_items)
.unwrap()
.into_fst()
.as_bytes()
.to_vec();
TaggerFields {
tag_fst,
word_store_fst,
tag_store: tagger.tag_store,
}
}
}
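// Illustrative note on the FST layout built above (the concrete values are made up):
// for a word "go" with two (lemma, tag) entries, the keys are the UTF-8 bytes of "go"
// followed by a one-based counter byte (b"go\x01", b"go\x02"), and each value is a u64
// whose four high bytes hold the lemma's word id, the next two bytes are zero padding,
// and the two low bytes hold the part-of-speech id.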
impl From<TaggerFields> for Tagger {
fn from(data: TaggerFields) -> Self {
let word_store_fst = Map::new(data.word_store_fst).unwrap();
let word_store: BiMap<String, WordIdInt> = word_store_fst
.into_stream()
.into_str_vec()
.unwrap()
.into_iter()
.map(|(key, value)| (key, WordIdInt(value as u32)))
.collect();
let mut tags = DefaultHashMap::new();
let mut groups = DefaultHashMap::new();
let tag_fst = Map::new(data.tag_fst).unwrap();
let mut stream = tag_fst.into_stream();
while let Some((key, value)) = stream.next() {
let word = std::str::from_utf8(&key[..key.len() - 1]).unwrap();
let word_id = *word_store.get_by_left(word).unwrap();
let value_bytes = value.to_be_bytes();
let inflection_id = WordIdInt(u32::from_be_bytes([
value_bytes[0],
value_bytes[1],
value_bytes[2],
value_bytes[3],
]));
let pos_id = PosIdInt(u16::from_be_bytes([value_bytes[6], value_bytes[7]]));
let group = groups.entry(inflection_id).or_insert_with(Vec::new);
if !group.contains(&word_id) {
group.push(word_id);
}
tags.entry(word_id)
.or_insert_with(IndexMap::new)
.entry(inflection_id)
.or_insert_with(Vec::new)
.push(pos_id);
}
Tagger {
tags,
tag_store: data.tag_store,
word_store,
groups,
}
}
}
/// The lexical tagger.
#[derive(Default, Serialize, Deserialize, Clone)]
#[serde(from = "TaggerFields", into = "TaggerFields")]
pub struct Tagger {
tags: DefaultHashMap<WordIdInt, IndexMap<WordIdInt, Vec<PosIdInt>>>,
tag_store: BiMap<String, PosIdInt>,
word_store: BiMap<String, WordIdInt>,
groups: DefaultHashMap<WordIdInt, Vec<WordIdInt>>,
}
impl Tagger {
fn get_lines<S1: AsRef<Path>, S2: AsRef<Path>>(
paths: &[S1],
remove_paths: &[S2],
) -> std::io::Result<Vec<(String, String, String)>> {
let mut output = Vec::new();
let mut disallowed: Vec<String> = Vec::new();
for path in remove_paths {
let file = File::open(path.as_ref())?;
let reader = std::io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
disallowed.push(line.to_string());
}
}
for path in paths {
let file = File::open(path.as_ref())?;
let reader = std::io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
if disallowed.contains(&line) {
continue;
}
let parts: Vec<_> = line.split('\t').collect();
let word = parts[0].to_string();
let inflection = parts[1].to_string();
let tag = parts[2].to_string();
output.push((word, inflection, tag))
}
}
Ok(output)
}
/// Creates a tagger from raw files.
///
/// # Arguments
/// * `paths`: Paths to files where each line contains the word, lemma and tag, respectively,
/// separated by tabs, to be added to the tagger.
/// * `remove_paths`: Paths to files where each line contains the word, lemma and tag, respectively,
/// separated by tabs, to be removed from the tagger if present in the files from `paths`.
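///
/// # Example (sketch)
///
/// A hypothetical call, assuming `added.dump` and `removed.dump` use the
/// tab-separated format described above and `common_words` is a `HashSet<String>`:
/// `Tagger::from_dumps(&["added.dump"], &["removed.dump"], &["EXTRA_TAG"], &common_words)?`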
pub fn from_dumps<S1: AsRef<Path>, S2: AsRef<Path>, S3: AsRef<str>>(
paths: &[S1],
remove_paths: &[S2],
extra_tags: &[S3],
common_words: &HashSet<String>,
) -> std::io::Result<Self> {
let mut tags = DefaultHashMap::default();
let mut groups = DefaultHashMap::default();
let mut tag_store = HashSet::new();
let mut word_store = HashSet::new();
// hardcoded special tags
tag_store.insert("");
tag_store.insert("SENT_START");
tag_store.insert("SENT_END");
tag_store.insert("UNKNOWN");
// add language specific special tags
tag_store.extend(extra_tags.iter().map(|x| x.as_ref()));
let lines = Tagger::get_lines(paths, remove_paths)?;
let punct = "!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~";
for i in 0..punct.len() {
word_store.insert(&punct[i..(i + 1)]);
}
word_store.extend(common_words.iter().map(|x| x.as_str()));
for (word, inflection, tag) in lines.iter() {
word_store.insert(word);
word_store.insert(inflection);
tag_store.insert(tag);
}
// word store ids should be consistent across runs
let mut word_store: Vec<_> = word_store.iter().collect();
word_store.sort();
// tag store ids should be consistent across runs
let mut tag_store: Vec<_> = tag_store.iter().collect();
tag_store.sort();
let word_store: BiMap<_, _> = word_store
.iter()
.enumerate()
.map(|(i, x)| (x.to_string(), WordIdInt(i as u32)))
.collect();
let tag_store: BiMap<_, _> = tag_store
.iter()
.enumerate()
.map(|(i, x)| (x.to_string(), PosIdInt(i as u16)))
.collect();
for (word, inflection, tag) in lines.iter() {
let word_id = word_store.get_by_left(word).unwrap();
let inflection_id = word_store.get_by_left(inflection).unwrap();
let pos_id = tag_store.get_by_left(tag).unwrap();
let group = groups.entry(*inflection_id).or_insert_with(Vec::new);
if !group.contains(word_id) {
group.push(*word_id);
}
tags.entry(*word_id)
.or_insert_with(IndexMap::new)
.entry(*inflection_id)
.or_insert_with(Vec::new)
.push(*pos_id);
}
Ok(Tagger {
tags,
groups,
word_store,
tag_store,
})
}
fn get_raw(&self, word: &str) -> Vec<WordData> {
if let Some(map) = self
.word_store
.get_by_left(word)
.and_then(|x| self.tags.get(x))
{
let mut output = Vec::new();
for (key, value) in map.iter() {
for pos_id in value {
output.push(WordData::new(
self.id_word(self.str_for_word_id(key).into()),
self.id_tag(self.str_for_pos_id(pos_id)),
))
}
}
output
} else {
Vec::new()
}
}
fn | (
&self,
word: &str,
add_lower: bool,
add_lower_if_empty: bool,
) -> Vec<WordData> {
let mut tags = self.get_raw(&word);
let lower = word.to_lowercase();
if (add_lower || (add_lower_if_empty && tags.is_empty()))
&& (word != lower
&& (crate::utils::is_title_case(word) || crate::utils::is_uppercase(word)))
{
tags.extend(self.get_raw(&lower));
}
tags
}
#[allow(dead_code)] // used by compile module
pub(crate) fn tag_store(&self) -> &BiMap<String, PosIdInt> {
&self.tag_store
}
#[allow(dead_code)] // used by compile module
pub(crate) fn word_store(&self) -> &BiMap<String, WordIdInt> {
&self.word_store
}
fn str_for_word_id(&self, id: &WordIdInt) -> &str {
self.word_store
.get_by_right(id)
.expect("only valid word ids are created")
}
fn str_for_pos_id(&self, id: &PosIdInt) -> &str {
self.tag_store
.get_by_right(id)
.expect("only valid pos ids are created")
}
pub fn id_tag<'a>(&self, tag: &'a str) -> PosId<'a> {
PosId(
tag,
*self.tag_store.get_by_left(tag).unwrap_or_else(|| {
error!(
"'{}' not found in tag store, please add it to the `extra_tags`. Using UNKNOWN instead.",
tag
);
self.tag_store.get_by_left("UNKNOWN").expect("UNKNOWN tag must exist in tag store")
}),
)
}
pub fn id_word<'t>(&'t self, text: Cow<'t, str>) -> WordId<'t> {
let id = self.word_store.get_by_left(text.as_ref()).copied();
WordId(text, id)
}
/// Get the tags and lemmas (as [WordData][crate::types::WordData]) for the given word.
///
/// # Arguments
/// * `word`: The word to lookup data for.
/// * `add_lower`: Whether to add data for the lowercase variant of the word.
/// * `use_compound_split_heuristic`: Whether to use a heuristic to split compound words.
/// If true, will attempt to find tags for words which are longer than some cutoff and unknown by looking up tags
/// for substrings from left to right until tags are found or a minimum length reached.
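///
/// Illustrative sketch of the heuristic: for an unknown 16-character word, split
/// indices 1..=11 are tried in order, i.e. every proper suffix of at least 5
/// characters; the first suffix with known tags wins, and the removed prefix is
/// prepended to the lowercased lemma of each returned tag.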
pub fn get_tags(
&self,
word: &str,
add_lower: bool,
use_compound_split_heuristic: bool,
) -> Vec<WordData> {
let mut tags = self.get_strict_tags(word, add_lower, true);
// compound splitting heuristic, seems to work reasonably well
if use_compound_split_heuristic && tags.is_empty() {
let n_chars = word.chars().count() as isize;
if n_chars >= 7 {
let indices = word
.char_indices()
.take(std::cmp::max(n_chars - 4, 0) as usize)
.skip(1)
.map(|x| x.0);
// the word always has at least one char if the above condition is satisfied
// but semantically this is false if no char exists
let starts_with_uppercase = word.chars().next().map_or(false, |x| x.is_uppercase());
for i in indices {
let next = if starts_with_uppercase {
crate::utils::apply_to_first(&word[i..], |c| c.to_uppercase().collect())
} else {
word[i..].to_string()
};
let next_tags = self.get_strict_tags(&next, add_lower, false);
if !next_tags.is_empty() {
tags = next_tags
.into_iter()
.map(|mut x| {
x.lemma = self.id_word(
format!("{}{}", &word[..i], x.lemma.as_ref().to_lowercase())
.into(),
);
x
})
.collect();
break;
}
}
}
}
tags
}
/// Get the words with the same lemma as the given lemma.
pub fn get_group_members(&self, lemma: &str) -> Vec<&str> {
self.word_store
.get_by_left(lemma)
.and_then(|x| self.groups.get(x))
.map(|vec| vec.iter().map(|x| self.str_for_word_id(x)).collect())
.unwrap_or_else(Vec::new)
}
}
| get_strict_tags | identifier_name |
tag.rs | //! A dictionary-based tagger. The raw format is tuples of the form `(word, lemma, part-of-speech)`
//! where each word typically has multiple entries with different part-of-speech tags.
use crate::types::*;
use bimap::BiMap;
use fs_err::File;
use fst::{IntoStreamer, Map, Streamer};
use indexmap::IndexMap;
use log::error;
use serde::{Deserialize, Serialize};
use std::io::BufRead;
use std::{borrow::Cow, iter::once};
use std::{collections::HashSet, path::Path};
#[derive(Serialize, Deserialize)]
struct TaggerFields {
tag_fst: Vec<u8>,
word_store_fst: Vec<u8>,
tag_store: BiMap<String, PosIdInt>,
}
impl From<Tagger> for TaggerFields {
fn from(tagger: Tagger) -> Self {
let mut tag_fst_items = Vec::new();
for (word_id, map) in tagger.tags.iter() {
let mut i = 0u8;
let word = tagger.str_for_word_id(word_id);
for (inflect_id, pos_ids) in map.iter() {
for pos_id in pos_ids {
assert!(i < 255);
i += 1;
let key: Vec<u8> = word.as_bytes().iter().chain(once(&i)).copied().collect();
let pos_bytes = pos_id.0.to_be_bytes();
let inflect_bytes = inflect_id.0.to_be_bytes();
let value = u64::from_be_bytes([
inflect_bytes[0],
inflect_bytes[1],
inflect_bytes[2],
inflect_bytes[3],
0,
0,
pos_bytes[0],
pos_bytes[1],
]);
tag_fst_items.push((key, value));
}
}
}
tag_fst_items.sort_by(|(a, _), (b, _)| a.cmp(b));
let mut word_store_items: Vec<_> = tagger
.word_store
.iter()
.map(|(key, value)| (key.clone(), value.0 as u64))
.collect();
word_store_items.sort_by(|(a, _), (b, _)| a.cmp(b));
let tag_fst = Map::from_iter(tag_fst_items)
.unwrap()
.into_fst()
.as_bytes()
.to_vec();
let word_store_fst = Map::from_iter(word_store_items)
.unwrap()
.into_fst()
.as_bytes()
.to_vec();
TaggerFields {
tag_fst,
word_store_fst,
tag_store: tagger.tag_store,
}
}
}
impl From<TaggerFields> for Tagger {
fn from(data: TaggerFields) -> Self {
let word_store_fst = Map::new(data.word_store_fst).unwrap();
let word_store: BiMap<String, WordIdInt> = word_store_fst
.into_stream()
.into_str_vec()
.unwrap()
.into_iter()
.map(|(key, value)| (key, WordIdInt(value as u32)))
.collect();
let mut tags = DefaultHashMap::new();
let mut groups = DefaultHashMap::new();
let tag_fst = Map::new(data.tag_fst).unwrap();
let mut stream = tag_fst.into_stream();
while let Some((key, value)) = stream.next() {
let word = std::str::from_utf8(&key[..key.len() - 1]).unwrap();
let word_id = *word_store.get_by_left(word).unwrap();
let value_bytes = value.to_be_bytes();
let inflection_id = WordIdInt(u32::from_be_bytes([
value_bytes[0],
value_bytes[1],
value_bytes[2],
value_bytes[3],
]));
let pos_id = PosIdInt(u16::from_be_bytes([value_bytes[6], value_bytes[7]]));
let group = groups.entry(inflection_id).or_insert_with(Vec::new);
if !group.contains(&word_id) {
group.push(word_id);
}
tags.entry(word_id)
.or_insert_with(IndexMap::new)
.entry(inflection_id)
.or_insert_with(Vec::new)
.push(pos_id);
}
Tagger {
tags,
tag_store: data.tag_store,
word_store,
groups,
}
}
}
/// The lexical tagger.
#[derive(Default, Serialize, Deserialize, Clone)]
#[serde(from = "TaggerFields", into = "TaggerFields")]
pub struct Tagger {
tags: DefaultHashMap<WordIdInt, IndexMap<WordIdInt, Vec<PosIdInt>>>,
tag_store: BiMap<String, PosIdInt>,
word_store: BiMap<String, WordIdInt>,
groups: DefaultHashMap<WordIdInt, Vec<WordIdInt>>,
}
impl Tagger {
fn get_lines<S1: AsRef<Path>, S2: AsRef<Path>>(
paths: &[S1],
remove_paths: &[S2],
) -> std::io::Result<Vec<(String, String, String)>> {
let mut output = Vec::new();
let mut disallowed: Vec<String> = Vec::new();
for path in remove_paths {
let file = File::open(path.as_ref())?;
let reader = std::io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
disallowed.push(line.to_string());
}
}
for path in paths {
let file = File::open(path.as_ref())?;
let reader = std::io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
if disallowed.contains(&line) {
continue;
}
let parts: Vec<_> = line.split('\t').collect();
let word = parts[0].to_string();
let inflection = parts[1].to_string();
let tag = parts[2].to_string();
output.push((word, inflection, tag))
}
}
Ok(output)
}
/// Creates a tagger from raw files.
///
/// # Arguments
/// * `paths`: Paths to files where each line contains the word, lemma and tag, respectively,
/// separated by tabs, to be added to the tagger.
/// * `remove_paths`: Paths to files where each line contains the word, lemma and tag, respectively,
/// separated by tabs, to be removed from the tagger if present in the files from `paths`.
pub fn from_dumps<S1: AsRef<Path>, S2: AsRef<Path>, S3: AsRef<str>>(
paths: &[S1],
remove_paths: &[S2],
extra_tags: &[S3],
common_words: &HashSet<String>,
) -> std::io::Result<Self> {
let mut tags = DefaultHashMap::default();
let mut groups = DefaultHashMap::default();
let mut tag_store = HashSet::new();
let mut word_store = HashSet::new();
// hardcoded special tags
tag_store.insert("");
tag_store.insert("SENT_START");
tag_store.insert("SENT_END");
tag_store.insert("UNKNOWN");
// add language specific special tags
tag_store.extend(extra_tags.iter().map(|x| x.as_ref()));
let lines = Tagger::get_lines(paths, remove_paths)?;
let punct = "!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~";
for i in 0..punct.len() {
word_store.insert(&punct[i..(i + 1)]);
}
word_store.extend(common_words.iter().map(|x| x.as_str()));
for (word, inflection, tag) in lines.iter() {
word_store.insert(word);
word_store.insert(inflection);
tag_store.insert(tag);
}
// word store ids should be consistent across runs
let mut word_store: Vec<_> = word_store.iter().collect();
word_store.sort();
// tag store ids should be consistent across runs
let mut tag_store: Vec<_> = tag_store.iter().collect();
tag_store.sort();
let word_store: BiMap<_, _> = word_store
.iter()
.enumerate()
.map(|(i, x)| (x.to_string(), WordIdInt(i as u32)))
.collect();
let tag_store: BiMap<_, _> = tag_store
.iter()
.enumerate()
.map(|(i, x)| (x.to_string(), PosIdInt(i as u16)))
.collect();
for (word, inflection, tag) in lines.iter() {
let word_id = word_store.get_by_left(word).unwrap();
let inflection_id = word_store.get_by_left(inflection).unwrap();
let pos_id = tag_store.get_by_left(tag).unwrap();
let group = groups.entry(*inflection_id).or_insert_with(Vec::new);
if !group.contains(word_id) {
group.push(*word_id);
}
tags.entry(*word_id)
.or_insert_with(IndexMap::new)
.entry(*inflection_id)
.or_insert_with(Vec::new)
.push(*pos_id);
}
Ok(Tagger {
tags,
groups,
word_store,
tag_store,
})
}
fn get_raw(&self, word: &str) -> Vec<WordData> {
if let Some(map) = self
.word_store
.get_by_left(word)
.and_then(|x| self.tags.get(x))
{
let mut output = Vec::new();
for (key, value) in map.iter() {
for pos_id in value {
output.push(WordData::new(
self.id_word(self.str_for_word_id(key).into()),
self.id_tag(self.str_for_pos_id(pos_id)),
))
}
}
output
} else {
Vec::new()
}
}
fn get_strict_tags(
&self,
word: &str,
add_lower: bool,
add_lower_if_empty: bool,
) -> Vec<WordData> {
let mut tags = self.get_raw(&word);
let lower = word.to_lowercase();
if (add_lower || (add_lower_if_empty && tags.is_empty()))
&& (word != lower
&& (crate::utils::is_title_case(word) || crate::utils::is_uppercase(word)))
{
tags.extend(self.get_raw(&lower));
}
tags
}
#[allow(dead_code)] // used by compile module
pub(crate) fn tag_store(&self) -> &BiMap<String, PosIdInt> {
&self.tag_store
}
#[allow(dead_code)] // used by compile module
pub(crate) fn word_store(&self) -> &BiMap<String, WordIdInt> {
&self.word_store
}
fn str_for_word_id(&self, id: &WordIdInt) -> &str {
self.word_store
.get_by_right(id)
.expect("only valid word ids are created")
}
fn str_for_pos_id(&self, id: &PosIdInt) -> &str {
self.tag_store
.get_by_right(id)
.expect("only valid pos ids are created")
}
pub fn id_tag<'a>(&self, tag: &'a str) -> PosId<'a> {
PosId(
tag,
*self.tag_store.get_by_left(tag).unwrap_or_else(|| {
error!(
"'{}' not found in tag store, please add it to the `extra_tags`. Using UNKNOWN instead.",
tag
);
self.tag_store.get_by_left("UNKNOWN").expect("UNKNOWN tag must exist in tag store")
}),
)
}
pub fn id_word<'t>(&'t self, text: Cow<'t, str>) -> WordId<'t> {
let id = self.word_store.get_by_left(text.as_ref()).copied();
WordId(text, id)
}
/// Get the tags and lemmas (as [WordData][crate::types::WordData]) for the given word.
///
/// # Arguments
/// * `word`: The word to lookup data for.
/// * `add_lower`: Whether to add data for the lowercase variant of the word.
/// * `use_compound_split_heuristic`: Whether to use a heuristic to split compound words.
/// If true, will attempt to find tags for words which are longer than some cutoff and unknown by looking up tags
/// for substrings from left to right until tags are found or a minimum length reached.
pub fn get_tags(
&self,
word: &str,
add_lower: bool,
use_compound_split_heuristic: bool,
) -> Vec<WordData> {
let mut tags = self.get_strict_tags(word, add_lower, true);
// compound splitting heuristic, seems to work reasonably well
if use_compound_split_heuristic && tags.is_empty() {
let n_chars = word.chars().count() as isize;
if n_chars >= 7 |
}
tags
}
/// Get the words with the same lemma as the given lemma.
pub fn get_group_members(&self, lemma: &str) -> Vec<&str> {
self.word_store
.get_by_left(lemma)
.and_then(|x| self.groups.get(x))
.map(|vec| vec.iter().map(|x| self.str_for_word_id(x)).collect())
.unwrap_or_else(Vec::new)
}
}
| {
let indices = word
.char_indices()
.take(std::cmp::max(n_chars - 4, 0) as usize)
.skip(1)
.map(|x| x.0);
// the word always has at least one char if the above condition is satisfied
// but semantically this is false if no char exists
let starts_with_uppercase = word.chars().next().map_or(false, |x| x.is_uppercase());
for i in indices {
let next = if starts_with_uppercase {
crate::utils::apply_to_first(&word[i..], |c| c.to_uppercase().collect())
} else {
word[i..].to_string()
};
let next_tags = self.get_strict_tags(&next, add_lower, false);
if !next_tags.is_empty() {
tags = next_tags
.into_iter()
.map(|mut x| {
x.lemma = self.id_word(
format!("{}{}", &word[..i], x.lemma.as_ref().to_lowercase())
.into(),
);
x
})
.collect();
break;
}
}
} | conditional_block |
tag.rs | //! A dictionary-based tagger. The raw format is tuples of the form `(word, lemma, part-of-speech)`
//! where each word typically has multiple entries with different part-of-speech tags.
use crate::types::*;
use bimap::BiMap;
use fs_err::File;
use fst::{IntoStreamer, Map, Streamer};
use indexmap::IndexMap;
use log::error;
use serde::{Deserialize, Serialize};
use std::io::BufRead;
use std::{borrow::Cow, iter::once};
use std::{collections::HashSet, path::Path};
#[derive(Serialize, Deserialize)]
struct TaggerFields {
tag_fst: Vec<u8>,
word_store_fst: Vec<u8>,
tag_store: BiMap<String, PosIdInt>,
}
impl From<Tagger> for TaggerFields {
fn from(tagger: Tagger) -> Self {
let mut tag_fst_items = Vec::new();
for (word_id, map) in tagger.tags.iter() {
let mut i = 0u8;
let word = tagger.str_for_word_id(word_id);
for (inflect_id, pos_ids) in map.iter() {
for pos_id in pos_ids {
assert!(i < 255);
i += 1;
let key: Vec<u8> = word.as_bytes().iter().chain(once(&i)).copied().collect();
let pos_bytes = pos_id.0.to_be_bytes();
let inflect_bytes = inflect_id.0.to_be_bytes();
let value = u64::from_be_bytes([
inflect_bytes[0],
inflect_bytes[1],
inflect_bytes[2],
inflect_bytes[3],
0,
0,
pos_bytes[0],
pos_bytes[1],
]);
tag_fst_items.push((key, value));
}
}
}
tag_fst_items.sort_by(|(a, _), (b, _)| a.cmp(b));
let mut word_store_items: Vec<_> = tagger
.word_store
.iter()
.map(|(key, value)| (key.clone(), value.0 as u64))
.collect();
word_store_items.sort_by(|(a, _), (b, _)| a.cmp(b));
let tag_fst = Map::from_iter(tag_fst_items)
.unwrap()
.into_fst()
.as_bytes()
.to_vec();
let word_store_fst = Map::from_iter(word_store_items)
.unwrap()
.into_fst()
.as_bytes()
.to_vec();
TaggerFields {
tag_fst,
word_store_fst,
tag_store: tagger.tag_store,
}
}
}
impl From<TaggerFields> for Tagger {
fn from(data: TaggerFields) -> Self {
let word_store_fst = Map::new(data.word_store_fst).unwrap();
let word_store: BiMap<String, WordIdInt> = word_store_fst
.into_stream()
.into_str_vec()
.unwrap()
.into_iter()
.map(|(key, value)| (key, WordIdInt(value as u32)))
.collect();
let mut tags = DefaultHashMap::new();
let mut groups = DefaultHashMap::new();
let tag_fst = Map::new(data.tag_fst).unwrap();
let mut stream = tag_fst.into_stream();
while let Some((key, value)) = stream.next() {
let word = std::str::from_utf8(&key[..key.len() - 1]).unwrap();
let word_id = *word_store.get_by_left(word).unwrap();
let value_bytes = value.to_be_bytes();
let inflection_id = WordIdInt(u32::from_be_bytes([
value_bytes[0],
value_bytes[1],
value_bytes[2],
value_bytes[3],
]));
let pos_id = PosIdInt(u16::from_be_bytes([value_bytes[6], value_bytes[7]]));
let group = groups.entry(inflection_id).or_insert_with(Vec::new);
if !group.contains(&word_id) {
group.push(word_id);
}
tags.entry(word_id)
.or_insert_with(IndexMap::new)
.entry(inflection_id)
.or_insert_with(Vec::new)
.push(pos_id);
}
Tagger {
tags,
tag_store: data.tag_store,
word_store,
groups,
}
}
}
/// The lexical tagger.
#[derive(Default, Serialize, Deserialize, Clone)]
#[serde(from = "TaggerFields", into = "TaggerFields")]
pub struct Tagger {
tags: DefaultHashMap<WordIdInt, IndexMap<WordIdInt, Vec<PosIdInt>>>,
tag_store: BiMap<String, PosIdInt>,
word_store: BiMap<String, WordIdInt>,
groups: DefaultHashMap<WordIdInt, Vec<WordIdInt>>,
}
impl Tagger {
fn get_lines<S1: AsRef<Path>, S2: AsRef<Path>>(
paths: &[S1],
remove_paths: &[S2],
) -> std::io::Result<Vec<(String, String, String)>> {
let mut output = Vec::new();
let mut disallowed: Vec<String> = Vec::new();
for path in remove_paths {
let file = File::open(path.as_ref())?;
let reader = std::io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
disallowed.push(line.to_string());
}
}
for path in paths {
let file = File::open(path.as_ref())?; | let reader = std::io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
if disallowed.contains(&line) {
continue;
}
let parts: Vec<_> = line.split('\t').collect();
let word = parts[0].to_string();
let inflection = parts[1].to_string();
let tag = parts[2].to_string();
output.push((word, inflection, tag))
}
}
Ok(output)
}
/// Creates a tagger from raw files.
///
/// # Arguments
/// * `paths`: Paths to files where each line contains the word, lemma and tag, respectively,
/// separated by tabs, to be added to the tagger.
/// * `remove_paths`: Paths to files where each line contains the word, lemma and tag, respectively,
/// separated by tabs, to be removed from the tagger if present in the files from `paths`.
pub fn from_dumps<S1: AsRef<Path>, S2: AsRef<Path>, S3: AsRef<str>>(
paths: &[S1],
remove_paths: &[S2],
extra_tags: &[S3],
common_words: &HashSet<String>,
) -> std::io::Result<Self> {
let mut tags = DefaultHashMap::default();
let mut groups = DefaultHashMap::default();
let mut tag_store = HashSet::new();
let mut word_store = HashSet::new();
// hardcoded special tags
tag_store.insert("");
tag_store.insert("SENT_START");
tag_store.insert("SENT_END");
tag_store.insert("UNKNOWN");
// add language specific special tags
tag_store.extend(extra_tags.iter().map(|x| x.as_ref()));
let lines = Tagger::get_lines(paths, remove_paths)?;
let punct = "!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~";
for i in 0..punct.len() {
word_store.insert(&punct[i..(i + 1)]);
}
word_store.extend(common_words.iter().map(|x| x.as_str()));
for (word, inflection, tag) in lines.iter() {
word_store.insert(word);
word_store.insert(inflection);
tag_store.insert(tag);
}
// word store ids should be consistent across runs
let mut word_store: Vec<_> = word_store.iter().collect();
word_store.sort();
// tag store ids should be consistent across runs
let mut tag_store: Vec<_> = tag_store.iter().collect();
tag_store.sort();
let word_store: BiMap<_, _> = word_store
.iter()
.enumerate()
.map(|(i, x)| (x.to_string(), WordIdInt(i as u32)))
.collect();
let tag_store: BiMap<_, _> = tag_store
.iter()
.enumerate()
.map(|(i, x)| (x.to_string(), PosIdInt(i as u16)))
.collect();
for (word, inflection, tag) in lines.iter() {
let word_id = word_store.get_by_left(word).unwrap();
let inflection_id = word_store.get_by_left(inflection).unwrap();
let pos_id = tag_store.get_by_left(tag).unwrap();
let group = groups.entry(*inflection_id).or_insert_with(Vec::new);
if !group.contains(word_id) {
group.push(*word_id);
}
tags.entry(*word_id)
.or_insert_with(IndexMap::new)
.entry(*inflection_id)
.or_insert_with(Vec::new)
.push(*pos_id);
}
Ok(Tagger {
tags,
groups,
word_store,
tag_store,
})
}
fn get_raw(&self, word: &str) -> Vec<WordData> {
if let Some(map) = self
.word_store
.get_by_left(word)
.and_then(|x| self.tags.get(x))
{
let mut output = Vec::new();
for (key, value) in map.iter() {
for pos_id in value {
output.push(WordData::new(
self.id_word(self.str_for_word_id(key).into()),
self.id_tag(self.str_for_pos_id(pos_id)),
))
}
}
output
} else {
Vec::new()
}
}
fn get_strict_tags(
&self,
word: &str,
add_lower: bool,
add_lower_if_empty: bool,
) -> Vec<WordData> {
let mut tags = self.get_raw(&word);
let lower = word.to_lowercase();
if (add_lower || (add_lower_if_empty && tags.is_empty()))
&& (word != lower
&& (crate::utils::is_title_case(word) || crate::utils::is_uppercase(word)))
{
tags.extend(self.get_raw(&lower));
}
tags
}
#[allow(dead_code)] // used by compile module
pub(crate) fn tag_store(&self) -> &BiMap<String, PosIdInt> {
&self.tag_store
}
#[allow(dead_code)] // used by compile module
pub(crate) fn word_store(&self) -> &BiMap<String, WordIdInt> {
&self.word_store
}
fn str_for_word_id(&self, id: &WordIdInt) -> &str {
self.word_store
.get_by_right(id)
.expect("only valid word ids are created")
}
fn str_for_pos_id(&self, id: &PosIdInt) -> &str {
self.tag_store
.get_by_right(id)
.expect("only valid pos ids are created")
}
pub fn id_tag<'a>(&self, tag: &'a str) -> PosId<'a> {
PosId(
tag,
*self.tag_store.get_by_left(tag).unwrap_or_else(|| {
error!(
"'{}' not found in tag store, please add it to the `extra_tags`. Using UNKNOWN instead.",
tag
);
self.tag_store.get_by_left("UNKNOWN").expect("UNKNOWN tag must exist in tag store")
}),
)
}
pub fn id_word<'t>(&'t self, text: Cow<'t, str>) -> WordId<'t> {
let id = self.word_store.get_by_left(text.as_ref()).copied();
WordId(text, id)
}
/// Get the tags and lemmas (as [WordData][crate::types::WordData]) for the given word.
///
/// # Arguments
/// * `word`: The word to lookup data for.
/// * `add_lower`: Whether to add data for the lowercase variant of the word.
/// * `use_compound_split_heuristic`: Whether to use a heuristic to split compound words.
/// If true, will attempt to find tags for words which are longer than some cutoff and unknown by looking up tags
/// for substrings from left to right until tags are found or a minimum length reached.
pub fn get_tags(
&self,
word: &str,
add_lower: bool,
use_compound_split_heuristic: bool,
) -> Vec<WordData> {
let mut tags = self.get_strict_tags(word, add_lower, true);
// compound splitting heuristic, seems to work reasonably well
if use_compound_split_heuristic && tags.is_empty() {
let n_chars = word.chars().count() as isize;
if n_chars >= 7 {
let indices = word
.char_indices()
.take(std::cmp::max(n_chars - 4, 0) as usize)
.skip(1)
.map(|x| x.0);
// the word always has at least one char if the above condition is satisfied
// but semantically this is false if no char exists
let starts_with_uppercase = word.chars().next().map_or(false, |x| x.is_uppercase());
for i in indices {
let next = if starts_with_uppercase {
crate::utils::apply_to_first(&word[i..], |c| c.to_uppercase().collect())
} else {
word[i..].to_string()
};
let next_tags = self.get_strict_tags(&next, add_lower, false);
if !next_tags.is_empty() {
tags = next_tags
.into_iter()
.map(|mut x| {
x.lemma = self.id_word(
format!("{}{}", &word[..i], x.lemma.as_ref().to_lowercase())
.into(),
);
x
})
.collect();
break;
}
}
}
}
tags
}
/// Get the words with the same lemma as the given lemma.
pub fn get_group_members(&self, lemma: &str) -> Vec<&str> {
self.word_store
.get_by_left(lemma)
.and_then(|x| self.groups.get(x))
.map(|vec| vec.iter().map(|x| self.str_for_word_id(x)).collect())
.unwrap_or_else(Vec::new)
}
} | random_line_split |
|
tag.rs | //! A dictionary-based tagger. The raw format is tuples of the form `(word, lemma, part-of-speech)`
//! where each word typically has multiple entries with different part-of-speech tags.
use crate::types::*;
use bimap::BiMap;
use fs_err::File;
use fst::{IntoStreamer, Map, Streamer};
use indexmap::IndexMap;
use log::error;
use serde::{Deserialize, Serialize};
use std::io::BufRead;
use std::{borrow::Cow, iter::once};
use std::{collections::HashSet, path::Path};
#[derive(Serialize, Deserialize)]
struct TaggerFields {
tag_fst: Vec<u8>,
word_store_fst: Vec<u8>,
tag_store: BiMap<String, PosIdInt>,
}
impl From<Tagger> for TaggerFields {
fn from(tagger: Tagger) -> Self {
let mut tag_fst_items = Vec::new();
for (word_id, map) in tagger.tags.iter() {
let mut i = 0u8;
let word = tagger.str_for_word_id(word_id);
for (inflect_id, pos_ids) in map.iter() {
for pos_id in pos_ids {
assert!(i < 255);
i += 1;
let key: Vec<u8> = word.as_bytes().iter().chain(once(&i)).copied().collect();
let pos_bytes = pos_id.0.to_be_bytes();
let inflect_bytes = inflect_id.0.to_be_bytes();
let value = u64::from_be_bytes([
inflect_bytes[0],
inflect_bytes[1],
inflect_bytes[2],
inflect_bytes[3],
0,
0,
pos_bytes[0],
pos_bytes[1],
]);
tag_fst_items.push((key, value));
}
}
}
tag_fst_items.sort_by(|(a, _), (b, _)| a.cmp(b));
let mut word_store_items: Vec<_> = tagger
.word_store
.iter()
.map(|(key, value)| (key.clone(), value.0 as u64))
.collect();
word_store_items.sort_by(|(a, _), (b, _)| a.cmp(b));
let tag_fst = Map::from_iter(tag_fst_items)
.unwrap()
.into_fst()
.as_bytes()
.to_vec();
let word_store_fst = Map::from_iter(word_store_items)
.unwrap()
.into_fst()
.as_bytes()
.to_vec();
TaggerFields {
tag_fst,
word_store_fst,
tag_store: tagger.tag_store,
}
}
}
impl From<TaggerFields> for Tagger {
fn from(data: TaggerFields) -> Self {
let word_store_fst = Map::new(data.word_store_fst).unwrap();
let word_store: BiMap<String, WordIdInt> = word_store_fst
.into_stream()
.into_str_vec()
.unwrap()
.into_iter()
.map(|(key, value)| (key, WordIdInt(value as u32)))
.collect();
let mut tags = DefaultHashMap::new();
let mut groups = DefaultHashMap::new();
let tag_fst = Map::new(data.tag_fst).unwrap();
let mut stream = tag_fst.into_stream();
while let Some((key, value)) = stream.next() {
let word = std::str::from_utf8(&key[..key.len() - 1]).unwrap();
let word_id = *word_store.get_by_left(word).unwrap();
let value_bytes = value.to_be_bytes();
let inflection_id = WordIdInt(u32::from_be_bytes([
value_bytes[0],
value_bytes[1],
value_bytes[2],
value_bytes[3],
]));
let pos_id = PosIdInt(u16::from_be_bytes([value_bytes[6], value_bytes[7]]));
let group = groups.entry(inflection_id).or_insert_with(Vec::new);
if !group.contains(&word_id) {
group.push(word_id);
}
tags.entry(word_id)
.or_insert_with(IndexMap::new)
.entry(inflection_id)
.or_insert_with(Vec::new)
.push(pos_id);
}
Tagger {
tags,
tag_store: data.tag_store,
word_store,
groups,
}
}
}
/// The lexical tagger.
#[derive(Default, Serialize, Deserialize, Clone)]
#[serde(from = "TaggerFields", into = "TaggerFields")]
pub struct Tagger {
tags: DefaultHashMap<WordIdInt, IndexMap<WordIdInt, Vec<PosIdInt>>>,
tag_store: BiMap<String, PosIdInt>,
word_store: BiMap<String, WordIdInt>,
groups: DefaultHashMap<WordIdInt, Vec<WordIdInt>>,
}
impl Tagger {
fn get_lines<S1: AsRef<Path>, S2: AsRef<Path>>(
paths: &[S1],
remove_paths: &[S2],
) -> std::io::Result<Vec<(String, String, String)>> {
let mut output = Vec::new();
let mut disallowed: Vec<String> = Vec::new();
for path in remove_paths {
let file = File::open(path.as_ref())?;
let reader = std::io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
disallowed.push(line.to_string());
}
}
for path in paths {
let file = File::open(path.as_ref())?;
let reader = std::io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
if line.starts_with('#') {
continue;
}
if disallowed.contains(&line) {
continue;
}
let parts: Vec<_> = line.split('\t').collect();
let word = parts[0].to_string();
let inflection = parts[1].to_string();
let tag = parts[2].to_string();
output.push((word, inflection, tag))
}
}
Ok(output)
}
/// Creates a tagger from raw files.
///
/// # Arguments
/// * `paths`: Paths to files where each line contains the word, lemma and tag, respectively,
/// separated by tabs, to be added to the tagger.
/// * `remove_paths`: Paths to files where each line contains the word, lemma and tag, respectively,
/// separated by tabs, to be removed from the tagger if present in the files from `paths`.
pub fn from_dumps<S1: AsRef<Path>, S2: AsRef<Path>, S3: AsRef<str>>(
paths: &[S1],
remove_paths: &[S2],
extra_tags: &[S3],
common_words: &HashSet<String>,
) -> std::io::Result<Self> {
let mut tags = DefaultHashMap::default();
let mut groups = DefaultHashMap::default();
let mut tag_store = HashSet::new();
let mut word_store = HashSet::new();
// hardcoded special tags
tag_store.insert("");
tag_store.insert("SENT_START");
tag_store.insert("SENT_END");
tag_store.insert("UNKNOWN");
// add language specific special tags
tag_store.extend(extra_tags.iter().map(|x| x.as_ref()));
let lines = Tagger::get_lines(paths, remove_paths)?;
let punct = "!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~";
for i in 0..punct.len() {
word_store.insert(&punct[i..(i + 1)]);
}
word_store.extend(common_words.iter().map(|x| x.as_str()));
for (word, inflection, tag) in lines.iter() {
word_store.insert(word);
word_store.insert(inflection);
tag_store.insert(tag);
}
// word store ids should be consistent across runs
let mut word_store: Vec<_> = word_store.iter().collect();
word_store.sort();
// tag store ids should be consistent across runs
let mut tag_store: Vec<_> = tag_store.iter().collect();
tag_store.sort();
let word_store: BiMap<_, _> = word_store
.iter()
.enumerate()
.map(|(i, x)| (x.to_string(), WordIdInt(i as u32)))
.collect();
let tag_store: BiMap<_, _> = tag_store
.iter()
.enumerate()
.map(|(i, x)| (x.to_string(), PosIdInt(i as u16)))
.collect();
for (word, inflection, tag) in lines.iter() {
let word_id = word_store.get_by_left(word).unwrap();
let inflection_id = word_store.get_by_left(inflection).unwrap();
let pos_id = tag_store.get_by_left(tag).unwrap();
let group = groups.entry(*inflection_id).or_insert_with(Vec::new);
if !group.contains(word_id) {
group.push(*word_id);
}
tags.entry(*word_id)
.or_insert_with(IndexMap::new)
.entry(*inflection_id)
.or_insert_with(Vec::new)
.push(*pos_id);
}
Ok(Tagger {
tags,
groups,
word_store,
tag_store,
})
}
fn get_raw(&self, word: &str) -> Vec<WordData> {
if let Some(map) = self
.word_store
.get_by_left(word)
.and_then(|x| self.tags.get(x))
{
let mut output = Vec::new();
for (key, value) in map.iter() {
for pos_id in value {
output.push(WordData::new(
self.id_word(self.str_for_word_id(key).into()),
self.id_tag(self.str_for_pos_id(pos_id)),
))
}
}
output
} else {
Vec::new()
}
}
fn get_strict_tags(
&self,
word: &str,
add_lower: bool,
add_lower_if_empty: bool,
) -> Vec<WordData> {
let mut tags = self.get_raw(&word);
let lower = word.to_lowercase();
if (add_lower || (add_lower_if_empty && tags.is_empty()))
&& (word != lower
&& (crate::utils::is_title_case(word) || crate::utils::is_uppercase(word)))
{
tags.extend(self.get_raw(&lower));
}
tags
}
#[allow(dead_code)] // used by compile module
pub(crate) fn tag_store(&self) -> &BiMap<String, PosIdInt> {
&self.tag_store
}
#[allow(dead_code)] // used by compile module
pub(crate) fn word_store(&self) -> &BiMap<String, WordIdInt> {
&self.word_store
}
fn str_for_word_id(&self, id: &WordIdInt) -> &str {
self.word_store
.get_by_right(id)
.expect("only valid word ids are created")
}
fn str_for_pos_id(&self, id: &PosIdInt) -> &str {
self.tag_store
.get_by_right(id)
.expect("only valid pos ids are created")
}
pub fn id_tag<'a>(&self, tag: &'a str) -> PosId<'a> {
PosId(
tag,
*self.tag_store.get_by_left(tag).unwrap_or_else(|| {
error!(
"'{}' not found in tag store, please add it to the `extra_tags`. Using UNKNOWN instead.",
tag
);
self.tag_store.get_by_left("UNKNOWN").expect("UNKNOWN tag must exist in tag store")
}),
)
}
pub fn id_word<'t>(&'t self, text: Cow<'t, str>) -> WordId<'t> {
let id = self.word_store.get_by_left(text.as_ref()).copied();
WordId(text, id)
}
/// Get the tags and lemmas (as [WordData][crate::types::WordData]) for the given word.
///
/// # Arguments
/// * `word`: The word to lookup data for.
/// * `add_lower`: Whether to add data for the lowercase variant of the word.
/// * `use_compound_split_heuristic`: Whether to use a heuristic to split compound words.
/// If true, will attempt to find tags for words which are longer than some cutoff and unknown by looking up tags
/// for substrings from left to right until tags are found or a minimum length reached.
pub fn get_tags(
&self,
word: &str,
add_lower: bool,
use_compound_split_heuristic: bool,
) -> Vec<WordData> {
let mut tags = self.get_strict_tags(word, add_lower, true);
// compound splitting heuristic, seems to work reasonably well
if use_compound_split_heuristic && tags.is_empty() {
let n_chars = word.chars().count() as isize;
if n_chars >= 7 {
let indices = word
.char_indices()
.take(std::cmp::max(n_chars - 4, 0) as usize)
.skip(1)
.map(|x| x.0);
// the word always has at least one char if the above condition is satisfied
// but semantically this is false if no char exists
let starts_with_uppercase = word.chars().next().map_or(false, |x| x.is_uppercase());
for i in indices {
let next = if starts_with_uppercase {
crate::utils::apply_to_first(&word[i..], |c| c.to_uppercase().collect())
} else {
word[i..].to_string()
};
let next_tags = self.get_strict_tags(&next, add_lower, false);
if !next_tags.is_empty() {
tags = next_tags
.into_iter()
.map(|mut x| {
x.lemma = self.id_word(
format!("{}{}", &word[..i], x.lemma.as_ref().to_lowercase())
.into(),
);
x
})
.collect();
break;
}
}
}
}
tags
}
/// Get the words with the same lemma as the given lemma.
pub fn get_group_members(&self, lemma: &str) -> Vec<&str> |
}
| {
self.word_store
.get_by_left(lemma)
.and_then(|x| self.groups.get(x))
.map(|vec| vec.iter().map(|x| self.str_for_word_id(x)).collect())
.unwrap_or_else(Vec::new)
} | identifier_body |
base.py | ################################################################################
# MIT License
#
# Copyright (c) 2017 Jean-Charles Fosse & Johann Bigler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import fields, collections, bcrypt
from twisted.internet.defer import inlineCallbacks, returnValue
from fields import PrimaryKeyField
from query import SelectQuery, \
InsertQuery, \
AddQuery, \
RemoveQuery, \
UpdateQuery, \
DeleteQuery
"""
The metaclass enables each Model class to have its own set of variables.
This set of variables is represented by the ModelOptions class
"""
_METACLASS_ = '_metaclass_helper_'
def with_metaclass(meta, base=object):
return meta(_METACLASS_, (base,), {})
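# Illustrative sketch (assumed client code, not defined in this module): a model
# would typically be declared as
#     class Book(Model):
#         title = fields.CharField() # hypothetical field type
#         class Meta:
#             database = db
#             table_name = "books"
# BaseModel then collects the Meta attributes into Book._meta, a ModelOptions instance.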
class ModelOptions(object):
"""
Represents all the options associated with a model.
They are accessible using the _meta variable from a Model object
"""
def __init__(self, cls,
table_name = None,
database = None,
primary_key = True,
on_conflict = [],
unique = [],
many_to_many = False,
order = [],
propagate = False,
hypertable = []):
# Model class
self.model_class = cls
# Model name
self.name = cls.__name__.lower()
# Table name. Either set by the user or derived from the model name
self.table_name = table_name.lower() if table_name else self.name
# Database to use
self.database = database
# Does the model have a primary key? If so, it will be set by Kameleon
self.primary_key = primary_key
# XXX
self.on_conflict = on_conflict
# List of fields whose association should be unique.
# XXX #3 Today it receives a string.
# It should be receiving a list of fields
self.unique = unique
# Is this model a middle table for a many to many link
self.many_to_many = many_to_many
# Map of links represented by this table. Filled by the class
self.links = {}
# Order to respect. Useful if table not created by the ORM
self.order = order
# Should any change on a model be propagated
self.propagate = propagate
# Should the table be converted to a hypertable.
self.hypertable = hypertable
# Map of fields
self.fields = {}
# Map of reverse relation fields
self.reverse_fields = {}
# List of fields sorted in order
self.sorted_fields = []
# Fields name sorted in order
self.sorted_fields_names = []
# Map of direct relation
self.rel = {}
# Map of reverse relation
self.reverse_rel = {}
# Map of related classes and the field associated
self.rel_class = {}
def add_field(self, field):
"""
Add a field to the class. It makes sure all related variables are
up to date
"""
if field.name in self.fields:
print("WARNING: Field {0} already in model {1}"
.format(field.name, self.table_name))
return
self.fields[field.name] = field
self.sorted_fields.append(field)
self.sorted_fields_names.append(field.name)
class BaseModel(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
if name == _METACLASS_ or bases[0].__name__ == _METACLASS_:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
# Get all variable defined in the meta class of each model.
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
# Create Model class and its options
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
# If many to many initialize the links between the two tables.
if cls._meta.many_to_many:
links = []
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
links.append((attr, attrs[attr]))
else:
for key, value in attrs.items():
if not key.startswith('_'):
links.append((key, value))
links[0][1].related_name = links[1][0]
links[0][1].add_to_model(cls, links[0][0])
links[1][1].related_name = links[0][0]
links[1][1].add_to_model(cls, links[1][0])
# Else it is a basic model.
else:
# If primary key
if cls._meta.primary_key:
# Create primary key field
cls.id = fields.PrimaryKeyField()
# Add field to the model
cls.id.add_to_model(cls, PrimaryKeyField.name)
# Add each field to the model
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
attrs[attr].add_to_model(cls, attr)
else:
for key, value in attrs.items():
if not key.startswith('_'):
value.add_to_model(cls, key)
return cls
class Model(with_metaclass(BaseModel)):
"""
Represents a model in the database with all its fields and current values
"""
def __init__(self, **kwargs):
# Map of all fields and associated values
self.dictValues = {}
# Initialize each field. If no value set it to None
for k, v in self._meta.fields.items():
if k in kwargs:
self.dictValues[k] = kwargs[k]
setattr(self, k, kwargs[k])
else:
self.dictValues[k] = None
setattr(self, k, None)
# Set primary key to None if no value provided
if self._meta.primary_key and not "id" in self.dictValues:
self.dictValues["id"] = None
object.__setattr__(self, "id", None)
# Initialize reverse relation as empty list.
for field in self._meta.reverse_rel:
object.__setattr__(self, field, [])
if self._meta.propagate and self._meta.database.subscribe:
self._subscribe()
def __setattr__(self, name, value):
"""
Override __setattr__ to update the dict value and the field value at once
"""
object.__setattr__(self, name, value)
if name in self.dictValues: # If updating a field value
if self._meta.fields[name].salt: # field is salted
# If the field is already salted do nothing.
# XXX Could create a security issue: what happens if the value
# starts with $2b$ but is not encrypted? Not critical for now
if not ("$2b$" in value and value[:4] == "$2b$"):
value = bcrypt.hashpw(value.encode('utf8'), bcrypt.gensalt())
object.__setattr__(self, name, value)
# If value is an instance of model class and has a relation.
# Append it to the corresponding field list
if hasattr(value, "_meta") and self.isForeignKey(self._meta.fields[name]):
self.dictValues[name] = getattr(value, self._meta.fields[name].reference.name)
return
self.dictValues[name] = value
@classmethod
def isForeignKey(cls, _field):
"""
Is the field an instance of ForeignKeyField
"""
return isinstance(_field, fields.ForeignKeyField)
@classmethod
def isReferenceField(cls, _field):
"""
Is the field an instance of ReferenceField
"""
return isinstance(_field, fields.ReferenceField)
@classmethod
@inlineCallbacks
def create_table(cls, *args, **kwargs):
"""
Creates a table in the database.
"""
init = cls._meta.database.create_table_title(cls._meta.table_name)
i = 1
fields = zip(cls._meta.sorted_fields_names, cls._meta.sorted_fields)
for field in fields:
field_string = field[1].create_field(field[0])
if i == len(fields):
if cls._meta.unique:
init = cls._meta.database.create_unique(init, cls._meta.unique)
init = cls._meta.database.create_table_field_end(init, field_string)
if cls._meta.hypertable:
init = cls._meta.database.create_hypertable(init,
cls._meta)
else:
init = cls._meta.database.create_table_field(init, field_string)
i+=1
yield cls._meta.database.runOperation(init)
@classmethod
@inlineCallbacks
def delete_table(cls, *args, **kwargs):
"""
Deletes table from database
"""
operation = cls._meta.database.delete_table(cls._meta.table_name)
yield cls._meta.database.runOperation(operation)
@classmethod
@inlineCallbacks
def insert(cls, values):
"""
Insert a row to the table with the given values
"""
result = yield InsertQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def update(cls, values):
"""
Update values in row
"""
result = yield UpdateQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def create(cls, **kwargs):
"""
Instantiates a model class object and saves it to the database.
"""
inst = cls(**kwargs)
yield inst.save()
returnValue(inst)
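# Illustrative (hypothetical) usage from an inlineCallbacks coroutine:
#     book = yield Book.create(title="Dune")
# create() instantiates Book(**kwargs) and persists it through save().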
@classmethod
def all(cls):
"""
Get all rows from a table
"""
return SelectQuery(cls)
@classmethod
@inlineCallbacks
def add(cls, obj1, obj2):
"""
Add a link between two models
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Add called on non many to many model")
query = AddQuery(cls, obj1, obj2)
yield query.execute()
if not getattr(obj1, obj2._meta.name):
setattr(obj1, obj2._meta.name, [obj2])
else:
getattr(obj1, obj2._meta.name).append(obj2)
if not getattr(obj2, obj1._meta.name):
setattr(obj2, obj1._meta.name, [obj1])
else:
getattr(obj2, obj1._meta.name).append(obj1)
@classmethod
@inlineCallbacks
def remove(cls, obj1, obj2):
"""
Remove a link between two models
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Remove called on non many to many model")
query = RemoveQuery(cls, obj1, obj2)
yield query.execute()
if obj2 in getattr(obj1, obj2._meta.name):
getattr(obj1, obj2._meta.name).remove(obj2)
if obj1 in getattr(obj2, obj1._meta.name):
getattr(obj2, obj1._meta.name).remove(obj1)
@classmethod
def delete(cls):
"""
Delete a row in the database
"""
query_instance = DeleteQuery(cls)
return query_instance
@inlineCallbacks
def save(self):
|
def _subscribe(self):
self._meta.database.connection.subscribe(self.propagate_update, u"wamp.postgresql.propagadate.{0}".format(self._meta.name))
def propagate_update(self, dictValues):
if dictValues["id"] == self.id:
for field, value in dictValues.iteritems():
self.__setattr__(field, value)
| """
Save a row
"""
# For each field get the value to insert
values = {key : self._meta.fields[key].insert_format(value) for key, value in self.dictValues.items()}
if self._meta.primary_key:
# If an id exist then we should update
if self.id:
pk = yield self.update(values)
if self._meta.propagate:
self._meta.database.propagate(self)
# Else it means we should create the row
else:
# XXX To Do: What happens if the insert fails? What should we return?
del values["id"]
pk = yield self.insert(values)
# Update id value
self.id = pk
else:
yield self.insert(values) | identifier_body |
base.py | ################################################################################
# MIT License
#
# Copyright (c) 2017 Jean-Charles Fosse & Johann Bigler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import fields, collections, bcrypt
from twisted.internet.defer import inlineCallbacks, returnValue
from fields import PrimaryKeyField
from query import SelectQuery, \
InsertQuery, \
AddQuery, \
RemoveQuery, \
UpdateQuery, \
DeleteQuery
"""
The metaclass enables each Model class to have its own set of variables.
This set of variables is represented by the ModelOptions class
"""
_METACLASS_ = '_metaclass_helper_'
def with_metaclass(meta, base=object):
return meta(_METACLASS_, (base,), {})
class ModelOptions(object):
"""
Represents all the options associated with a model.
They are accessible using the _meta variable from a Model object
"""
def __init__(self, cls,
table_name = None,
database = None,
primary_key = True,
on_conflict = [],
unique = [],
many_to_many = False,
order = [],
propagate = False,
hypertable = []):
# Model class
self.model_class = cls
# Model name
self.name = cls.__name__.lower()
# Table name. Either set by the user or derivated from name
self.table_name = table_name.lower() if table_name else self.name
# Database to use
self.database = database
# Does the model have a primary key? If so, it will be set by Kameleon
self.primary_key = primary_key
# XXX
self.on_conflict = on_conflict
# List of fields whose association should be unique.
# XXX #3 Today it receives a string.
# It should be receiving a list of fields
self.unique = unique
# Is this model a middle table for a many to many link
self.many_to_many = many_to_many
# Map of links represented by this table. Filled by the class
self.links = {}
# Order to respect. Useful if table not created by the ORM
self.order = order
# Should any change on a model be propagate
self.propagate = propagate
# Should the table change to hyper table.
self.hypertable = hypertable
# Map of fields
self.fields = {}
# Map of reverse relation fields
self.reverse_fields = {}
# List of fields sorted in order
self.sorted_fields = []
# Fields name sorted in order
self.sorted_fields_names = []
# Map of direct relation
self.rel = {}
# Map of reverse relation
self.reverse_rel = {}
# Map of related classes and the field associated
self.rel_class = {}
def add_field(self, field):
"""
Add a field to the class. It makes sure all related variables are
up to date
"""
if field.name in self.fields:
print("WARNING: Field {0} already in model {1}"
.format(field.name, self.table_name))
return
self.fields[field.name] = field
self.sorted_fields.append(field)
self.sorted_fields_names.append(field.name)
class BaseModel(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
if name == _METACLASS_ or bases[0].__name__ == _METACLASS_:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
# Get all variable defined in the meta class of each model.
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
# Create Model class and its options
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
# If many to many initialize the links between the two tables.
if cls._meta.many_to_many:
links = []
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
links.append((attr, attrs[attr]))
else:
for key, value in attrs.items():
if not key.startswith('_'):
links.append((key, value))
links[0][1].related_name = links[1][0]
links[0][1].add_to_model(cls, links[0][0])
links[1][1].related_name = links[0][0]
links[1][1].add_to_model(cls, links[1][0])
# Else it is a basic model.
else:
# If primary key
if cls._meta.primary_key:
# Create primary key field
cls.id = fields.PrimaryKeyField()
# Add field to the model
cls.id.add_to_model(cls, PrimaryKeyField.name)
# Add each field to the model
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
attrs[attr].add_to_model(cls, attr)
else:
for key, value in attrs.items():
if not key.startswith('_'):
value.add_to_model(cls, key)
return cls
class Model(with_metaclass(BaseModel)):
"""
Represents a model in the database with all its fields and current values
"""
def __init__(self, **kwargs):
# Map of all fields and associated values
self.dictValues = {}
# Initialize each field. If no value set it to None
for k, v in self._meta.fields.items():
if k in kwargs:
self.dictValues[k] = kwargs[k]
setattr(self, k, kwargs[k])
else:
self.dictValues[k] = None
setattr(self, k, None)
# Set primary key to None if no value provided
if self._meta.primary_key and not "id" in self.dictValues:
self.dictValues["id"] = None
object.__setattr__(self, "id", None)
# Initialize reverse relation as empty list.
for field in self._meta.reverse_rel:
object.__setattr__(self, field, [])
if self._meta.propagate and self._meta.database.subscribe:
self._subscribe()
def __setattr__(self, name, value):
"""
Override __setattr__ to update the dict value and the field value at once
"""
object.__setattr__(self, name, value)
if name in self.dictValues: # If updating a field value
if self._meta.fields[name].salt: # field is salted
# If the field is already salted do nothing.
# XXX Could create a security issue: what happens if the value
# starts with $2b$ but is not encrypted? Not critical for now
if not ("$2b$" in value and value[:4] == "$2b$"):
value = bcrypt.hashpw(value.encode('utf8'), bcrypt.gensalt())
object.__setattr__(self, name, value)
# If value is an instance of model class and has a relation.
# Append it to the corresponding field list
if hasattr(value, "_meta") and self.isForeignKey(self._meta.fields[name]):
self.dictValues[name] = getattr(value, self._meta.fields[name].reference.name)
return
self.dictValues[name] = value
@classmethod
def isForeignKey(cls, _field):
"""
Is the field an instance of ForeignKeyField
"""
return isinstance(_field, fields.ForeignKeyField)
@classmethod
def isReferenceField(cls, _field):
"""
Is the field an instance of ReferenceField
"""
return isinstance(_field, fields.ReferenceField)
@classmethod
@inlineCallbacks
def create_table(cls, *args, **kwargs):
"""
Creates a table in the database.
"""
init = cls._meta.database.create_table_title(cls._meta.table_name)
i = 1
fields = zip(cls._meta.sorted_fields_names, cls._meta.sorted_fields)
for field in fields:
field_string = field[1].create_field(field[0])
if i == len(fields):
if cls._meta.unique:
init = cls._meta.database.create_unique(init, cls._meta.unique)
init = cls._meta.database.create_table_field_end(init, field_string)
if cls._meta.hypertable:
init = cls._meta.database.create_hypertable(init,
cls._meta)
else:
init = cls._meta.database.create_table_field(init, field_string)
i+=1
yield cls._meta.database.runOperation(init)
@classmethod
@inlineCallbacks
def delete_table(cls, *args, **kwargs):
"""
Deletes table from database
"""
operation = cls._meta.database.delete_table(cls._meta.table_name)
yield cls._meta.database.runOperation(operation)
@classmethod
@inlineCallbacks
def insert(cls, values):
"""
Insert a row to the table with the given values
"""
result = yield InsertQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def update(cls, values):
"""
Update values in row
"""
result = yield UpdateQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def create(cls, **kwargs):
"""
Instantiates a model class object and saves it to the database.
"""
inst = cls(**kwargs)
yield inst.save()
returnValue(inst)
@classmethod
def all(cls):
"""
Get all rows from a table
"""
return SelectQuery(cls)
@classmethod
@inlineCallbacks
def add(cls, obj1, obj2):
"""
Add a link between two models
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Add called on non many to many model")
query = AddQuery(cls, obj1, obj2)
yield query.execute()
if not getattr(obj1, obj2._meta.name):
setattr(obj1, obj2._meta.name, [obj2])
else:
getattr(obj1, obj2._meta.name).append(obj2)
if not getattr(obj2, obj1._meta.name):
setattr(obj2, obj1._meta.name, [obj1])
else:
getattr(obj2, obj1._meta.name).append(obj1)
@classmethod
@inlineCallbacks
def remove(cls, obj1, obj2):
"""
Remove a link between two models
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Remove called on non many to many model")
query = RemoveQuery(cls, obj1, obj2)
yield query.execute()
if obj2 in getattr(obj1, obj2._meta.name):
getattr(obj1, obj2._meta.name).remove(obj2)
if obj1 in getattr(obj2, obj1._meta.name):
getattr(obj2, obj1._meta.name).remove(obj1)
@classmethod
def delete(cls):
"""
Delete a row in the database
"""
query_instance = DeleteQuery(cls)
return query_instance
@inlineCallbacks
def save(self):
"""
Save a row
"""
# For each field get the value to insert
values = {key : self._meta.fields[key].insert_format(value) for key, value in self.dictValues.items()}
if self._meta.primary_key:
# If an id exists then we should update
|
else:
yield self.insert(values)
def _subscribe(self):
self._meta.database.connection.subscribe(self.propagate_update, u"wamp.postgresql.propagadate.{0}".format(self._meta.name))
def propagate_update(self, dictValues):
if dictValues["id"] == self.id:
for field, value in dictValues.iteritems():
self.__setattr__(field, value)
| if self.id:
pk = yield self.update(values)
if self._meta.propagate:
self._meta.database.propagate(self)
# Else it means we should create the row
else:
# XXX TODO: What happens if the insert fails? What should we return?
del values["id"]
pk = yield self.insert(values)
# Update id value
self.id = pk | conditional_block |
base.py | ################################################################################
# MIT License
#
# Copyright (c) 2017 Jean-Charles Fosse & Johann Bigler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import fields, collections, bcrypt
from twisted.internet.defer import inlineCallbacks, returnValue
from fields import PrimaryKeyField
from query import SelectQuery, \
InsertQuery, \
AddQuery, \
RemoveQuery, \
UpdateQuery, \
DeleteQuery
"""
The metaclass enables each Model class to have its own set of variables.
This set of variables is represented by the ModelOptions class
"""
_METACLASS_ = '_metaclass_helper_'
def with_metaclass(meta, base=object):
return meta(_METACLASS_, (base,), {})
class ModelOptions(object):
"""
Represents all the options associated with a model.
They are accessible through the _meta variable of a Model object
"""
def __init__(self, cls,
table_name = None,
database = None,
primary_key = True,
on_conflict = [],
unique = [],
many_to_many = False,
order = [],
propagate = False,
hypertable = []):
# Model class
self.model_class = cls
# Model name
self.name = cls.__name__.lower()
# Table name. Either set by the user or derived from the model name
self.table_name = table_name.lower() if table_name else self.name
# Database to use
self.database = database
# Does the model have a primary key. If so, it will be set by Kameleon
self.primary_key = primary_key
# XXX
self.on_conflict = on_conflict
# List of fields whose association should be unique.
# XXX #3 Today it receives a string.
# It should receive a list of fields
self.unique = unique
# Is this model a middle table for a many to many link
self.many_to_many = many_to_many
# Map of links represented by this table. Filled by the class
self.links = {}
# Order to respect. Useful if table not created by the ORM
self.order = order
# Should any change on a model be propagated
self.propagate = propagate
# Should the table be converted to a hypertable.
self.hypertable = hypertable
# Map of fields
self.fields = {}
# Map of reverse relation fields
self.reverse_fields = {}
# List of fields sorted in order
self.sorted_fields = []
# Fields name sorted in order
self.sorted_fields_names = []
# Map of direct relation
self.rel = {}
# Map of reverse relation
self.reverse_rel = {}
# Map of related classes and the field associated
self.rel_class = {}
def add_field(self, field):
"""
Add a field to the class. It makes sure all related variables are
up to date
"""
if field.name in self.fields:
print("WARNING: Field {0} already in model {1}"
.format(field.name, self.table_name))
return
self.fields[field.name] = field
self.sorted_fields.append(field)
self.sorted_fields_names.append(field.name)
class BaseModel(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
if name == _METACLASS_ or bases[0].__name__ == _METACLASS_:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
# Get all variables defined in the Meta class of each model.
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
# Create Model class and its options
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
# If many to many initialize the links between the two tables.
if cls._meta.many_to_many:
links = []
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
links.append((attr, attrs[attr]))
else:
for key, value in attrs.items():
if not key.startswith('_'):
links.append((key, value))
links[0][1].related_name = links[1][0]
links[0][1].add_to_model(cls, links[0][0])
links[1][1].related_name = links[0][0]
links[1][1].add_to_model(cls, links[1][0])
# Else it is a basic model.
else:
# If primary key
if cls._meta.primary_key:
# Create primary key field
cls.id = fields.PrimaryKeyField()
# Add field to the model
cls.id.add_to_model(cls, PrimaryKeyField.name)
# Add each field to the model
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
attrs[attr].add_to_model(cls, attr)
else:
for key, value in attrs.items():
if not key.startswith('_'):
value.add_to_model(cls, key)
return cls
class Model(with_metaclass(BaseModel)):
"""
Represents a model in the database with all its fields and current values
"""
def __init__(self, **kwargs):
# Map of all fields and associated values
self.dictValues = {}
# Initialize each field. If no value set it to None
for k, v in self._meta.fields.items():
if k in kwargs:
self.dictValues[k] = kwargs[k]
setattr(self, k, kwargs[k])
else:
self.dictValues[k] = None
setattr(self, k, None)
# Set primary key to None if no value provided
if self._meta.primary_key and not "id" in self.dictValues:
self.dictValues["id"] = None
object.__setattr__(self, "id", None)
# Initialize reverse relation as empty list.
for field in self._meta.reverse_rel:
object.__setattr__(self, field, [])
if self._meta.propagate and self._meta.database.subscribe:
self._subscribe()
def | (self, name, value):
"""
Override __setattr__ to update the dict value and the field value at once
"""
object.__setattr__(self, name, value)
if name in self.dictValues: # If updating a field value
if self._meta.fields[name].salt: # field should be salted (hashed)
# If the value is already hashed, do nothing.
# XXX Could create a security issue: a value may start with $2b$
# without actually being hashed. Not critical for now
if not ("$2b$" in value and value[:4] == "$2b$"):
value = bcrypt.hashpw(value.encode('utf8'), bcrypt.gensalt())
object.__setattr__(self, name, value)
# If value is an instance of model class and has a relation.
# Append it to the corresponding field list
if hasattr(value, "_meta") and self.isForeignKey(self._meta.fields[name]):
self.dictValues[name] = getattr(value, self._meta.fields[name].reference.name)
return
self.dictValues[name] = value
@classmethod
def isForeignKey(cls, _field):
"""
Is the field an instance of ForeignKeyField
"""
return isinstance(_field, fields.ForeignKeyField)
@classmethod
def isReferenceField(cls, _field):
"""
Is the field an instance of ReferenceField
"""
return isinstance(_field, fields.ReferenceField)
@classmethod
@inlineCallbacks
def create_table(cls, *args, **kwargs):
"""
Creates a table in the database.
"""
init = cls._meta.database.create_table_title(cls._meta.table_name)
i = 1
fields = zip(cls._meta.sorted_fields_names, cls._meta.sorted_fields)
for field in fields:
field_string = field[1].create_field(field[0])
if i == len(fields):
if cls._meta.unique:
init = cls._meta.database.create_unique(init, cls._meta.unique)
init = cls._meta.database.create_table_field_end(init, field_string)
if cls._meta.hypertable:
init = cls._meta.database.create_hypertable(init,
cls._meta)
else:
init = cls._meta.database.create_table_field(init, field_string)
i+=1
yield cls._meta.database.runOperation(init)
@classmethod
@inlineCallbacks
def delete_table(cls, *args, **kwargs):
"""
Deletes table from database
"""
operation = cls._meta.database.delete_table(cls._meta.table_name)
yield cls._meta.database.runOperation(operation)
@classmethod
@inlineCallbacks
def insert(cls, values):
"""
Insert a row to the table with the given values
"""
result = yield InsertQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def update(cls, values):
"""
Update values in row
"""
result = yield UpdateQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def create(cls, **kwargs):
"""
Instantiates a model class object and saves it into the database.
"""
inst = cls(**kwargs)
yield inst.save()
returnValue(inst)
@classmethod
def all(cls):
"""
Get all rows from a table
"""
return SelectQuery(cls)
@classmethod
@inlineCallbacks
def add(cls, obj1, obj2):
"""
Add a link between two models
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Add called on non many to many model")
query = AddQuery(cls, obj1, obj2)
yield query.execute()
if not getattr(obj1, obj2._meta.name):
setattr(obj1, obj2._meta.name, [obj2])
else:
getattr(obj1, obj2._meta.name).append(obj2)
if not getattr(obj2, obj1._meta.name):
setattr(obj2, obj1._meta.name, [obj1])
else:
getattr(obj2, obj1._meta.name).append(obj1)
@classmethod
@inlineCallbacks
def remove(cls, obj1, obj2):
"""
Remove a link between two models
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Remove called on non many to many model")
query = RemoveQuery(cls, obj1, obj2)
yield query.execute()
if obj2 in getattr(obj1, obj2._meta.name):
getattr(obj1, obj2._meta.name).remove(obj2)
if obj1 in getattr(obj2, obj1._meta.name):
getattr(obj2, obj1._meta.name).remove(obj1)
@classmethod
def delete(cls):
"""
Delete a row in the database
"""
query_instance = DeleteQuery(cls)
return query_instance
@inlineCallbacks
def save(self):
"""
Save a row
"""
# For each field get the value to insert
values = {key : self._meta.fields[key].insert_format(value) for key, value in self.dictValues.items()}
if self._meta.primary_key:
# If an id exists then we should update
if self.id:
pk = yield self.update(values)
if self._meta.propagate:
self._meta.database.propagate(self)
# Else it means we should create the row
else:
# XXX TODO: What happens if the insert fails? What should we return?
del values["id"]
pk = yield self.insert(values)
# Update id value
self.id = pk
else:
yield self.insert(values)
def _subscribe(self):
self._meta.database.connection.subscribe(self.propagate_update, u"wamp.postgresql.propagadate.{0}".format(self._meta.name))
def propagate_update(self, dictValues):
if dictValues["id"] == self.id:
for field, value in dictValues.iteritems():
self.__setattr__(field, value)
| __setattr__ | identifier_name |
base.py | ################################################################################
# MIT License
#
# Copyright (c) 2017 Jean-Charles Fosse & Johann Bigler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import fields, collections, bcrypt
from twisted.internet.defer import inlineCallbacks, returnValue
from fields import PrimaryKeyField
from query import SelectQuery, \
InsertQuery, \
AddQuery, \
RemoveQuery, \
UpdateQuery, \
DeleteQuery
"""
The metaclass enables each Model class to have its own set of variables.
This set of variables is represented by the ModelOptions class
"""
_METACLASS_ = '_metaclass_helper_'
def with_metaclass(meta, base=object):
return meta(_METACLASS_, (base,), {})
class ModelOptions(object):
"""
Represents all the options associated with a model.
They are accessible through the _meta variable of a Model object
"""
def __init__(self, cls,
table_name = None,
database = None,
primary_key = True,
on_conflict = [],
unique = [],
many_to_many = False,
order = [],
propagate = False,
hypertable = []):
# Model class
self.model_class = cls
# Model name
self.name = cls.__name__.lower()
# Table name. Either set by the user or derived from the model name
self.table_name = table_name.lower() if table_name else self.name
# Database to use
self.database = database
# Does the model have a primary key. If so, it will be set by Kameleon
self.primary_key = primary_key
# XXX
self.on_conflict = on_conflict
# List of fields whose association should be unique.
# XXX #3 Today it receives a string.
# It should receive a list of fields
self.unique = unique
# Is this model a middle table for a many to many link
self.many_to_many = many_to_many
# Map of links represented by this table. Filled by the class
self.links = {}
# Order to respect. Useful if table not created by the ORM
self.order = order
# Should any change on a model be propagated
self.propagate = propagate
# Should the table be converted to a hypertable.
self.hypertable = hypertable
# Map of fields
self.fields = {}
# Map of reverse relation fields
self.reverse_fields = {}
# List of fields sorted in order
self.sorted_fields = []
# Fields name sorted in order
self.sorted_fields_names = []
# Map of direct relation
self.rel = {}
# Map of reverse relation
self.reverse_rel = {}
# Map of related classes and the field associated
self.rel_class = {}
def add_field(self, field):
"""
Add a field to the class. It makes sure all related variables are
up to date
"""
if field.name in self.fields:
print("WARNING: Field {0} already in model {1}"
.format(field.name, self.table_name))
return
self.fields[field.name] = field
self.sorted_fields.append(field)
self.sorted_fields_names.append(field.name)
class BaseModel(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
if name == _METACLASS_ or bases[0].__name__ == _METACLASS_:
return super(BaseModel, cls).__new__(cls, name, bases, attrs)
# Get all variables defined in the Meta class of each model.
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
# Create Model class and its options
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
cls._meta = ModelOptions(cls, **meta_options)
# If many to many initialize the links between the two tables.
if cls._meta.many_to_many:
links = []
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
links.append((attr, attrs[attr]))
else:
for key, value in attrs.items():
if not key.startswith('_'):
links.append((key, value))
links[0][1].related_name = links[1][0]
links[0][1].add_to_model(cls, links[0][0])
links[1][1].related_name = links[0][0]
links[1][1].add_to_model(cls, links[1][0])
# Else it is a basic model.
else:
# If primary key
if cls._meta.primary_key:
# Create primary key field
cls.id = fields.PrimaryKeyField()
# Add field to the model
cls.id.add_to_model(cls, PrimaryKeyField.name)
# Add each field to the model
if cls._meta.order:
for attr in cls._meta.order:
if attr in attrs:
attrs[attr].add_to_model(cls, attr)
else:
for key, value in attrs.items():
if not key.startswith('_'):
value.add_to_model(cls, key)
return cls
class Model(with_metaclass(BaseModel)):
"""
Represents a model in the database with all its fields and current values
"""
def __init__(self, **kwargs):
# Map of all fields and associated values
self.dictValues = {}
# Initialize each field. If no value set it to None
for k, v in self._meta.fields.items():
if k in kwargs:
self.dictValues[k] = kwargs[k]
setattr(self, k, kwargs[k])
else:
self.dictValues[k] = None
setattr(self, k, None)
# Set primary key to None if no value provided
if self._meta.primary_key and not "id" in self.dictValues:
self.dictValues["id"] = None
object.__setattr__(self, "id", None)
# Initialize reverse relation as empty list.
for field in self._meta.reverse_rel:
object.__setattr__(self, field, [])
if self._meta.propagate and self._meta.database.subscribe:
self._subscribe()
def __setattr__(self, name, value):
"""
Override __setattr__ to update the dict value and the field value at once
"""
object.__setattr__(self, name, value)
if name in self.dictValues: # If updating a field value
if self._meta.fields[name].salt: # field should be salted (hashed)
# If the value is already hashed, do nothing.
# XXX Could create a security issue: a value may start with $2b$
# without actually being hashed. Not critical for now
if not ("$2b$" in value and value[:4] == "$2b$"):
value = bcrypt.hashpw(value.encode('utf8'), bcrypt.gensalt())
object.__setattr__(self, name, value)
# If value is an instance of model class and has a relation.
# Append it to the corresponding field list
if hasattr(value, "_meta") and self.isForeignKey(self._meta.fields[name]):
self.dictValues[name] = getattr(value, self._meta.fields[name].reference.name)
return
self.dictValues[name] = value
@classmethod
def isForeignKey(cls, _field):
"""
Is the field an instance of ForeignKeyField
"""
return isinstance(_field, fields.ForeignKeyField)
@classmethod
def isReferenceField(cls, _field):
"""
Is the field an instance of ReferenceField
"""
return isinstance(_field, fields.ReferenceField)
@classmethod
@inlineCallbacks
def create_table(cls, *args, **kwargs):
"""
Creates a table in the database.
"""
init = cls._meta.database.create_table_title(cls._meta.table_name)
i = 1
fields = zip(cls._meta.sorted_fields_names, cls._meta.sorted_fields)
for field in fields:
field_string = field[1].create_field(field[0])
if i == len(fields):
if cls._meta.unique:
init = cls._meta.database.create_unique(init, cls._meta.unique)
init = cls._meta.database.create_table_field_end(init, field_string)
if cls._meta.hypertable:
init = cls._meta.database.create_hypertable(init,
cls._meta)
else:
init = cls._meta.database.create_table_field(init, field_string)
i+=1
yield cls._meta.database.runOperation(init)
@classmethod
@inlineCallbacks
def delete_table(cls, *args, **kwargs):
"""
Deletes table from database
"""
operation = cls._meta.database.delete_table(cls._meta.table_name)
yield cls._meta.database.runOperation(operation)
@classmethod
@inlineCallbacks
def insert(cls, values):
"""
Insert a row to the table with the given values
"""
result = yield InsertQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def update(cls, values):
"""
Update values in row
"""
result = yield UpdateQuery(cls, values).execute()
returnValue(result)
@classmethod
@inlineCallbacks
def create(cls, **kwargs):
"""
Instantiates a model class object and saves it into the database.
"""
inst = cls(**kwargs)
yield inst.save()
returnValue(inst)
@classmethod
def all(cls):
"""
Get all rows from a table
"""
return SelectQuery(cls)
@classmethod
@inlineCallbacks
def add(cls, obj1, obj2):
"""
Add a link between two models
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Add called on non many to many model")
query = AddQuery(cls, obj1, obj2)
yield query.execute()
if not getattr(obj1, obj2._meta.name):
setattr(obj1, obj2._meta.name, [obj2])
else:
getattr(obj1, obj2._meta.name).append(obj2)
if not getattr(obj2, obj1._meta.name):
setattr(obj2, obj1._meta.name, [obj1])
else:
getattr(obj2, obj1._meta.name).append(obj1)
@classmethod
@inlineCallbacks
def remove(cls, obj1, obj2):
"""
Remove a link between two models
"""
if not cls._meta.many_to_many:
raise Exception("ERROR: Remove called on non many to many model")
query = RemoveQuery(cls, obj1, obj2)
yield query.execute()
if obj2 in getattr(obj1, obj2._meta.name):
getattr(obj1, obj2._meta.name).remove(obj2)
if obj1 in getattr(obj2, obj1._meta.name):
getattr(obj2, obj1._meta.name).remove(obj1)
@classmethod
def delete(cls):
"""
Delete a row in the database
"""
query_instance = DeleteQuery(cls)
return query_instance
@inlineCallbacks
def save(self):
"""
Save a row
"""
# For each field get the value to insert
values = {key : self._meta.fields[key].insert_format(value) for key, value in self.dictValues.items()}
if self._meta.primary_key:
# If an id exists then we should update
if self.id:
pk = yield self.update(values)
if self._meta.propagate:
self._meta.database.propagate(self)
# Else it means we should create the row
else:
# XXX TODO: What happens if the insert fails? What should we return?
del values["id"]
pk = yield self.insert(values)
# Update id value
self.id = pk
else:
yield self.insert(values)
def _subscribe(self):
self._meta.database.connection.subscribe(self.propagate_update, u"wamp.postgresql.propagadate.{0}".format(self._meta.name)) | if dictValues["id"] == self.id:
for field, value in dictValues.iteritems():
self.__setattr__(field, value) |
def propagate_update(self, dictValues): | random_line_split |
rca.rs | // Optimization for RCA
// Ordinarily, just a, b, c, and d are scanned separately and then combined by joins.
// a: (each product, each city) // can be cut on drill 1
// b: (all products, each city)
// c: (each product, all cities) // can be cut on drill 1
// d: (all products, all cities)
//
// Note that external cuts are always valid (i.e. if above abcd were cut by a year).
//
// However, this results in extra scans, especially if there's no internal cuts (cuts on an rca
// drill dim).
//
// The optimization is to derive the c and d aggregates from a and b. Since cuts are allowed on the
// first drill in the rca, both a and b have to be scanned (b cannot be cut on the first drill).
//
// In clickhouse there is no partition, so it's trickier to do what looks like two different group
// by.
//
// The general idea is to do one group by, in which both the measure and the 2nd drill are rolled
// up.
// - measure is rolled up by aggregate fn (e.g. sum)
// - 2nd drill is rolled up by groupArray, which just collects all the values into an array in
// order.
// - the original measure is also rolled up by groupArray.
//
// Then the pivoted table is melted using Array Join on the 2nd drill and the original measure
// (which would be a or c), while preserving the aggregated measure (c or d) from the pivoted
// table.
//
// An example (not accounting for external cuts or dims) would be
// select drill_1_id, drill_2_id, a, c from (
// select drill_1_id, groupArray(drill_2_id) as drill_2_id_s, groupArray(a) a_s, sum(a) as c from (
// select * from a_table
// )
// group by drill_1_id
// )
// array join drill_2_id_s as drill_2_id, a_s as a
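//
// To make the melt concrete, a tiny made-up walk-through (illustration only;
// drill_1_id / drill_2_id stand for the rca drill key columns, `a` for the
// rca measure):
//
// input rows (drill_1_id, drill_2_id, a): (1, 10, 2), (1, 20, 3), (2, 10, 5)
//
// after `group by drill_1_id` with groupArray/sum:
// (drill_1_id=1, drill_2_id_s=[10, 20], a_s=[2, 3], c=5)
// (drill_1_id=2, drill_2_id_s=[10], a_s=[5], c=5)
//
// after `array join drill_2_id_s as drill_2_id, a_s as a`:
// (1, 10, a=2, c=5), (1, 20, a=3, c=5), (2, 10, a=5, c=5)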
use itertools::join;
use crate::sql::primary_agg::primary_agg;
use super::{
TableSql,
CutSql,
DrilldownSql,
MeasureSql,
RcaSql,
};
pub fn | (
table: &TableSql,
cuts: &[CutSql],
drills: &[DrilldownSql],
meas: &[MeasureSql],
rca: &RcaSql,
) -> (String, String)
{
// append the correct rca drill to drilldowns
// for a, both
// for b, d2
// for c, d1
// for d, none
let mut a_drills = drills.to_vec();
let mut b_drills = drills.to_vec();
let mut c_drills = drills.to_vec();
let d_drills = drills.to_vec();
a_drills.extend_from_slice(&rca.drill_1);
a_drills.extend_from_slice(&rca.drill_2);
b_drills.extend_from_slice(&rca.drill_2);
c_drills.extend_from_slice(&rca.drill_1);
println!("a: {:?}", a_drills);
println!("b: {:?}", b_drills);
println!("c: {:?}", c_drills);
println!("d: {:?}", d_drills);
// prepend the rca sql to meas
let all_meas = {
let mut temp = vec![rca.mea.clone()];
temp.extend_from_slice(meas);
temp
};
// for cuts,
// - a can be cut on d1 and ext
// - b cannot be int cut, only ext
// - c can be cut on d1 and ext
// - d cannot be int cut, only ext
//
// In the future, would I allow more cuts? Maybe depending on use case
//
// The blacklist is the drilldowns contained in each of a, b, c, d
//
// Note: parent of rca drills are not filtered, because they are meant
// to limit the rca calculation space!
//
// don't need to worry about aliases, because cuts don't use aliases,
// and are just matching against drill key col
let ac_cut_cols_blacklist: Vec<_> = rca.drill_2.iter()
.flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone()))
.collect();
let bd_cut_cols_blacklist: Vec<_> = rca.drill_1.iter().chain(rca.drill_2.iter())
.flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone()))
.collect();
let ac_cuts: Vec<_> = cuts.iter()
.filter(|cut| {
ac_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none()
})
.cloned()
.collect();
let bd_cuts: Vec<_> = cuts.iter()
.filter(|cut| {
bd_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none()
})
.cloned()
.collect();
println!("{:#?}", cuts);
println!("{:#?}", ac_cuts);
println!("{:#?}", bd_cuts);
// now aggregate each component
//
// As an optimization, c is calculated from a, and d is calculated from b
// If there are no internal cuts, then b, c, d are calculated from a.
// First do aggregation for part a, b
let (a, a_final_drills) = primary_agg(table, &ac_cuts, &a_drills, &all_meas, None);
let (b, b_final_drills) = primary_agg(table, &bd_cuts, &b_drills, &all_meas, None);
// replace final_m0 with letter name.
// I put the rca measure at the beginning of the measures, so it should
// always be m0
let a = a.replace("final_m0", "a");
let b = b.replace("final_m0", "b");
// for clickhouse, need to make groupArray and Array Join clauses for drill_1 for when
// aggregating a to c, and b to d.
// (drill_2 would be needed if going from a to b)
// TODO refactor these lines out to helpers
let group_array_rca_drill_2 = rca.drill_2.iter()
.flat_map(|d| {
let alias_postfix = &d.alias_postfix;
d.level_columns.iter().map(move |l| {
if let Some(ref name_col) = l.name_column {
format!("groupArray({key_col}_{alias_postfix}) as {key_col}_{alias_postfix}_s, groupArray({name_col}_{alias_postfix}) as {name_col}_{alias_postfix}_s", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix)
} else {
format!("groupArray({col}_{alias_postfix}) as {col}_{alias_postfix}_s", col=l.key_column, alias_postfix=alias_postfix)
}
})
});
let group_array_rca_drill_2 = join(group_array_rca_drill_2, ", ");
let join_array_rca_drill_2 = rca.drill_2.iter()
.flat_map(|d| {
let alias_postfix = &d.alias_postfix;
d.level_columns.iter().map(move |l| {
if let Some(ref name_col) = l.name_column {
format!("{key_col}_{alias_postfix}_s as {key_col}_{alias_postfix}, {name_col}_{alias_postfix}_s as {name_col}_{alias_postfix}", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix)
} else {
format!("{col}_{alias_postfix}_s as {col}_{alias_postfix}", col=l.key_column, alias_postfix=alias_postfix)
}
})
});
let join_array_rca_drill_2 = join(join_array_rca_drill_2, ", ");
// Do GroupArray and Array Join clauses for external measures, also
let mea_cols = (1..=meas.len())
.map(|m_idx| format!("final_m{col}", col=m_idx));
let mea_cols = join(mea_cols, ", ");
let group_array_ext_mea = (1..=meas.len())
.map(|m_idx| format!("groupArray(final_m{col}) as final_m{col}_s", col=m_idx));
let group_array_ext_mea = join(group_array_ext_mea, ", ");
let join_array_ext_mea = (1..=meas.len())
.map(|m_idx| format!("final_m{col}_s as final_m{col}", col=m_idx));
let join_array_ext_mea = join(join_array_ext_mea, ", ");
// groupArray cols (the drill_2 from rca) can't be included in the group by or select
let c_drills_minus_rca_drill_2 = c_drills.iter()
.filter(|d| !rca.drill_2.contains(&d))
.map(|d| d.col_alias_only_string());
let c_drills_minus_rca_drill_2 = join(c_drills_minus_rca_drill_2, ", ");
let d_drills_minus_rca_drill_2 = d_drills.iter()
.filter(|d| !rca.drill_2.contains(&d))
.map(|d| d.col_alias_only_string());
let d_drills_minus_rca_drill_2 = join(d_drills_minus_rca_drill_2, ", ");
// a and b drills are kept as-is
let a_drills_str = a_drills.iter()
.map(|d| d.col_alias_only_string());
let a_drills_str = join(a_drills_str, ", ");
let b_drills_str = b_drills.iter()
.map(|d| d.col_alias_only_string());
let b_drills_str = join(b_drills_str, ", ");
// Now add part c
let ac = format!("select {}, {}{} a, c from \
(select {}, {}, {}{} groupArray(a) as a_s, sum(a) as c from ({}) group by {}) \
Array Join {}, {}{} a_s as a",
a_drills_str,
mea_cols,
if mea_cols.is_empty() { "" } else { "," },
c_drills_minus_rca_drill_2,
group_array_rca_drill_2,
group_array_ext_mea,
if group_array_ext_mea.is_empty() { "" } else { "," },
a,
c_drills_minus_rca_drill_2,
join_array_rca_drill_2,
join_array_ext_mea,
if join_array_ext_mea.is_empty() { "" } else { "," },
);
println!("{}", ac);
// Now add part d
let bd = if d_drills.is_empty() {
format!("select {}, b, d from \
(select {}, groupArray(b) as b_s, sum(b) as d from ({})) \
Array Join {}, b_s as b",
b_drills_str,
group_array_rca_drill_2,
b,
join_array_rca_drill_2,
)
} else {
format!("select {}, b, d from \
(select {}, {}, groupArray(b) as b_s, sum(b) as d from ({}) group by {}) \
Array Join {}, b_s as b",
b_drills_str,
d_drills_minus_rca_drill_2,
group_array_rca_drill_2,
b,
d_drills_minus_rca_drill_2,
join_array_rca_drill_2,
)
};
println!("bd: {}", bd);
// now do the final join
let mut final_sql = format!("select * from ({}) all inner join ({}) using {}",
ac,
bd,
b_final_drills,
);
// adding final measures at the end
let final_ext_meas = if !meas.is_empty() {
", ".to_owned() + &join((1..meas.len()+1).map(|i| format!("final_m{}", i)), ", ")
} else {
"".to_owned()
};
final_sql = format!("select {}, {}((a/b) / (c/d)) as rca{} from ({})",
a_final_drills,
if rca.debug { "a, b, c, d, " } else { "" },
final_ext_meas,
final_sql,
);
// SPECIAL CASE
// Hack to deal with no drills on d
// Later, make this better
final_sql = final_sql.replace("select , ", "select ");
final_sql = final_sql.replace("group by )", ")");
(final_sql, a_final_drills)
}
| calculate | identifier_name |
rca.rs | // Optimization for RCA
// Ordinarily, just a, b, c, and d are scanned separately and then combined by joins.
// a: (each product, each city) // can be cut on drill 1
// b: (all products, each city)
// c: (each product, all cities) // can be cut on drill 1
// d: (all products, all cities)
//
// Note that external cuts are always valid (i.e. if above abcd were cut by a year).
//
// However, this results in extra scans, especially if there's no internal cuts (cuts on an rca
// drill dim).
//
// The optimization is to derive the c and d aggregates from a and b. Since cuts are allowed on the
// first drill in the rca, both a and b have to be scanned (b cannot be cut on the first drill).
//
// In clickhouse there is no partition, so it's trickier to do what looks like two different group
// by.
//
// The general idea is to do one group by, in which both the measure and the 2nd drill are rolled
// up.
// - measure is rolled up by aggregate fn (e.g. sum)
// - 2nd drill is rolled up by groupArray, which just collects all the values into an array in
// order.
// - the original measure is also rolled up by groupArray.
//
// Then the pivoted table is melted using Array Join on the 2nd drill and the original measure
// (which would be a or c), while preserving the aggregated measure (c or d) from the pivoted
// table.
//
// An example (not accounting for external cuts or dims) would be
// select drill_1_id, drill_2_id, a, c from (
// select drill_1_id, groupArray(drill_2_id) as drill_2_id_s, groupArray(a) a_s, sum(a) as c from (
// select * from a_table
// )
// group by drill_1_id
// )
// array join drill_2_id_s as drill_2_id, a_s as a
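//
// To make the melt concrete, a tiny made-up walk-through (illustration only;
// drill_1_id / drill_2_id stand for the rca drill key columns, `a` for the
// rca measure):
//
// input rows (drill_1_id, drill_2_id, a): (1, 10, 2), (1, 20, 3), (2, 10, 5)
//
// after `group by drill_1_id` with groupArray/sum:
// (drill_1_id=1, drill_2_id_s=[10, 20], a_s=[2, 3], c=5)
// (drill_1_id=2, drill_2_id_s=[10], a_s=[5], c=5)
//
// after `array join drill_2_id_s as drill_2_id, a_s as a`:
// (1, 10, a=2, c=5), (1, 20, a=3, c=5), (2, 10, a=5, c=5)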
use itertools::join;
use crate::sql::primary_agg::primary_agg;
use super::{
TableSql,
CutSql,
DrilldownSql,
MeasureSql,
RcaSql,
};
pub fn calculate(
table: &TableSql,
cuts: &[CutSql],
drills: &[DrilldownSql],
meas: &[MeasureSql],
rca: &RcaSql,
) -> (String, String)
{
// append the correct rca drill to drilldowns
// for a, both
// for b, d2
// for c, d1
// for d, none
let mut a_drills = drills.to_vec();
let mut b_drills = drills.to_vec();
let mut c_drills = drills.to_vec();
let d_drills = drills.to_vec();
a_drills.extend_from_slice(&rca.drill_1);
a_drills.extend_from_slice(&rca.drill_2);
b_drills.extend_from_slice(&rca.drill_2);
c_drills.extend_from_slice(&rca.drill_1);
println!("a: {:?}", a_drills);
println!("b: {:?}", b_drills);
println!("c: {:?}", c_drills);
println!("d: {:?}", d_drills);
// prepend the rca sql to meas
let all_meas = {
let mut temp = vec![rca.mea.clone()];
temp.extend_from_slice(meas);
temp
};
// for cuts,
// - a can be cut on d1 and ext
// - b cannot be int cut, only ext
// - c can be cut on d1 and ext
// - d cannot be int cut, only ext
//
// In the future, would I allow more cuts? Maybe depending on use case
//
// The blacklist is the drilldowns contained in each of a, b, c, d
//
// Note: parent of rca drills are not filtered, because they are meant
// to limit the rca calculation space!
//
// don't need to worry about aliases, because cuts don't use aliases,
// and are just matching against drill key col
let ac_cut_cols_blacklist: Vec<_> = rca.drill_2.iter()
.flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone()))
.collect();
let bd_cut_cols_blacklist: Vec<_> = rca.drill_1.iter().chain(rca.drill_2.iter())
.flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone()))
.collect();
let ac_cuts: Vec<_> = cuts.iter()
.filter(|cut| {
ac_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none()
})
.cloned()
.collect();
let bd_cuts: Vec<_> = cuts.iter()
.filter(|cut| {
bd_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none()
})
.cloned()
.collect();
println!("{:#?}", cuts);
println!("{:#?}", ac_cuts);
println!("{:#?}", bd_cuts);
// now aggregate each component
//
// As an optimization, c is calculated from a, and d is calculated from b
// If there are no internal cuts, then b, c, d are calculated from a.
// First do aggregation for part a, b
let (a, a_final_drills) = primary_agg(table, &ac_cuts, &a_drills, &all_meas, None);
let (b, b_final_drills) = primary_agg(table, &bd_cuts, &b_drills, &all_meas, None);
// replace final_m0 with letter name.
// I put the rca measure at the beginning of the measures, so it should
// always be m0
let a = a.replace("final_m0", "a");
let b = b.replace("final_m0", "b");
// for clickhouse, need to make groupArray and Array Join clauses for drill_1 for when
// aggregating a to c, and b to d.
// (drill_2 would be needed if going from a to b)
// TODO refactor these lines out to helpers
let group_array_rca_drill_2 = rca.drill_2.iter()
.flat_map(|d| {
let alias_postfix = &d.alias_postfix;
d.level_columns.iter().map(move |l| {
if let Some(ref name_col) = l.name_column | else {
format!("groupArray({col}_{alias_postfix}) as {col}_{alias_postfix}_s", col=l.key_column, alias_postfix=alias_postfix)
}
})
});
let group_array_rca_drill_2 = join(group_array_rca_drill_2, ", ");
let join_array_rca_drill_2 = rca.drill_2.iter()
.flat_map(|d| {
let alias_postfix = &d.alias_postfix;
d.level_columns.iter().map(move |l| {
if let Some(ref name_col) = l.name_column {
format!("{key_col}_{alias_postfix}_s as {key_col}_{alias_postfix}, {name_col}_{alias_postfix}_s as {name_col}_{alias_postfix}", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix)
} else {
format!("{col}_{alias_postfix}_s as {col}_{alias_postfix}", col=l.key_column, alias_postfix=alias_postfix)
}
})
});
let join_array_rca_drill_2 = join(join_array_rca_drill_2, ", ");
// Do GroupArray and Array Join clauses for external measures, also
let mea_cols = (1..=meas.len())
.map(|m_idx| format!("final_m{col}", col=m_idx));
let mea_cols = join(mea_cols, ", ");
let group_array_ext_mea = (1..=meas.len())
.map(|m_idx| format!("groupArray(final_m{col}) as final_m{col}_s", col=m_idx));
let group_array_ext_mea = join(group_array_ext_mea, ", ");
let join_array_ext_mea = (1..=meas.len())
.map(|m_idx| format!("final_m{col}_s as final_m{col}", col=m_idx));
let join_array_ext_mea = join(join_array_ext_mea, ", ");
// groupArray cols (the drill_2 from rca) can't be included in the group by or select
let c_drills_minus_rca_drill_2 = c_drills.iter()
.filter(|d| !rca.drill_2.contains(&d))
.map(|d| d.col_alias_only_string());
let c_drills_minus_rca_drill_2 = join(c_drills_minus_rca_drill_2, ", ");
let d_drills_minus_rca_drill_2 = d_drills.iter()
.filter(|d| !rca.drill_2.contains(&d))
.map(|d| d.col_alias_only_string());
let d_drills_minus_rca_drill_2 = join(d_drills_minus_rca_drill_2, ", ");
// a and b drills are kept as-is
let a_drills_str = a_drills.iter()
.map(|d| d.col_alias_only_string());
let a_drills_str = join(a_drills_str, ", ");
let b_drills_str = b_drills.iter()
.map(|d| d.col_alias_only_string());
let b_drills_str = join(b_drills_str, ", ");
// Now add part c
let ac = format!("select {}, {}{} a, c from \
(select {}, {}, {}{} groupArray(a) as a_s, sum(a) as c from ({}) group by {}) \
Array Join {}, {}{} a_s as a",
a_drills_str,
mea_cols,
if mea_cols.is_empty() { "" } else { "," },
c_drills_minus_rca_drill_2,
group_array_rca_drill_2,
group_array_ext_mea,
if group_array_ext_mea.is_empty() { "" } else { "," },
a,
c_drills_minus_rca_drill_2,
join_array_rca_drill_2,
join_array_ext_mea,
if join_array_ext_mea.is_empty() { "" } else { "," },
);
println!("{}", ac);
// Now add part d
let bd = if d_drills.is_empty() {
format!("select {}, b, d from \
(select {}, groupArray(b) as b_s, sum(b) as d from ({})) \
Array Join {}, b_s as b",
b_drills_str,
group_array_rca_drill_2,
b,
join_array_rca_drill_2,
)
} else {
format!("select {}, b, d from \
(select {}, {}, groupArray(b) as b_s, sum(b) as d from ({}) group by {}) \
Array Join {}, b_s as b",
b_drills_str,
d_drills_minus_rca_drill_2,
group_array_rca_drill_2,
b,
d_drills_minus_rca_drill_2,
join_array_rca_drill_2,
)
};
println!("bd: {}", bd);
// now do the final join
let mut final_sql = format!("select * from ({}) all inner join ({}) using {}",
ac,
bd,
b_final_drills,
);
// adding final measures at the end
let final_ext_meas = if !meas.is_empty() {
", ".to_owned() + &join((1..meas.len()+1).map(|i| format!("final_m{}", i)), ", ")
} else {
"".to_owned()
};
final_sql = format!("select {}, {}((a/b) / (c/d)) as rca{} from ({})",
a_final_drills,
if rca.debug { "a, b, c, d, " } else { "" },
final_ext_meas,
final_sql,
);
// SPECIAL CASE
// Hack to deal with no drills on d
// Later, make this better
final_sql = final_sql.replace("select , ", "select ");
final_sql = final_sql.replace("group by )", ")");
(final_sql, a_final_drills)
}
| {
format!("groupArray({key_col}_{alias_postfix}) as {key_col}_{alias_postfix}_s, groupArray({name_col}_{alias_postfix}) as {name_col}_{alias_postfix}_s", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix)
} | conditional_block |
rca.rs | // Optimization for RCA
// Ordinarily, just a, b, c, and d are scanned separately and then combined by joins.
// a: (each product, each city) // can be cut on drill 1
// b: (all products, each city)
// c: (each product, all cities) // can be cut on drill 1
// d: (all products, all cities)
//
// Note that external cuts are always valid (i.e. if above abcd were cut by a year).
//
// However, this results in extra scans, especially if there's no internal cuts (cuts on an rca | //
// The optimization is to derive the c and d aggregates from a and b. Since cuts are allowed on the
// first drill in the rca, both a and b have to be scanned (b cannot be cut on the first drill).
//
// In clickhouse there is no partition, so it's trickier to do what looks like two different group
// by.
//
// The general idea is to do one group by, in which both the measure and the 2nd drill are rolled
// up.
// - measure is rolled up by aggregate fn (e.g. sum)
// - 2nd drill is rolled up by groupArray, which just collects all the values into an array in
// order.
// - the original measure is also rolled up by groupArray.
//
// Then the pivoted table is melted using Array Join on the 2nd drill and the original measure
// (which would be a or c), while preserving the aggregated measure (c or d) from the pivoted
// table.
//
// An example (not accounting for external cuts or dims) would be
// select drill_1_id, drill_2_id, a, c from (
// select drill_1_id, groupArray(drill_2_id) as drill_2_id_s, groupArray(a) a_s, sum(a) as c from (
// select * from a_table
// )
// group by drill_1_id
// )
// array join drill_2_id_s as drill_2_id, a_s as a
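//
// To make the melt concrete, a tiny made-up walk-through (illustration only;
// drill_1_id / drill_2_id stand for the rca drill key columns, `a` for the
// rca measure):
//
// input rows (drill_1_id, drill_2_id, a): (1, 10, 2), (1, 20, 3), (2, 10, 5)
//
// after `group by drill_1_id` with groupArray/sum:
// (drill_1_id=1, drill_2_id_s=[10, 20], a_s=[2, 3], c=5)
// (drill_1_id=2, drill_2_id_s=[10], a_s=[5], c=5)
//
// after `array join drill_2_id_s as drill_2_id, a_s as a`:
// (1, 10, a=2, c=5), (1, 20, a=3, c=5), (2, 10, a=5, c=5)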
use itertools::join;
use crate::sql::primary_agg::primary_agg;
use super::{
TableSql,
CutSql,
DrilldownSql,
MeasureSql,
RcaSql,
};
pub fn calculate(
table: &TableSql,
cuts: &[CutSql],
drills: &[DrilldownSql],
meas: &[MeasureSql],
rca: &RcaSql,
) -> (String, String)
{
// append the correct rca drill to drilldowns
// for a, both
// for b, d2
// for c, d1
// for d, none
let mut a_drills = drills.to_vec();
let mut b_drills = drills.to_vec();
let mut c_drills = drills.to_vec();
let d_drills = drills.to_vec();
a_drills.extend_from_slice(&rca.drill_1);
a_drills.extend_from_slice(&rca.drill_2);
b_drills.extend_from_slice(&rca.drill_2);
c_drills.extend_from_slice(&rca.drill_1);
println!("a: {:?}", a_drills);
println!("b: {:?}", b_drills);
println!("c: {:?}", c_drills);
println!("d: {:?}", d_drills);
// prepend the rca sql to meas
let all_meas = {
let mut temp = vec![rca.mea.clone()];
temp.extend_from_slice(meas);
temp
};
// for cuts,
// - a can be cut on d1 and ext
// - b cannot be int cut, only ext
// - c can be cut on d1 and ext
// - d cannot be int cut, only ext
//
// In the future, would I allow more cuts? Maybe depending on use case
//
// The blacklist is the drilldowns contained in each of a, b, c, d
//
// Note: parent of rca drills are not filtered, because they are meant
// to limit the rca calculation space!
//
// don't need to worry about aliases, because cuts don't use aliases,
// and are just matching against drill key col
let ac_cut_cols_blacklist: Vec<_> = rca.drill_2.iter()
.flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone()))
.collect();
let bd_cut_cols_blacklist: Vec<_> = rca.drill_1.iter().chain(rca.drill_2.iter())
.flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone()))
.collect();
let ac_cuts: Vec<_> = cuts.iter()
.filter(|cut| {
ac_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none()
})
.cloned()
.collect();
let bd_cuts: Vec<_> = cuts.iter()
.filter(|cut| {
bd_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none()
})
.cloned()
.collect();
println!("{:#?}", cuts);
println!("{:#?}", ac_cuts);
println!("{:#?}", bd_cuts);
// now aggregate each component
//
// As an optimization, c is calculated from a, and d is calculated from b
// If there are no internal cuts, then b, c, d are calculated from a.
// First do aggregation for part a, b
let (a, a_final_drills) = primary_agg(table, &ac_cuts, &a_drills, &all_meas, None);
let (b, b_final_drills) = primary_agg(table, &bd_cuts, &b_drills, &all_meas, None);
// replace final_m0 with letter name.
// I put the rca measure at the beginning of the measures, so it should
// always be m0
let a = a.replace("final_m0", "a");
let b = b.replace("final_m0", "b");
// for clickhouse, need to make groupArray and Array Join clauses for drill_1 for when
// aggregating a to c, and b to d.
// (drill_2 would be needed if going from a to b)
// TODO refactor these lines out to helpers
let group_array_rca_drill_2 = rca.drill_2.iter()
.flat_map(|d| {
let alias_postfix = &d.alias_postfix;
d.level_columns.iter().map(move |l| {
if let Some(ref name_col) = l.name_column {
format!("groupArray({key_col}_{alias_postfix}) as {key_col}_{alias_postfix}_s, groupArray({name_col}_{alias_postfix}) as {name_col}_{alias_postfix}_s", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix)
} else {
format!("groupArray({col}_{alias_postfix}) as {col}_{alias_postfix}_s", col=l.key_column, alias_postfix=alias_postfix)
}
})
});
let group_array_rca_drill_2 = join(group_array_rca_drill_2, ", ");
let join_array_rca_drill_2 = rca.drill_2.iter()
.flat_map(|d| {
let alias_postfix = &d.alias_postfix;
d.level_columns.iter().map(move |l| {
if let Some(ref name_col) = l.name_column {
format!("{key_col}_{alias_postfix}_s as {key_col}_{alias_postfix}, {name_col}_{alias_postfix}_s as {name_col}_{alias_postfix}", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix)
} else {
format!("{col}_{alias_postfix}_s as {col}_{alias_postfix}", col=l.key_column, alias_postfix=alias_postfix)
}
})
});
let join_array_rca_drill_2 = join(join_array_rca_drill_2, ", ");
// Do GroupArray and Array Join clauses for external measures, also
let mea_cols = (1..=meas.len())
.map(|m_idx| format!("final_m{col}", col=m_idx));
let mea_cols = join(mea_cols, ", ");
let group_array_ext_mea = (1..=meas.len())
.map(|m_idx| format!("groupArray(final_m{col}) as final_m{col}_s", col=m_idx));
let group_array_ext_mea = join(group_array_ext_mea, ", ");
let join_array_ext_mea = (1..=meas.len())
.map(|m_idx| format!("final_m{col}_s as final_m{col}", col=m_idx));
let join_array_ext_mea = join(join_array_ext_mea, ", ");
// groupArray cols (the drill_2 from rca) can't be included in the group by or select
let c_drills_minus_rca_drill_2 = c_drills.iter()
.filter(|d| !rca.drill_2.contains(&d))
.map(|d| d.col_alias_only_string());
let c_drills_minus_rca_drill_2 = join(c_drills_minus_rca_drill_2, ", ");
let d_drills_minus_rca_drill_2 = d_drills.iter()
.filter(|d| !rca.drill_2.contains(&d))
.map(|d| d.col_alias_only_string());
let d_drills_minus_rca_drill_2 = join(d_drills_minus_rca_drill_2, ", ");
// a and b drills are kept as-is
let a_drills_str = a_drills.iter()
.map(|d| d.col_alias_only_string());
let a_drills_str = join(a_drills_str, ", ");
let b_drills_str = b_drills.iter()
.map(|d| d.col_alias_only_string());
let b_drills_str = join(b_drills_str, ", ");
// Now add part c
let ac = format!("select {}, {}{} a, c from \
(select {}, {}, {}{} groupArray(a) as a_s, sum(a) as c from ({}) group by {}) \
Array Join {}, {}{} a_s as a",
a_drills_str,
mea_cols,
if mea_cols.is_empty() { "" } else { "," },
c_drills_minus_rca_drill_2,
group_array_rca_drill_2,
group_array_ext_mea,
if group_array_ext_mea.is_empty() { "" } else { "," },
a,
c_drills_minus_rca_drill_2,
join_array_rca_drill_2,
join_array_ext_mea,
if join_array_ext_mea.is_empty() { "" } else { "," },
);
println!("{}", ac);
// Now add part d
let bd = if d_drills.is_empty() {
format!("select {}, b, d from \
(select {}, groupArray(b) as b_s, sum(b) as d from ({})) \
Array Join {}, b_s as b",
b_drills_str,
group_array_rca_drill_2,
b,
join_array_rca_drill_2,
)
} else {
format!("select {}, b, d from \
(select {}, {}, groupArray(b) as b_s, sum(b) as d from ({}) group by {}) \
Array Join {}, b_s as b",
b_drills_str,
d_drills_minus_rca_drill_2,
group_array_rca_drill_2,
b,
d_drills_minus_rca_drill_2,
join_array_rca_drill_2,
)
};
println!("bd: {}", bd);
// now do the final join
let mut final_sql = format!("select * from ({}) all inner join ({}) using {}",
ac,
bd,
b_final_drills,
);
// adding final measures at the end
let final_ext_meas = if !meas.is_empty() {
", ".to_owned() + &join((1..meas.len()+1).map(|i| format!("final_m{}", i)), ", ")
} else {
"".to_owned()
};
final_sql = format!("select {}, {}((a/b) / (c/d)) as rca{} from ({})",
a_final_drills,
if rca.debug { "a, b, c, d, " } else { "" },
final_ext_meas,
final_sql,
);
// SPECIAL CASE
// Hack to deal with no drills on d
// Later, make this better
final_sql = final_sql.replace("select , ", "select ");
final_sql = final_sql.replace("group by )", ")");
(final_sql, a_final_drills)
} | // drill dim). | random_line_split |
rca.rs | // Optimization for RCA
// Ordinarily, just a, b, c, and d are scanned separately and then combined by joins.
// a: (each product, each city) // can be cut on drill 1
// b: (all products, each city)
// c: (each product, all cities) // can be cut on drill 1
// d: (all products, all cities)
//
// Note that external cuts are always valid (i.e. if above abcd were cut by a year).
//
// However, this results in extra scans, especially if there's no internal cuts (cuts on an rca
// drill dim).
//
// The optimization is to derive the c and d aggregates from a and b. Since cuts are allowed on the
// first drill in the rca, both a and b have to be scanned (b cannot be cut on the first drill).
//
// In clickhouse there is no partition, so it's trickier to do what looks like two different group
// by.
//
// The general idea is to do one group by, in which both the measure and the 2nd drill are rolled
// up.
// - measure is rolled up by aggregate fn (e.g. sum)
// - 2nd drill is rolled up by groupArray, which just collects all the values into an array in
// order.
// - the original measure is also rolled up by groupArray.
//
// Then the pivoted table is melted using Array Join on the 2nd drill and the original measure
// (which would be a or c), while preserving the aggregated measure (c or d) from the pivoted
// table.
//
// An example (not accounting for external cuts or dims) would be
// select drill_1_id, drill_2_id, a, c from (
// select drill_1_id, groupArray(drill_2_id) as drill_2_id_s, groupArray(a) a_s, sum(a) as c from (
// select * from a_table
// )
// group by drill_1_id
// )
// array join drill_2_id_s as drill_2_id, a_s as a
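//
// To make the melt concrete, a tiny made-up walk-through (illustration only;
// drill_1_id / drill_2_id stand for the rca drill key columns, `a` for the
// rca measure):
//
// input rows (drill_1_id, drill_2_id, a): (1, 10, 2), (1, 20, 3), (2, 10, 5)
//
// after `group by drill_1_id` with groupArray/sum:
// (drill_1_id=1, drill_2_id_s=[10, 20], a_s=[2, 3], c=5)
// (drill_1_id=2, drill_2_id_s=[10], a_s=[5], c=5)
//
// after `array join drill_2_id_s as drill_2_id, a_s as a`:
// (1, 10, a=2, c=5), (1, 20, a=3, c=5), (2, 10, a=5, c=5)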
use itertools::join;
use crate::sql::primary_agg::primary_agg;
use super::{
TableSql,
CutSql,
DrilldownSql,
MeasureSql,
RcaSql,
};
pub fn calculate(
table: &TableSql,
cuts: &[CutSql],
drills: &[DrilldownSql],
meas: &[MeasureSql],
rca: &RcaSql,
) -> (String, String)
| {
// append the correct rca drill to drilldowns
// for a, both
// for b, d2
// for c, d1
// for d, none
let mut a_drills = drills.to_vec();
let mut b_drills = drills.to_vec();
let mut c_drills = drills.to_vec();
let d_drills = drills.to_vec();
a_drills.extend_from_slice(&rca.drill_1);
a_drills.extend_from_slice(&rca.drill_2);
b_drills.extend_from_slice(&rca.drill_2);
c_drills.extend_from_slice(&rca.drill_1);
println!("a: {:?}", a_drills);
println!("b: {:?}", b_drills);
println!("c: {:?}", c_drills);
println!("d: {:?}", d_drills);
// prepend the rca sql to meas
let all_meas = {
let mut temp = vec![rca.mea.clone()];
temp.extend_from_slice(meas);
temp
};
// for cuts,
// - a can be cut on d1 and ext
// - b cannot be int cut, only ext
// - c can be cut on d1 and ext
// - d cannot be int cut, only ext
//
// In the future, would I allow more cuts? Maybe depending on use case
//
// The blacklist is the drilldowns contained in each of a, b, c, d
//
// Note: parent of rca drills are not filtered, because they are meant
// to limit the rca calculation space!
//
// don't need to worry about aliases, because cuts don't use aliases,
// and are just matching against drill key col
let ac_cut_cols_blacklist: Vec<_> = rca.drill_2.iter()
.flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone()))
.collect();
let bd_cut_cols_blacklist: Vec<_> = rca.drill_1.iter().chain(rca.drill_2.iter())
.flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone()))
.collect();
let ac_cuts: Vec<_> = cuts.iter()
.filter(|cut| {
ac_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none()
})
.cloned()
.collect();
let bd_cuts: Vec<_> = cuts.iter()
.filter(|cut| {
bd_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none()
})
.cloned()
.collect();
println!("{:#?}", cuts);
println!("{:#?}", ac_cuts);
println!("{:#?}", bd_cuts);
// now aggregate each component
//
// As an optimization, c is calculated from a, and d is calculated from b
// If there are no internal cuts, then b, c, d are calculated from a.
// First do aggregation for part a, b
let (a, a_final_drills) = primary_agg(table, &ac_cuts, &a_drills, &all_meas, None);
let (b, b_final_drills) = primary_agg(table, &bd_cuts, &b_drills, &all_meas, None);
// replace final_m0 with letter name.
// I put the rca measure at the beginning of the measures, so it should
// always be m0
let a = a.replace("final_m0", "a");
let b = b.replace("final_m0", "b");
// for clickhouse, need to make groupArray and Array Join clauses for drill_1 for when
// aggregating a to c, and b to d.
// (drill_2 would be needed if going from a to b)
// TODO refactor these lines out to helpers
let group_array_rca_drill_2 = rca.drill_2.iter()
.flat_map(|d| {
let alias_postfix = &d.alias_postfix;
d.level_columns.iter().map(move |l| {
if let Some(ref name_col) = l.name_column {
format!("groupArray({key_col}_{alias_postfix}) as {key_col}_{alias_postfix}_s, groupArray({name_col}_{alias_postfix}) as {name_col}_{alias_postfix}_s", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix)
} else {
format!("groupArray({col}_{alias_postfix}) as {col}_{alias_postfix}_s", col=l.key_column, alias_postfix=alias_postfix)
}
})
});
let group_array_rca_drill_2 = join(group_array_rca_drill_2, ", ");
let join_array_rca_drill_2 = rca.drill_2.iter()
.flat_map(|d| {
let alias_postfix = &d.alias_postfix;
d.level_columns.iter().map(move |l| {
if let Some(ref name_col) = l.name_column {
format!("{key_col}_{alias_postfix}_s as {key_col}_{alias_postfix}, {name_col}_{alias_postfix}_s as {name_col}_{alias_postfix}", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix)
} else {
format!("{col}_{alias_postfix}_s as {col}_{alias_postfix}", col=l.key_column, alias_postfix=alias_postfix)
}
})
});
let join_array_rca_drill_2 = join(join_array_rca_drill_2, ", ");
// Do GroupArray and Array Join clauses for external measures, also
let mea_cols = (1..=meas.len())
.map(|m_idx| format!("final_m{col}", col=m_idx));
let mea_cols = join(mea_cols, ", ");
let group_array_ext_mea = (1..=meas.len())
.map(|m_idx| format!("groupArray(final_m{col}) as final_m{col}_s", col=m_idx));
let group_array_ext_mea = join(group_array_ext_mea, ", ");
let join_array_ext_mea = (1..=meas.len())
.map(|m_idx| format!("final_m{col}_s as final_m{col}", col=m_idx));
let join_array_ext_mea = join(join_array_ext_mea, ", ");
// groupArray cols (the drill_2 from rca) can't be included in the group by or select
let c_drills_minus_rca_drill_2 = c_drills.iter()
.filter(|d| !rca.drill_2.contains(&d))
.map(|d| d.col_alias_only_string());
let c_drills_minus_rca_drill_2 = join(c_drills_minus_rca_drill_2, ", ");
let d_drills_minus_rca_drill_2 = d_drills.iter()
.filter(|d| !rca.drill_2.contains(&d))
.map(|d| d.col_alias_only_string());
let d_drills_minus_rca_drill_2 = join(d_drills_minus_rca_drill_2, ", ");
// a and b drills are kept as-is
let a_drills_str = a_drills.iter()
.map(|d| d.col_alias_only_string());
let a_drills_str = join(a_drills_str, ", ");
let b_drills_str = b_drills.iter()
.map(|d| d.col_alias_only_string());
let b_drills_str = join(b_drills_str, ", ");
// Now add part c
let ac = format!("select {}, {}{} a, c from \
(select {}, {}, {}{} groupArray(a) as a_s, sum(a) as c from ({}) group by {}) \
Array Join {}, {}{} a_s as a",
a_drills_str,
mea_cols,
if mea_cols.is_empty() { "" } else { "," },
c_drills_minus_rca_drill_2,
group_array_rca_drill_2,
group_array_ext_mea,
if group_array_ext_mea.is_empty() { "" } else { "," },
a,
c_drills_minus_rca_drill_2,
join_array_rca_drill_2,
join_array_ext_mea,
if join_array_ext_mea.is_empty() { "" } else { "," },
);
println!("{}", ac);
// Now add part d
let bd = if d_drills.is_empty() {
format!("select {}, b, d from \
(select {}, groupArray(b) as b_s, sum(b) as d from ({})) \
Array Join {}, b_s as b",
b_drills_str,
group_array_rca_drill_2,
b,
join_array_rca_drill_2,
)
} else {
format!("select {}, b, d from \
(select {}, {}, groupArray(b) as b_s, sum(b) as d from ({}) group by {}) \
Array Join {}, b_s as b",
b_drills_str,
d_drills_minus_rca_drill_2,
group_array_rca_drill_2,
b,
d_drills_minus_rca_drill_2,
join_array_rca_drill_2,
)
};
println!("bd: {}", bd);
// now do the final join
let mut final_sql = format!("select * from ({}) all inner join ({}) using {}",
ac,
bd,
b_final_drills,
);
// adding final measures at the end
let final_ext_meas = if !meas.is_empty() {
", ".to_owned() + &join((1..meas.len()+1).map(|i| format!("final_m{}", i)), ", ")
} else {
"".to_owned()
};
final_sql = format!("select {}, {}((a/b) / (c/d)) as rca{} from ({})",
a_final_drills,
if rca.debug { "a, b, c, d, " } else { "" },
final_ext_meas,
final_sql,
);
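    // Illustrative shape of the generated statement (hypothetical identifiers,
    // assuming one final drill column `geo` and one external measure):
    //   select geo, ((a/b) / (c/d)) as rca, final_m1
    //   from (select * from (<ac subquery>) all inner join (<bd subquery>) using geo)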
// SPECIAL CASE
// Hack to deal with no drills on d
// Later, make this better
final_sql = final_sql.replace("select , ", "select ");
final_sql = final_sql.replace("group by )", ")");
(final_sql, a_final_drills)
} | identifier_body |
|
acss.go | // Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
// package acss implements "Asynchronous Complete Secret Sharing" as described in
//
// https://iotaledger.github.io/crypto-tss/talks/async-dkg/slides-async-dkg.html#/5/6
//
// Here is a copy of the pseudo code from the slide mentioned above (just in case):
//
// > // dealer with input s
// > sample random polynomial ϕ such that ϕ(0) = s
// > C, S := VSS.Share(ϕ, f+1, n)
// > E := [PKI.Enc(S[i], pkᵢ) for each party i]
// >
// > // party i (including the dealer)
// > RBC(C||E)
// > sᵢ := PKI.Dec(eᵢ, skᵢ)
// > if decrypt fails or VSS.Verify(C, i, sᵢ) == false:
// > send <IMPLICATE, i, skᵢ> to all parties
// > else:
// > send <OK>
// >
// > on receiving <OK> from n-f parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from f+1 parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from n-f parties:
// > if sᵢ is valid:
// > out = true
// > output sᵢ
// >
// > on receiving <IMPLICATE, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if decrypt fails or VSS.Verify(C, j, sⱼ) == false:
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
// >
// > on receiving <RECOVER, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if VSS.Verify(C, j, sⱼ): T = T ∪ {sⱼ}
// >
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
//
// On the adaptations and sources:
//
// > More details and references to the papers are below:
// >
// > Here are the references for the Asynchronous Secret-Sharing that I was referring to.
// > It is purely based on (Feldman) Verifiable Secret Sharing and does not rely on any PVSS schemes
// > requiring fancy NIZKP (and thus trades network-complexity vs computational-complexity):
// >
// >   * In [1], Section IV.A., we use the ACSS scheme from [2] but replace its Pedersen
// > commitment with a Feldman polynomial commitment to achieve Homomorphic-Partial-Commitment.
// >
// > * In [2], Section 5.3. they explain the Pedersen-based hbACSS0 and give some proof sketch.
// > The complete description and analysis of hbACSS0 can be found in [3]. However, as mentioned
// > before they use Kate-commitments instead of Feldman/Pedersen. This has better message
// > complexity especially when multiple secrets are shared at the same time, but in our case
// >   that would need to be replaced with Feldman, making it much simpler without losing any security.
// > Actually, [3] is just a pre-print, the official published version is [4], but [4] also contains
// > other, non-relevant, variants like hbACSS1 and hbACSS2 and much more analysis.
// > So, I found [3] a bit more helpful, although it is just the preliminary version.
// > They also provide their reference implementation in [5], which is also what the
// > authors of [1] used for their practical DKG results.
// >
// > [1] Practical Asynchronous Distributed Key Generation https://eprint.iacr.org/2021/1591
// > [2] Asynchronous Data Dissemination and its Applications https://eprint.iacr.org/2021/777
// > [3] Brief Note: Asynchronous Verifiable Secret Sharing with Optimal Resilience and Linear Amortized Overhead https://arxiv.org/pdf/1902.06095.pdf
// > [4] hbACSS: How to Robustly Share Many Secrets https://eprint.iacr.org/2021/159
// > [5] https://github.com/tyurek/hbACSS
//
// A PoC implementation: <https://github.com/Wollac/async.go>.
//
// The crypto part shown in the pseudo-code above is replaced in the implementation with a
// scheme that allows keeping the private keys secret. The scheme implementation is taken
// from the PoC mentioned above. It is described in <https://hackmd.io/@CcRtfCBnRbW82-AdbFJUig/S1qcPiUN5>.
package acss
import (
"errors"
"fmt"
"math"
"go.dedis.ch/kyber/v3"
"go.dedis.ch/kyber/v3/share"
"go.dedis.ch/kyber/v3/suites"
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/wasp/packages/gpa"
"github.com/iotaledger/wasp/packages/gpa/acss/crypto"
rbc "github.com/iotaledger/wasp/packages/gpa/rbc/bracha"
"github.com/iotaledger/wasp/packages/util/rwutil"
)
const (
subsystemRBC byte = iota
)
type Output struct {
PriShare *share.PriShare // Private share, received by this instance.
Commits []kyber.Point // Feldman's commitment to the shared polynomial.
}
type acssImpl struct {
suite suites.Suite
n int
f int
me gpa.NodeID
mySK kyber.Scalar
myPK kyber.Point
myIdx int
dealer gpa.NodeID // A node that is recognized as a dealer.
dealCB func(int, []byte) []byte // Callback to be called on the encrypted deals (for tests actually).
peerPKs map[gpa.NodeID]kyber.Point // Peer public keys.
peerIdx []gpa.NodeID // Particular order of the nodes (position in the polynomial).
rbc gpa.GPA // RBC to share `C||E`.
rbcOut *crypto.Deal // Deal broadcasted by the dealer.
voteOKRecv map[gpa.NodeID]bool // A set of received OK votes.
voteREADYRecv map[gpa.NodeID]bool // A set of received READY votes.
voteREADYSent bool // Have we sent our READY vote?
pendingIRMsgs []*msgImplicateRecover // I/R messages are buffered, if the RBC is not completed yet.
implicateRecv map[gpa.NodeID]bool // To check, that implicate only received once from a node.
recoverRecv map[gpa.NodeID]*share.PriShare // Private shares from the RECOVER messages.
outS *share.PriShare // Our share of the secret (decrypted from rbcOutE).
output bool
msgWrapper *gpa.MsgWrapper
log *logger.Logger
}
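// Compile-time assertion that *acssImpl satisfies the gpa.GPA interface.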
var _ gpa.GPA = &acssImpl{}
func New(
suite suites.Suite, // Ed25519
peers []gpa.NodeID, // Participating nodes in a specific order.
peerPKs map[gpa.NodeID]kyber.Point, // Public keys for all the peers.
f int, // Max number of expected faulty nodes.
me gpa.NodeID, // ID of this node.
mySK kyber.Scalar, // Secret Key of this node.
dealer gpa.NodeID, // The dealer node for this protocol instance.
dealCB func(int, []byte) []byte, // For tests only: interceptor for the deal to be shared.
log *logger.Logger, // A logger to use.
) gpa.GPA {
n := len(peers)
if dealCB == nil {
dealCB = func(i int, b []byte) []byte { return b }
}
a := acssImpl{
suite: suite,
n: n,
f: f,
me: me,
mySK: mySK,
myPK: peerPKs[me],
		myIdx:         -1, // Updated below.
dealer: dealer,
dealCB: dealCB,
peerPKs: peerPKs,
peerIdx: peers,
rbc: rbc.New(peers, f, me, dealer, math.MaxInt, func(b []byte) bool { return true }), // TODO: Provide meaningful maxMsgSize
rbcOut: nil, // Will be set on output from the RBC.
voteOKRecv: map[gpa.NodeID]bool{},
voteREADYRecv: map[gpa.NodeID]bool{},
voteREADYSent: false,
pendingIRMsgs: []*msgImplicateRecover{},
implicateRecv: map[gpa.NodeID]bool{},
recoverRecv: map[gpa.NodeID]*share.PriShare{},
outS: nil,
output: false,
log: log,
}
a.msgWrapper = gpa.NewMsgWrapper(msgTypeWrapped, func(subsystem byte, index int) (gpa.GPA, error) {
if subsystem == subsystemRBC {
if index != 0 {
return nil, fmt.Errorf("unknown rbc index: %v", index)
}
return a.rbc, nil
}
return nil, fmt.Errorf("unknown subsystem: %v", subsystem)
})
if a.myIdx = a.peerIndex(me); a.myIdx == -1 {
panic("i'm not in the peer list")
}
return gpa.NewOwnHandler(me, &a)
}
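// Example usage (illustrative sketch only; the node IDs, key material and logger
// below are placeholders, not values defined in this package):
//
//	suite := suites.MustFind("Ed25519")
//	nodes := []gpa.NodeID{n0, n1, n2, n3} // previously constructed node IDs
//	acss := New(suite, nodes, peerPKs, 1, n0, sk0, n0, nil, log)
//	msgs := acss.Input(suite.Scalar().Pick(suite.RandomStream())) // dealer only
//	// Deliver msgs to the peers and feed their replies back via acss.Message(...).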
// Input for the algorithm is the secret to share.
// It can be provided by the dealer only.
func (a *acssImpl) Input(input gpa.Input) gpa.OutMessages {
if a.me != a.dealer {
panic(errors.New("only dealer can initiate the sharing"))
}
if input == nil {
panic(errors.New("we expect kyber.Scalar as input"))
}
return a.handleInput(input.(kyber.Scalar))
}
// Receive all the messages and route them to the appropriate handlers.
func (a *acssImpl) Message(msg gpa.Message) gpa.OutMessages {
switch m := msg.(type) {
case *gpa.WrappingMsg:
switch m.Subsystem() {
case subsystemRBC:
return a.handleRBCMessage(m)
default:
panic(fmt.Errorf("unexpected wrapped message: %+v", m))
}
case *msgVote:
switch m.kind {
case msgVoteOK:
return a.handleVoteOK(m)
case msgVoteREADY:
return a.handleVoteREADY(m)
default:
panic(fmt.Errorf("unexpected vote message: %+v", m))
}
case *msgImplicateRecover:
return a.handleImplicateRecoverReceived(m)
default:
panic(fmt.Errorf("unexpected message: %+v", msg))
}
}
// > // dealer with input s
// > sample random polynomial ϕ such that ϕ(0) = s
// > C, S := VSS.Share(ϕ, f+1, n)
// > E := [PKI.Enc(S[i], pkᵢ) for each party i]
// >
// > // party i (including the dealer)
// > RBC(C||E)
func (a *acssImpl) handleInput(secretToShare kyber.Scalar) gpa.OutMessages {
pubKeys := make([]kyber.Point, 0)
for _, peerID := range a.peerIdx {
pubKeys = append(pubKeys, a.peerPKs[peerID])
}
deal := crypto.NewDeal(a.suite, pubKeys, secretToShare)
data, err := deal.MarshalBinary()
if err != nil {
panic(fmt.Sprintf("acss: internal error: %v", err))
}
// > RBC(C||E)
rbcCEPayloadBytes := rwutil.WriteToBytes(&msgRBCCEPayload{suite: a.suite, data: data})
msgs := a.msgWrapper.WrapMessages(subsystemRBC, 0, a.rbc.Input(rbcCEPayloadBytes))
return a.tryHandleRBCTermination(false, msgs)
}
// Delegate received messages to the RBC and handle its output.
//
// > // party i (including the dealer)
// > RBC(C||E)
func (a *acssImpl) handleRBCMessage(m *gpa.WrappingMsg) gpa.OutMessages {
wasOut := a.rbc.Output() != nil // To send the msgRBCCEOutput message once (for perf reasons).
msgs := a.msgWrapper.WrapMessages(subsystemRBC, 0, a.rbc.Message(m.Wrapped()))
return a.tryHandleRBCTermination(wasOut, msgs)
}
func (a *acssImpl) tryHandleRBCTermination(wasOut bool, msgs gpa.OutMessages) gpa.OutMessages {
if out := a.rbc.Output(); !wasOut && out != nil {
// Send the result for self as a message (maybe the code will look nicer this way).
outParsed, err := rwutil.ReadFromBytes(out.([]byte), &msgRBCCEPayload{suite: a.suite})
if err != nil {
panic(fmt.Errorf("cannot unmarshal msgRBCCEPayload: %w", err))
}
msgs.AddAll(a.handleRBCOutput(outParsed))
}
return msgs
}
// Upon receiving the RBC output...
//
// > sᵢ := PKI.Dec(eᵢ, skᵢ)
// > if decrypt fails or VSS.Verify(C, i, sᵢ) == false:
// > send <IMPLICATE, i, skᵢ> to all parties
// > else:
// > send <OK>
func (a *acssImpl) handleRBCOutput(rbcOutput *msgRBCCEPayload) gpa.OutMessages {
if a.outS != nil || a.rbcOut != nil {
// Take the first RBC output only.
return nil
}
//
// Store the broadcast result and process pending IMPLICATE/RECOVER messages, if any.
deal, err := crypto.DealUnmarshalBinary(a.suite, a.n, rbcOutput.data)
if err != nil {
panic(errors.New("cannot unmarshal msgRBCCEPayload.data"))
}
a.rbcOut = deal
msgs := a.handleImplicateRecoverPending(gpa.NoMessages())
//
// Process the RBC output, as described above.
secret := crypto.Secret(a.suite, a.rbcOut.PubKey, a.mySK)
myShare, err := crypto.DecryptShare(a.suite, a.rbcOut, a.myIdx, secret)
if err != nil {
return a.broadcastImplicate(err, msgs)
}
a.outS = myShare
a.tryOutput() // Maybe the READY messages are already received.
return a.handleImplicateRecoverPending(a.broadcastVote(msgVoteOK, msgs))
}
// > on receiving <OK> from n-f parties:
// > send <READY> to all parties
func (a *acssImpl) handleVoteOK(msg *msgVote) gpa.OutMessages {
a.voteOKRecv[msg.Sender()] = true
count := len(a.voteOKRecv)
if !a.voteREADYSent && count >= (a.n-a.f) {
a.voteREADYSent = true
return a.broadcastVote(msgVoteREADY, gpa.NoMessages())
}
return nil
}
// > on receiving <READY> from f+1 parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from n-f parties:
// > if sᵢ is valid:
// > out = true
// > output sᵢ
func (a *acssImpl) handleVoteREADY(msg *msgVote) gpa.OutMessages {
a.voteREADYRecv[msg.Sender()] = true
count := len(a.voteREADYRecv)
msgs := gpa.NoMessages()
if !a.voteREADYSent && count >= (a.f+1) {
msgs = a.broadcastVote(msgVoteREADY, msgs)
a.voteREADYSent = true
}
a.tryOutput()
return a.handleImplicateRecoverPending(msgs)
}
// It is possible that we receive IMPLICATE/RECOVER messages before our RBC is completed.
// If RBC is not done yet, we buffer these messages for later processing; otherwise we process them right away.
func (a *acssImpl) handleImplicateRecoverReceived(msg *msgImplicateRecover) gpa.OutMessages {
if a.rbcOut == nil {
a.pendingIRMsgs = append(a.pendingIRMsgs, msg)
return nil
}
switch msg.kind {
case msgImplicateRecoverKindIMPLICATE:
return a.handleImplicate(msg)
case msgImplicateRecoverKindRECOVER:
return a.handleRecover(msg)
default:
panic(fmt.Errorf("handleImplicateRecoverReceived: unexpected msgImplicateRecover.kind=%v, message: %+v", msg.kind, msg))
}
}
func (a *acssImpl) handleImplicateRecoverPending(msgs gpa.OutMessages) gpa.OutMessages {
//
// Only process the IMPLICATE/RECOVER messages, if this node has RBC completed.
if a.rbcOut == nil {
return msgs
}
postponedIRMsgs := []*msgImplicateRecover{}
for _, m := range a.pendingIRMsgs {
switch m.kind {
case msgImplicateRecoverKindIMPLICATE:
// Only handle the IMPLICATE messages when output is already produced to implement the following:
//
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
//
if a.output {
msgs.AddAll(a.handleImplicate(m))
} else {
postponedIRMsgs = append(postponedIRMsgs, m)
}
case msgImplicateRecoverKindRECOVER:
msgs.AddAll(a.handleRecover(m))
default:
panic(fmt.Errorf("handleImplicateRecoverReceived: unexpected msgImplicateRecover.kind=%v, message: %+v", m.kind, m))
}
}
a.pendingIRMsgs = postponedIRMsgs
return msgs
}
// Here the RBC is assumed to be completed already, OUT is set and the private key is checked.
//
// > on receiving <IMPLICATE, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if decrypt fails or VSS.Verify(C, j, sⱼ) == false:
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
//
// NOTE: We assume `if out == true:` stands for waiting until that condition holds.
func (a *acssImpl) handleImplicate(msg *msgImplicateRecover) gpa.OutMessages {
peerIndex := a.peerIndex(msg.sender)
if peerIndex == -1 {
a.log.Warnf("implicate received from unknown peer: %v", msg.sender)
return nil
}
//
// Check message duplicates.
if _, ok := a.implicateRecv[msg.sender]; ok {
// Received the implicate before, just ignore it.
return nil
}
a.implicateRecv[msg.sender] = true
//
// Check implicate.
secret, err := crypto.CheckImplicate(a.suite, a.rbcOut.PubKey, a.peerPKs[msg.sender], msg.data)
if err != nil {
a.log.Warnf("Invalid implication received: %v", err)
return nil
}
_, err = crypto.DecryptShare(a.suite, a.rbcOut, peerIndex, secret)
if err == nil {
// if we are able to decrypt the share, the implication is not correct
a.log.Warn("encrypted share is valid")
return nil
}
//
// Create the reveal message.
return a.broadcastRecover(gpa.NoMessages())
}
// Here the RBC is assumed to be completed already and the private key is checked.
//
// > on receiving <RECOVER, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if VSS.Verify(C, j, sⱼ): T = T ∪ {sⱼ}
// >
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
func (a *acssImpl) handleRecover(msg *msgImplicateRecover) gpa.OutMessages {
if a.output {
// Ignore the RECOVER messages, if we are done with the output.
return nil
}
peerIndex := a.peerIndex(msg.sender)
if peerIndex == -1 {
a.log.Warnf("Recover received from unexpected sender: %v", msg.sender)
return nil
}
if _, ok := a.recoverRecv[msg.sender]; ok {
a.log.Warnf("Recover was already received from %v", msg.sender)
return nil
}
peerSecret, err := crypto.DecryptShare(a.suite, a.rbcOut, peerIndex, msg.data)
if err != nil {
a.log.Warn("invalid secret revealed")
return nil
}
a.recoverRecv[msg.sender] = peerSecret
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
if len(a.recoverRecv) >= a.f+1 {
priShares := []*share.PriShare{}
for i := range a.recoverRecv {
priShares = append(priShares, a.recoverRecv[i])
}
myPriShare, err := crypto.InterpolateShare(a.suite, priShares, a.n, a.myIdx)
if err != nil {
a.log.Warnf("Failed to recover pri-poly: %v", err)
}
a.outS = myPriShare
a.output = true
return nil
}
return nil
}
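// broadcastVote queues a vote of the given kind addressed to every peer, including this
// node itself (self-delivery is handled by the gpa.NewOwnHandler wrapper returned from New).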
func (a *acssImpl) broadcastVote(voteKind msgVoteKind, msgs gpa.OutMessages) gpa.OutMessages {
for i := range a.peerIdx {
msg := &msgVote{
BasicMessage: gpa.NewBasicMessage(a.peerIdx[i]),
kind: voteKind,
}
msg.SetSender(a.me)
msgs.Add(msg)
}
return msgs
}
func (a *acssImpl) broadcastImplicate(reason error, msgs gpa.OutMessages) gpa.OutMessages {
a.log.Warnf("Sending implicate because of: %v", reason)
implicate := crypto.Implicate(a.suite, a.rbcOut.PubKey, a.mySK)
return a.broadcastImplicateRecover(msgImplicateRecoverKindIMPLICATE, implicate, msgs)
}
func (a *acssImpl) broadcastRecover(msgs gpa.OutMessages) gpa.OutMessages {
secret := crypto.Secret(a.suite, a.rbcOut.PubKey, a.mySK)
return a.broadcastImplicateRecover(msgImplicateRecoverKindRECOVER, secret, msgs)
}
func (a *acssImpl) broadcastImplicateRecover(kind msgImplicateKind, data []byte, msgs gpa.OutMessages) gpa.OutMessages {
for i := range a.peerIdx {
msgs.Add(&msgImplicateRecover{kind: kind, recipient: a.peerIdx[i], i: a.myIdx, data: data})
}
return msgs
}
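// tryOutput sets the output flag once READY votes from n-f peers have been received
// and our own share of the secret is known.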
func (a *acssImpl) tryOutput() {
count := len(a.voteREADYRecv)
if count >= (a.n-a.f) && a.outS != nil {
a.output = true | nc (a *acssImpl) peerIndex(peer gpa.NodeID) int {
for i := range a.peerIdx {
if a.peerIdx[i] == peer {
return i
}
}
return -1
}
func (a *acssImpl) Output() gpa.Output {
if a.output {
return &Output{
PriShare: a.outS,
Commits: a.rbcOut.Commits,
}
}
return nil
}
func (a *acssImpl) StatusString() string {
return fmt.Sprintf("{ACSS, output=%v, rbc=%v}", a.output, a.rbc.StatusString())
}
|
}
}
fu | identifier_name |
acss.go | // Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
// package acss implements "Asynchronous Complete Secret Sharing" as described in
//
// https://iotaledger.github.io/crypto-tss/talks/async-dkg/slides-async-dkg.html#/5/6
//
// Here is a copy of the pseudo code from the slide mentioned above (just in case):
//
// > // dealer with input s
// > sample random polynomial ϕ such that ϕ(0) = s
// > C, S := VSS.Share(ϕ, f+1, n)
// > E := [PKI.Enc(S[i], pkᵢ) for each party i]
// >
// > // party i (including the dealer)
// > RBC(C||E)
// > sᵢ := PKI.Dec(eᵢ, skᵢ)
// > if decrypt fails or VSS.Verify(C, i, sᵢ) == false:
// > send <IMPLICATE, i, skᵢ> to all parties
// > else:
// > send <OK>
// >
// > on receiving <OK> from n-f parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from f+1 parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from n-f parties:
// > if sᵢ is valid:
// > out = true
// > output sᵢ
// >
// > on receiving <IMPLICATE, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if decrypt fails or VSS.Verify(C, j, sⱼ) == false:
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
// >
// > on receiving <RECOVER, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if VSS.Verify(C, j, sⱼ): T = T ∪ {sⱼ}
// >
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
//
// On the adaptations and sources:
//
// > More details and references to the papers are below:
// >
// > Here are the references for the Asynchronous Secret-Sharing that I was referring to.
// > It is purely based on (Feldman) Verifiable Secret Sharing and does not rely on any PVSS schemes
// > requiring fancy NIZKP (and thus trades network-complexity vs computational-complexity):
// >
// >   * In [1], Section IV.A., we use the ACSS scheme from [2] but replace its Pedersen
// > commitment with a Feldman polynomial commitment to achieve Homomorphic-Partial-Commitment.
// >
// > * In [2], Section 5.3. they explain the Pedersen-based hbACSS0 and give some proof sketch.
// > The complete description and analysis of hbACSS0 can be found in [3]. However, as mentioned
// > before they use Kate-commitments instead of Feldman/Pedersen. This has better message
// > complexity especially when multiple secrets are shared at the same time, but in our case
// >   that would need to be replaced with Feldman, making it much simpler without losing any security.
// > Actually, [3] is just a pre-print, the official published version is [4], but [4] also contains
// > other, non-relevant, variants like hbACSS1 and hbACSS2 and much more analysis.
// > So, I found [3] a bit more helpful, although it is just the preliminary version.
// > They also provide their reference implementation in [5], which is also what the
// > authors of [1] used for their practical DKG results.
// >
// > [1] Practical Asynchronous Distributed Key Generation https://eprint.iacr.org/2021/1591
// > [2] Asynchronous Data Dissemination and its Applications https://eprint.iacr.org/2021/777
// > [3] Brief Note: Asynchronous Verifiable Secret Sharing with Optimal Resilience and Linear Amortized Overhead https://arxiv.org/pdf/1902.06095.pdf
// > [4] hbACSS: How to Robustly Share Many Secrets https://eprint.iacr.org/2021/159
// > [5] https://github.com/tyurek/hbACSS
//
// A PoC implementation: <https://github.com/Wollac/async.go>.
//
// The crypto part shown in the pseudo-code above is replaced in the implementation with a
// scheme that allows keeping the private keys secret. The scheme implementation is taken
// from the PoC mentioned above. It is described in <https://hackmd.io/@CcRtfCBnRbW82-AdbFJUig/S1qcPiUN5>.
package acss
import (
"errors"
"fmt"
"math"
"go.dedis.ch/kyber/v3"
"go.dedis.ch/kyber/v3/share"
"go.dedis.ch/kyber/v3/suites"
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/wasp/packages/gpa"
"github.com/iotaledger/wasp/packages/gpa/acss/crypto"
rbc "github.com/iotaledger/wasp/packages/gpa/rbc/bracha"
"github.com/iotaledger/wasp/packages/util/rwutil"
)
const (
subsystemRBC byte = iota
)
type Output struct {
PriShare *share.PriShare // Private share, received by this instance.
Commits []kyber.Point // Feldman's commitment to the shared polynomial.
}
type acssImpl struct {
suite suites.Suite
n int
f int
me gpa.NodeID
mySK kyber.Scalar
myPK kyber.Point
myIdx int
dealer gpa.NodeID // A node that is recognized as a dealer.
dealCB func(int, []byte) []byte // Callback to be called on the encrypted deals (for tests actually).
peerPKs map[gpa.NodeID]kyber.Point // Peer public keys.
peerIdx []gpa.NodeID // Particular order of the nodes (position in the polynomial).
rbc gpa.GPA // RBC to share `C||E`.
rbcOut *crypto.Deal // Deal broadcasted by the dealer.
voteOKRecv map[gpa.NodeID]bool // A set of received OK votes.
voteREADYRecv map[gpa.NodeID]bool // A set of received READY votes.
voteREADYSent bool // Have we sent our READY vote?
pendingIRMsgs []*msgImplicateRecover // I/R messages are buffered, if the RBC is not completed yet.
implicateRecv map[gpa.NodeID]bool // To check, that implicate only received once from a node.
recoverRecv map[gpa.NodeID]*share.PriShare // Private shares from the RECOVER messages.
outS *share.PriShare // Our share of the secret (decrypted from rbcOutE).
output bool
msgWrapper *gpa.MsgWrapper
log *logger.Logger
}
var _ gpa.GPA = &acssImpl{}
func New(
suite suites.Suite, // Ed25519
peers []gpa.NodeID, // Participating nodes in a specific order.
peerPKs map[gpa.NodeID]kyber.Point, // Public keys for all the peers.
f int, // Max number of expected faulty nodes.
me gpa.NodeID, // ID of this node.
mySK kyber.Scalar, // Secret Key of this node.
dealer gpa.NodeID, // The dealer node for this protocol instance.
dealCB func(int, []byte) []byte, // For tests only: interceptor for the deal to be shared.
log *logger.Logger, // A logger to use.
) gpa.GPA {
n := len(peers)
if dealCB == nil {
dealCB = func(i int, b []byte) []byte { return b }
}
a := acssImpl{
suite: suite,
n: n,
f: f,
me: me,
mySK: mySK,
myPK: peerPKs[me],
		myIdx:         -1, // Updated below.
dealer: dealer,
dealCB: dealCB,
peerPKs: peerPKs,
peerIdx: peers,
rbc: rbc.New(peers, f, me, dealer, math.MaxInt, func(b []byte) bool { return true }), // TODO: Provide meaningful maxMsgSize
rbcOut: nil, // Will be set on output from the RBC.
voteOKRecv: map[gpa.NodeID]bool{},
voteREADYRecv: map[gpa.NodeID]bool{},
voteREADYSent: false,
pendingIRMsgs: []*msgImplicateRecover{},
implicateRecv: map[gpa.NodeID]bool{},
recoverRecv: map[gpa.NodeID]*share.PriShare{},
outS: nil,
output: false,
log: log,
}
a.msgWrapper = gpa.NewMsgWrapper(msgTypeWrapped, func(subsystem byte, index int) (gpa.GPA, error) {
if subsystem == subsystemRBC {
if index != 0 {
return nil, fmt.Errorf("unknown rbc index: %v", index)
}
return a.rbc, nil
}
return nil, fmt.Errorf("unknown subsystem: %v", subsystem)
})
if a.myIdx = a.peerIndex(me); a.myIdx == -1 {
panic("i'm not in the peer list")
}
return gpa.NewOwnHandler(me, &a)
}
// Input for the algorithm is the secret to share.
// It can be provided by the dealer only.
func (a *acssImpl) Input(input gpa.Input) gpa.OutMessages {
if a.me != a.dealer {
panic(errors.New("only dealer can initiate the sharing"))
}
if input == nil {
panic(errors.New("we expect kyber.Scalar as input"))
}
return a.handleInput(input.(kyber.Scalar))
}
// Receive all the messages and route them to the appropriate handlers.
func (a *acssImpl) Message(msg gpa.Message) gpa.OutMessages {
switch m := msg.(type) {
case *gpa.WrappingMsg:
switch m.Subsystem() {
case subsystemRBC:
return a.handleRBCMessage(m)
default:
panic(fmt.Errorf("unexpected wrapped message: %+v", m))
}
case *msgVote:
switch m.kind {
case msgVoteOK:
return a.handleVoteOK(m)
case msgVoteREADY:
return a.handleVoteREADY(m)
default:
panic(fmt.Errorf("unexpected vote message: %+v", m))
}
case *msgImplicateRecover:
return a.handleImplicateRecoverReceived(m)
default:
panic(fmt.Errorf("unexpected message: %+v", msg))
}
}
// > // dealer with input s
// > sample random polynomial ϕ such that ϕ(0) = s
// > C, S := VSS.Share(ϕ, f+1, n)
// > E := [PKI.Enc(S[i], pkᵢ) for each party i]
// >
// > // party i (including the dealer)
// > RBC(C||E)
func (a *acssImpl) handleInput(secretToShare kyber.Scalar) gpa.OutMessages {
pubKeys := make([]kyber.Point, 0)
for _, peerID := range a.peerIdx {
pubKeys = append(pubKeys, a.peerPKs[peerID])
}
deal := crypto.NewDeal(a.suite, pubKeys, secretToShare)
data, err := deal.MarshalBinary()
if err != nil {
panic(fmt.Sprintf("acss: internal error: %v", err))
}
// > RBC(C||E)
rbcCEPayloadBytes := rwutil.WriteToBytes(&msgRBCCEPayload{suite: a.suite, data: data})
msgs := a.msgWrapper.WrapMessages(subsystemRBC, 0, a.rbc.Input(rbcCEPayloadBytes))
return a.tryHandleRBCTermination(false, msgs)
}
// Delegate received messages to the RBC and handle its output.
//
// > // party i (including the dealer)
// > RBC(C||E)
func (a *acssImpl) handleRBCMessage(m *gpa.WrappingMsg) gpa.OutMessages {
wasOut := a.rbc.Output() != nil // To send the msgRBCCEOutput message once (for perf reasons).
msgs := a.msgWrapper.WrapMessages(subsystemRBC, 0, a.rbc.Message(m.Wrapped()))
return a.tryHandleRBCTermination(wasOut, msgs)
}
func (a *acssImpl) tryHandleRBCTermination(wasOut bool, msgs gpa.OutMessages) gpa.OutMessages {
if out := a.rbc.Output(); !wasOut && out != nil {
// Send the result for self as a message (maybe the code will look nicer this way).
outParsed, err := rwutil.ReadFromBytes(out.([]byte), &msgRBCCEPayload{suite: a.suite})
if err != nil {
panic(fmt.Errorf("cannot unmarshal msgRBCCEPayload: %w", err))
}
msgs.AddAll(a.handleRBCOutput(outParsed))
}
return msgs
}
// Upon receiving the RBC output...
//
// > sᵢ := PKI.Dec(eᵢ, skᵢ)
// > if decrypt fails or VSS.Verify(C, i, sᵢ) == false:
// > send <IMPLICATE, i, skᵢ> to all parties
// > else:
// > send <OK>
func (a *acssImpl) handleRBCOutput(rbcOutput *msgRBCCEPayload) gpa.OutMessages {
if a.outS != nil || a.rbcOut != nil {
// Take the first RBC output only.
return nil
}
//
// Store the broadcast result and process pending IMPLICATE/RECOVER messages, if any.
deal, err := crypto.DealUnmarshalBinary(a.suite, a.n, rbcOutput.data)
if err != nil {
panic(errors.New("cannot unmarshal msgRBCCEPayload.data"))
}
a.rbcOut = deal
msgs := a.handleImplicateRecoverPending(gpa.NoMessages())
//
// Process the RBC output, as described above.
secret := crypto.Secret(a.suite, a.rbcOut.PubKey, a.mySK)
myShare, err := crypto.DecryptShare(a.suite, a.rbcOut, a.myIdx, secret)
if err != nil {
return a.broadcastImplicate(err, msgs)
}
a.outS = myShare
a.tryOutput() // Maybe the READY messages are already received.
return a.handleImplicateRecoverPending(a.broadcastVote(msgVoteOK, msgs))
}
// > on receiving <OK> from n-f parties:
// > send <READY> to all parties
func (a *acssImpl) handleVoteOK(msg *msgVote) gpa.OutMessages {
a.voteOKRecv[msg.Sender()] = true
count := len(a.voteOKRecv)
if !a.voteREADYSent && count >= (a.n-a.f) {
a.voteREADYSent = true
return a.broadcastVote(msgVoteREADY, gpa.NoMessages())
}
return nil
}
// > on receiving <READY> from f+1 parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from n-f parties:
// > if sᵢ is valid:
// > out = true
// > output sᵢ
func (a *acssImpl) handleVoteREADY(msg *msgVote) gpa.OutMessages {
a.voteREADYRecv[msg.Sender()] = true
count := len(a.voteREADYRecv)
msgs := gpa.NoMessages()
if !a.voteREADYSent && count >= (a.f+1) {
msgs = a.broadcastVote(msgVoteREADY, msgs)
a.voteREADYSent = true
}
a.tryOutput()
return a.handleImplicateRecoverPending(msgs)
}
// It is possible that we receive IMPLICATE/RECOVER messages before our RBC is completed.
// If RBC is not done yet, we buffer these messages for later processing; otherwise we process them right away.
func (a *acssImpl) handleImplicateRecoverReceived(msg *msgImplicateRecover) gpa.OutMessages {
if a.rbcOut == nil {
a.pendingIRMsgs = append(a.pendingIRMsgs, msg)
return nil
}
switch msg.kind {
case msgImplicateRecoverKindIMPLICATE:
return a.handleImplicate(msg)
case msgImplicateRecoverKindRECOVER:
return a.handleRecover(msg)
default:
panic(fmt.Errorf("handleImplicateRecoverReceived: unexpected msgImplicateRecover.kind=%v, message: %+v", msg.kind, msg))
}
}
func (a *acssImpl) handleImplicateRecoverPending(msgs gpa.OutMessages) gpa.OutMessages {
//
// Only process the IMPLICATE/RECOVER messages, if this node has RBC completed.
if a.rbcOut == nil {
return msgs
}
postponedIRMsgs := []*msgImplicateRecover{}
for _, m := range a.pendingIRMsgs {
switch m.kind {
case msgImplicateRecoverKindIMPLICATE:
// Only handle the IMPLICATE messages when output is already produced to implement the following:
//
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
//
if a.output {
msgs.AddAll(a.handleImplicate(m))
} else {
postponedIRMsgs = append(postponedIRMsgs, m)
}
case msgImplicateRecoverKindRECOVER:
msgs.AddAll(a.handleRecover(m))
default:
panic(fmt.Errorf("handleImplicateRecoverReceived: unexpected msgImplicateRecover.kind=%v, message: %+v", m.kind, m))
}
}
a.pendingIRMsgs = postponedIRMsgs
return msgs
}
// Here the RBC is assumed to be completed already, OUT is set and the private key is checked.
//
// > on receiving <IMPLICATE, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if decrypt fails or VSS.Verify(C, j, sⱼ) == false:
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
// | a.log.Warnf("implicate received from unknown peer: %v", msg.sender)
return nil
}
//
// Check message duplicates.
if _, ok := a.implicateRecv[msg.sender]; ok {
// Received the implicate before, just ignore it.
return nil
}
a.implicateRecv[msg.sender] = true
//
// Check implicate.
secret, err := crypto.CheckImplicate(a.suite, a.rbcOut.PubKey, a.peerPKs[msg.sender], msg.data)
if err != nil {
a.log.Warnf("Invalid implication received: %v", err)
return nil
}
_, err = crypto.DecryptShare(a.suite, a.rbcOut, peerIndex, secret)
if err == nil {
// if we are able to decrypt the share, the implication is not correct
a.log.Warn("encrypted share is valid")
return nil
}
//
// Create the reveal message.
return a.broadcastRecover(gpa.NoMessages())
}
// Here the RBC is assumed to be completed already and the private key is checked.
//
// > on receiving <RECOVER, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if VSS.Verify(C, j, sⱼ): T = T ∪ {sⱼ}
// >
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
func (a *acssImpl) handleRecover(msg *msgImplicateRecover) gpa.OutMessages {
if a.output {
// Ignore the RECOVER messages, if we are done with the output.
return nil
}
peerIndex := a.peerIndex(msg.sender)
if peerIndex == -1 {
a.log.Warnf("Recover received from unexpected sender: %v", msg.sender)
return nil
}
if _, ok := a.recoverRecv[msg.sender]; ok {
a.log.Warnf("Recover was already received from %v", msg.sender)
return nil
}
peerSecret, err := crypto.DecryptShare(a.suite, a.rbcOut, peerIndex, msg.data)
if err != nil {
a.log.Warn("invalid secret revealed")
return nil
}
a.recoverRecv[msg.sender] = peerSecret
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
if len(a.recoverRecv) >= a.f+1 {
priShares := []*share.PriShare{}
for i := range a.recoverRecv {
priShares = append(priShares, a.recoverRecv[i])
}
myPriShare, err := crypto.InterpolateShare(a.suite, priShares, a.n, a.myIdx)
if err != nil {
a.log.Warnf("Failed to recover pri-poly: %v", err)
}
a.outS = myPriShare
a.output = true
return nil
}
return nil
}
func (a *acssImpl) broadcastVote(voteKind msgVoteKind, msgs gpa.OutMessages) gpa.OutMessages {
for i := range a.peerIdx {
msg := &msgVote{
BasicMessage: gpa.NewBasicMessage(a.peerIdx[i]),
kind: voteKind,
}
msg.SetSender(a.me)
msgs.Add(msg)
}
return msgs
}
func (a *acssImpl) broadcastImplicate(reason error, msgs gpa.OutMessages) gpa.OutMessages {
a.log.Warnf("Sending implicate because of: %v", reason)
implicate := crypto.Implicate(a.suite, a.rbcOut.PubKey, a.mySK)
return a.broadcastImplicateRecover(msgImplicateRecoverKindIMPLICATE, implicate, msgs)
}
func (a *acssImpl) broadcastRecover(msgs gpa.OutMessages) gpa.OutMessages {
secret := crypto.Secret(a.suite, a.rbcOut.PubKey, a.mySK)
return a.broadcastImplicateRecover(msgImplicateRecoverKindRECOVER, secret, msgs)
}
func (a *acssImpl) broadcastImplicateRecover(kind msgImplicateKind, data []byte, msgs gpa.OutMessages) gpa.OutMessages {
for i := range a.peerIdx {
msgs.Add(&msgImplicateRecover{kind: kind, recipient: a.peerIdx[i], i: a.myIdx, data: data})
}
return msgs
}
func (a *acssImpl) tryOutput() {
count := len(a.voteREADYRecv)
if count >= (a.n-a.f) && a.outS != nil {
a.output = true
}
}
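// peerIndex returns the index of the given peer in the peerIdx ordering,
// or -1 if the peer is not part of this instance.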
func (a *acssImpl) peerIndex(peer gpa.NodeID) int {
for i := range a.peerIdx {
if a.peerIdx[i] == peer {
return i
}
}
return -1
}
func (a *acssImpl) Output() gpa.Output {
if a.output {
return &Output{
PriShare: a.outS,
Commits: a.rbcOut.Commits,
}
}
return nil
}
func (a *acssImpl) StatusString() string {
return fmt.Sprintf("{ACSS, output=%v, rbc=%v}", a.output, a.rbc.StatusString())
} | // NOTE: We assume `if out == true:` stands for a wait for such condition.
func (a *acssImpl) handleImplicate(msg *msgImplicateRecover) gpa.OutMessages {
peerIndex := a.peerIndex(msg.sender)
if peerIndex == -1 { | random_line_split |
acss.go | // Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
// package acss implements "Asynchronous Complete Secret Sharing" as described in
//
// https://iotaledger.github.io/crypto-tss/talks/async-dkg/slides-async-dkg.html#/5/6
//
// Here is a copy of the pseudo code from the slide mentioned above (just in case):
//
// > // dealer with input s
// > sample random polynomial ϕ such that ϕ(0) = s
// > C, S := VSS.Share(ϕ, f+1, n)
// > E := [PKI.Enc(S[i], pkᵢ) for each party i]
// >
// > // party i (including the dealer)
// > RBC(C||E)
// > sᵢ := PKI.Dec(eᵢ, skᵢ)
// > if decrypt fails or VSS.Verify(C, i, sᵢ) == false:
// > send <IMPLICATE, i, skᵢ> to all parties
// > else:
// > send <OK>
// >
// > on receiving <OK> from n-f parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from f+1 parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from n-f parties:
// > if sᵢ is valid:
// > out = true
// > output sᵢ
// >
// > on receiving <IMPLICATE, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if decrypt fails or VSS.Verify(C, j, sⱼ) == false:
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
// >
// > on receiving <RECOVER, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if VSS.Verify(C, j, sⱼ): T = T ∪ {sⱼ}
// >
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
//
// On the adaptations and sources:
//
// > More details and references to the papers are below:
// >
// > Here are the references for the Asynchronous Secret-Sharing that I was referring to.
// > It is purely based on (Feldman) Verifiable Secret Sharing and does not rely on any PVSS schemes
// > requiring fancy NIZKP (and thus trades network-complexity vs computational-complexity):
// >
// >   * In [1], Section IV.A., we use the ACSS scheme from [2] but replace its Pedersen
// > commitment with a Feldman polynomial commitment to achieve Homomorphic-Partial-Commitment.
// >
// > * In [2], Section 5.3. they explain the Pedersen-based hbACSS0 and give some proof sketch.
// > The complete description and analysis of hbACSS0 can be found in [3]. However, as mentioned
// > before they use Kate-commitments instead of Feldman/Pedersen. This has better message
// > complexity especially when multiple secrets are shared at the same time, but in our case
// >   that would need to be replaced with Feldman, making it much simpler without losing any security.
// > Actually, [3] is just a pre-print, the official published version is [4], but [4] also contains
// > other, non-relevant, variants like hbACSS1 and hbACSS2 and much more analysis.
// > So, I found [3] a bit more helpful, although it is just the preliminary version.
// > They also provide their reference implementation in [5], which is also what the
// > authors of [1] used for their practical DKG results.
// >
// > [1] Practical Asynchronous Distributed Key Generation https://eprint.iacr.org/2021/1591
// > [2] Asynchronous Data Dissemination and its Applications https://eprint.iacr.org/2021/777
// > [3] Brief Note: Asynchronous Verifiable Secret Sharing with Optimal Resilience and Linear Amortized Overhead https://arxiv.org/pdf/1902.06095.pdf
// > [4] hbACSS: How to Robustly Share Many Secrets https://eprint.iacr.org/2021/159
// > [5] https://github.com/tyurek/hbACSS
//
// A PoC implementation: <https://github.com/Wollac/async.go>.
//
// The crypto part shown in the pseudo-code above is replaced in the implementation with a
// scheme that allows keeping the private keys secret. The scheme implementation is taken
// from the PoC mentioned above. It is described in <https://hackmd.io/@CcRtfCBnRbW82-AdbFJUig/S1qcPiUN5>.
package acss
import (
"errors"
"fmt"
"math"
"go.dedis.ch/kyber/v3"
"go.dedis.ch/kyber/v3/share"
"go.dedis.ch/kyber/v3/suites"
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/wasp/packages/gpa"
"github.com/iotaledger/wasp/packages/gpa/acss/crypto"
rbc "github.com/iotaledger/wasp/packages/gpa/rbc/bracha"
"github.com/iotaledger/wasp/packages/util/rwutil"
)
const (
subsystemRBC byte = iota
)
type Output struct {
PriShare *share.PriShare // Private share, received by this instance.
Commits []kyber.Point // Feldman's commitment to the shared polynomial.
}
type acssImpl struct {
suite suites.Suite
n int
f int
me gpa.NodeID
mySK kyber.Scalar
myPK kyber.Point
myIdx int
dealer gpa.NodeID // A node that is recognized as a dealer.
dealCB func(int, []byte) []byte // Callback to be called on the encrypted deals (for tests actually).
peerPKs map[gpa.NodeID]kyber.Point // Peer public keys.
peerIdx []gpa.NodeID // Particular order of the nodes (position in the polynomial).
rbc gpa.GPA // RBC to share `C||E`.
rbcOut *crypto.Deal // Deal broadcasted by the dealer.
voteOKRecv map[gpa.NodeID]bool // A set of received OK votes.
voteREADYRecv map[gpa.NodeID]bool // A set of received READY votes.
voteREADYSent bool // Have we sent our READY vote?
pendingIRMsgs []*msgImplicateRecover // I/R messages are buffered, if the RBC is not completed yet.
implicateRecv map[gpa.NodeID]bool // To check, that implicate only received once from a node.
recoverRecv map[gpa.NodeID]*share.PriShare // Private shares from the RECOVER messages.
outS *share.PriShare // Our share of the secret (decrypted from rbcOutE).
output bool
msgWrapper *gpa.MsgWrapper
log *logger.Logger
}
var _ gpa.GPA = &acssImpl{}
func New(
suite suites.Suite, // Ed25519
peers []gpa.NodeID, // Participating nodes in a specific order.
peerPKs map[gpa.NodeID]kyber.Point, // Public keys for all the peers.
f int, // Max number of expected faulty nodes.
me gpa.NodeID, // ID of this node.
mySK kyber.Scalar, // Secret Key of this node.
dealer gpa.NodeID, // The dealer node for this protocol instance.
dealCB func(int, []byte) []byte, // For tests only: interceptor for the deal to be shared.
log *logger.Logger, // A logger to use.
) gpa.GPA {
n := len(peers)
if dealCB == nil {
dealCB = func(i int, b []byte) []byte { return b }
}
a := acssImpl{
suite: suite,
n: n,
f: f,
me: me,
mySK: mySK,
myPK: peerPKs[me],
myIdx: -1, // Updated bellow.
dealer: dealer,
dealCB: dealCB,
peerPKs: peerPKs,
peerIdx: peers,
rbc: rbc.New(peers, f, me, dealer, math.MaxInt, func(b []byte) bool { return true }), // TODO: Provide meaningful maxMsgSize
rbcOut: nil, // Will be set on output from the RBC.
voteOKRecv: map[gpa.NodeID]bool{},
voteREADYRecv: map[gpa.NodeID]bool{},
voteREADYSent: false,
pendingIRMsgs: []*msgImplicateRecover{},
implicateRecv: map[gpa.NodeID]bool{},
recoverRecv: map[gpa.NodeID]*share.PriShare{},
outS: nil,
output: false,
log: log,
}
a.msgWrapper = gpa.NewMsgWrapper(msgTypeWrapped, func(subsystem byte, index int) (gpa.GPA, error) {
if subsystem == subsystemRBC {
if index != 0 {
return nil, fmt.Errorf("unknown rbc index: %v", index)
}
return a.rbc, nil
}
return nil, fmt.Errorf("unknown subsystem: %v", subsystem)
})
if a.myIdx = a.peerIndex(me); a.myIdx == -1 {
panic("i'm not in the peer list")
}
return gpa.NewOwnHandler(me, &a)
}
// Input for the algorithm is the secret to share.
// It can be provided by the dealer only.
func (a *acssImpl) Input(input gpa.Input) gpa.OutMessages {
if a.me != a.dealer {
panic(errors.New("only dealer can initiate the sharing"))
}
if input == nil {
panic(errors.New("we expect kyber.Scalar as input"))
}
return a.handleInput(input.(kyber.Scalar))
}
// Receive all the messages and route them to the appropriate handlers.
func (a *acssImpl) Message(msg gpa.Message) gpa.OutMessages {
switch m := msg.(type) {
case *gpa.WrappingMsg:
switch m.Subsystem() {
case subsystemRBC:
return a.handleRBCMessage(m)
default:
panic(fmt.Errorf("unexpected wrapped message: %+v", m))
}
case *msgVote:
switch m.kind {
case msgVoteOK:
return a.handleVoteOK(m)
case msgVoteREADY:
return a.handleVoteREADY(m)
default:
panic(fmt.Errorf("unexpected vote message: %+v", m))
}
case *msgImplicateRecover:
return a.handleImplicateRecoverReceived(m)
default:
panic(fmt.Errorf("unexpected message: %+v", msg))
}
}
// > // dealer with input s
// > sample random polynomial ϕ such that ϕ(0) = s
// > C, S := VSS.Share(ϕ, f+1, n)
// > E := [PKI.Enc(S[i], pkᵢ) for each party i]
// >
// > // party i (including the dealer)
// > RBC(C||E)
func (a *acssImpl) handleInput(secretToShare kyber.Scalar) gpa.OutMessages {
pubKeys := make([]kyber.Point, 0)
for _, peerID := range a.peerIdx {
pubKeys = append(pubKeys, a.peerPKs[peerID])
}
deal := crypto.NewDeal(a.suite, pubKeys, secretToShare)
data, err := deal.MarshalBinary()
if err != nil {
panic(fmt.Sprintf("acss: internal error: %v", err))
}
// > RBC(C||E)
rbcCEPayloadBytes := rwutil.WriteToBytes(&msgRBCCEPayload{suite: a.suite, data: data})
msgs := a.msgWrapper.WrapMessages(subsystemRBC, 0, a.rbc.Input(rbcCEPayloadBytes))
return a.tryHandleRBCTermination(false, msgs)
}
// Delegate received messages to the RBC and handle its output.
//
// > // party i (including the dealer)
// > RBC(C||E)
func (a *acssImpl) handleRBCMessage(m *gpa.WrappingMsg) gpa.OutMessages {
wasOut := a.rbc.Output() != nil // To send the msgRBCCEOutput message once (for perf reasons).
msgs := a.msgWrapper.WrapMessages(subsystemRBC, 0, a.rbc.Message(m.Wrapped()))
return a.tryHandleRBCTermination(wasOut, msgs)
}
func (a *acssImpl) tryHandleRBCTermination(wasOut bool, msgs gpa.OutMessages) gpa.OutMessages {
if out := a.rbc.Output(); !wasOut && out != nil {
// Send the result for self as a message (maybe the code will look nicer this way).
outParsed, err := rwutil.ReadFromBytes(out.([]byte), &msgRBCCEPayload{suite: a.suite})
if err != nil {
panic(fmt.Errorf("cannot unmarshal msgRBCCEPayload: %w", err))
}
msgs.AddAll(a.handleRBCOutput(outParsed))
}
return msgs
}
// Upon receiving the RBC output...
//
// > sᵢ := PKI.Dec(eᵢ, skᵢ)
// > if decrypt fails or VSS.Verify(C, i, sᵢ) == false:
// > send <IMPLICATE, i, skᵢ> to all parties
// > else:
// > send <OK>
func (a *acssImpl) handleRBCOutput(rbcOutput *msgRBCCEPayload) gpa.OutMessages {
if a.outS != nil || a.rbcOut != nil {
// Take the first RBC output only.
return nil
}
//
// Store the broadcast result and process pending IMPLICATE/RECOVER messages, if any.
deal, err := crypto.DealUnmarshalBinary(a.suite, a.n, rbcOutput.data)
if err != nil {
panic(errors.New("cannot unmarshal msgRBCCEPayload.data"))
}
a.rbcOut = deal
msgs := a.handleImplicateRecoverPending(gpa.NoMessages())
//
// Process the RBC output, as described above.
secret := crypto.Secret(a.suite, a.rbcOut.PubKey, a.mySK)
myShare, err := crypto.DecryptShare(a.suite, a.rbcOut, a.myIdx, secret)
if err != nil {
return a.broadcastImplicate(err, msgs)
}
a.outS = myShare
a.tryOutput() // Maybe the READY messages are already received.
return a.handleImplicateRecoverPending(a.broadcastVote(msgVoteOK, msgs))
}
// > on receiving <OK> from n-f parties:
// > send <READY> to all parties
func (a *acssImpl) handleVoteOK(msg *msgVote) gpa.OutMessages {
a.voteOKRecv[msg.Sender()] = true
count := len(a.voteOKRecv) | > to all parties
// >
// > on receiving <READY> from n-f parties:
// > if sᵢ is valid:
// > out = true
// > output sᵢ
func (a *acssImpl) handleVoteREADY(msg *msgVote) gpa.OutMessages {
a.voteREADYRecv[msg.Sender()] = true
count := len(a.voteREADYRecv)
msgs := gpa.NoMessages()
if !a.voteREADYSent && count >= (a.f+1) {
msgs = a.broadcastVote(msgVoteREADY, msgs)
a.voteREADYSent = true
}
a.tryOutput()
return a.handleImplicateRecoverPending(msgs)
}
// It is possible that we receive IMPLICATE/RECOVER messages before our RBC is completed.
// If RBC is not done yet, we buffer these messages for later processing; otherwise we process them right away.
func (a *acssImpl) handleImplicateRecoverReceived(msg *msgImplicateRecover) gpa.OutMessages {
if a.rbcOut == nil {
a.pendingIRMsgs = append(a.pendingIRMsgs, msg)
return nil
}
switch msg.kind {
case msgImplicateRecoverKindIMPLICATE:
return a.handleImplicate(msg)
case msgImplicateRecoverKindRECOVER:
return a.handleRecover(msg)
default:
panic(fmt.Errorf("handleImplicateRecoverReceived: unexpected msgImplicateRecover.kind=%v, message: %+v", msg.kind, msg))
}
}
func (a *acssImpl) handleImplicateRecoverPending(msgs gpa.OutMessages) gpa.OutMessages {
//
// Only process the IMPLICATE/RECOVER messages, if this node has RBC completed.
if a.rbcOut == nil {
return msgs
}
postponedIRMsgs := []*msgImplicateRecover{}
for _, m := range a.pendingIRMsgs {
switch m.kind {
case msgImplicateRecoverKindIMPLICATE:
// Only handle the IMPLICATE messages when output is already produced to implement the following:
//
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
//
if a.output {
msgs.AddAll(a.handleImplicate(m))
} else {
postponedIRMsgs = append(postponedIRMsgs, m)
}
case msgImplicateRecoverKindRECOVER:
msgs.AddAll(a.handleRecover(m))
default:
panic(fmt.Errorf("handleImplicateRecoverReceived: unexpected msgImplicateRecover.kind=%v, message: %+v", m.kind, m))
}
}
a.pendingIRMsgs = postponedIRMsgs
return msgs
}
// Here the RBC is assumed to be completed already, OUT is set and the private key is checked.
//
// > on receiving <IMPLICATE, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if decrypt fails or VSS.Verify(C, j, sⱼ) == false:
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
//
// NOTE: We assume `if out == true:` stands for waiting until that condition holds.
func (a *acssImpl) handleImplicate(msg *msgImplicateRecover) gpa.OutMessages {
peerIndex := a.peerIndex(msg.sender)
if peerIndex == -1 {
a.log.Warnf("implicate received from unknown peer: %v", msg.sender)
return nil
}
//
// Check message duplicates.
if _, ok := a.implicateRecv[msg.sender]; ok {
// Received the implicate before, just ignore it.
return nil
}
a.implicateRecv[msg.sender] = true
//
// Check implicate.
secret, err := crypto.CheckImplicate(a.suite, a.rbcOut.PubKey, a.peerPKs[msg.sender], msg.data)
if err != nil {
a.log.Warnf("Invalid implication received: %v", err)
return nil
}
_, err = crypto.DecryptShare(a.suite, a.rbcOut, peerIndex, secret)
if err == nil {
// if we are able to decrypt the share, the implication is not correct
a.log.Warn("encrypted share is valid")
return nil
}
//
// Create the reveal message.
return a.broadcastRecover(gpa.NoMessages())
}
// Here the RBC is assumed to be completed already and the private key is checked.
//
// > on receiving <RECOVER, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if VSS.Verify(C, j, sⱼ): T = T ∪ {sⱼ}
// >
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
func (a *acssImpl) handleRecover(msg *msgImplicateRecover) gpa.OutMessages {
if a.output {
// Ignore the RECOVER messages, if we are done with the output.
return nil
}
peerIndex := a.peerIndex(msg.sender)
if peerIndex == -1 {
a.log.Warnf("Recover received from unexpected sender: %v", msg.sender)
return nil
}
if _, ok := a.recoverRecv[msg.sender]; ok {
a.log.Warnf("Recover was already received from %v", msg.sender)
return nil
}
peerSecret, err := crypto.DecryptShare(a.suite, a.rbcOut, peerIndex, msg.data)
if err != nil {
a.log.Warn("invalid secret revealed")
return nil
}
a.recoverRecv[msg.sender] = peerSecret
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
if len(a.recoverRecv) >= a.f+1 {
priShares := []*share.PriShare{}
for i := range a.recoverRecv {
priShares = append(priShares, a.recoverRecv[i])
}
myPriShare, err := crypto.InterpolateShare(a.suite, priShares, a.n, a.myIdx)
if err != nil {
a.log.Warnf("Failed to recover pri-poly: %v", err)
}
a.outS = myPriShare
a.output = true
return nil
}
return nil
}
func (a *acssImpl) broadcastVote(voteKind msgVoteKind, msgs gpa.OutMessages) gpa.OutMessages {
for i := range a.peerIdx {
msg := &msgVote{
BasicMessage: gpa.NewBasicMessage(a.peerIdx[i]),
kind: voteKind,
}
msg.SetSender(a.me)
msgs.Add(msg)
}
return msgs
}
func (a *acssImpl) broadcastImplicate(reason error, msgs gpa.OutMessages) gpa.OutMessages {
a.log.Warnf("Sending implicate because of: %v", reason)
implicate := crypto.Implicate(a.suite, a.rbcOut.PubKey, a.mySK)
return a.broadcastImplicateRecover(msgImplicateRecoverKindIMPLICATE, implicate, msgs)
}
func (a *acssImpl) broadcastRecover(msgs gpa.OutMessages) gpa.OutMessages {
secret := crypto.Secret(a.suite, a.rbcOut.PubKey, a.mySK)
return a.broadcastImplicateRecover(msgImplicateRecoverKindRECOVER, secret, msgs)
}
func (a *acssImpl) broadcastImplicateRecover(kind msgImplicateKind, data []byte, msgs gpa.OutMessages) gpa.OutMessages {
for i := range a.peerIdx {
msgs.Add(&msgImplicateRecover{kind: kind, recipient: a.peerIdx[i], i: a.myIdx, data: data})
}
return msgs
}
func (a *acssImpl) tryOutput() {
count := len(a.voteREADYRecv)
if count >= (a.n-a.f) && a.outS != nil {
a.output = true
}
}
func (a *acssImpl) peerIndex(peer gpa.NodeID) int {
for i := range a.peerIdx {
if a.peerIdx[i] == peer {
return i
}
}
return -1
}
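// Output returns the private share and the Feldman commitments once the instance
// has produced output, and nil before that.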
func (a *acssImpl) Output() gpa.Output {
if a.output {
return &Output{
PriShare: a.outS,
Commits: a.rbcOut.Commits,
}
}
return nil
}
func (a *acssImpl) StatusString() string {
return fmt.Sprintf("{ACSS, output=%v, rbc=%v}", a.output, a.rbc.StatusString())
}
|
if !a.voteREADYSent && count >= (a.n-a.f) {
a.voteREADYSent = true
return a.broadcastVote(msgVoteREADY, gpa.NoMessages())
}
return nil
}
// > on receiving <READY> from f+1 parties:
// > send <READY | identifier_body |
acss.go | // Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
// package acss implements "Asynchronous Complete Secret Sharing" as described in
//
// https://iotaledger.github.io/crypto-tss/talks/async-dkg/slides-async-dkg.html#/5/6
//
// Here is a copy of the pseudo code from the slide mentioned above (just in case):
//
// > // dealer with input s
// > sample random polynomial ϕ such that ϕ(0) = s
// > C, S := VSS.Share(ϕ, f+1, n)
// > E := [PKI.Enc(S[i], pkᵢ) for each party i]
// >
// > // party i (including the dealer)
// > RBC(C||E)
// > sᵢ := PKI.Dec(eᵢ, skᵢ)
// > if decrypt fails or VSS.Verify(C, i, sᵢ) == false:
// > send <IMPLICATE, i, skᵢ> to all parties
// > else:
// > send <OK>
// >
// > on receiving <OK> from n-f parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from f+1 parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from n-f parties:
// > if sᵢ is valid:
// > out = true
// > output sᵢ
// >
// > on receiving <IMPLICATE, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if decrypt fails or VSS.Verify(C, j, sⱼ) == false:
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
// >
// > on receiving <RECOVER, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if VSS.Verify(C, j, sⱼ): T = T ∪ {sⱼ}
// >
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
//
// On the adaptations and sources:
//
// > More details and references to the papers are below:
// >
// > Here are the references for the Asynchronous Secret-Sharing that I was referring to.
// > It is purely based on (Feldman) Verifiable Secret Sharing and does not rely on any PVSS schemes
// > requiring fancy NIZKP (and thus trades network-complexity vs computational-complexity):
// >
// >   * In [1], Section IV.A., we use the ACSS scheme from [2] but replace its Pedersen
// > commitment with a Feldman polynomial commitment to achieve Homomorphic-Partial-Commitment.
// >
// > * In [2], Section 5.3. they explain the Pedersen-based hbACSS0 and give some proof sketch.
// > The complete description and analysis of hbACSS0 can be found in [3]. However, as mentioned
// > before they use Kate-commitments instead of Feldman/Pedersen. This has better message
// > complexity especially when multiple secrets are shared at the same time, but in our case
// >   that would need to be replaced with Feldman, making it much simpler without losing any security.
// > Actually, [3] is just a pre-print, the official published version is [4], but [4] also contains
// > other, non-relevant, variants like hbACSS1 and hbACSS2 and much more analysis.
// > So, I found [3] a bit more helpful, although it is just the preliminary version.
// > They also provide their reference implementation in [5], which is also what the
// > authors of [1] used for their practical DKG results.
// >
// > [1] Practical Asynchronous Distributed Key Generation https://eprint.iacr.org/2021/1591
// > [2] Asynchronous Data Dissemination and its Applications https://eprint.iacr.org/2021/777
// > [3] Brief Note: Asynchronous Verifiable Secret Sharing with Optimal Resilience and Linear Amortized Overhead https://arxiv.org/pdf/1902.06095.pdf
// > [4] hbACSS: How to Robustly Share Many Secrets https://eprint.iacr.org/2021/159
// > [5] https://github.com/tyurek/hbACSS
//
// A PoC implementation: <https://github.com/Wollac/async.go>.
//
// The crypto part shown in the pseudo-code above is replaced in the implementation with a
// scheme that allows keeping the private keys secret. The scheme implementation is taken
// from the PoC mentioned above. It is described in <https://hackmd.io/@CcRtfCBnRbW82-AdbFJUig/S1qcPiUN5>.
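//
// A rough usage sketch (illustrative only: the suite, peer IDs, keys and logger are
// placeholders, and message delivery between the nodes is not shown):
//
// acss := New(suite, peers, peerPKs, f, myID, mySK, dealerID, nil, log)
// if myID == dealerID {
//     msgs := acss.Input(secret) // only the dealer provides the secret to share
//     // ... deliver msgs to the peers
// }
// // Feed incoming messages through acss.Message(...) and poll acss.Output()
// // until it returns a non-nil *Output with the private share and the commitments.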
package acss
import (
"errors"
"fmt"
"math"
"go.dedis.ch/kyber/v3"
"go.dedis.ch/kyber/v3/share"
"go.dedis.ch/kyber/v3/suites"
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/wasp/packages/gpa"
"github.com/iotaledger/wasp/packages/gpa/acss/crypto"
rbc "github.com/iotaledger/wasp/packages/gpa/rbc/bracha"
"github.com/iotaledger/wasp/packages/util/rwutil"
)
const (
subsystemRBC byte = iota
)
type Output struct {
PriShare *share.PriShare // Private share, received by this instance.
Commits []kyber.Point // Feldman's commitment to the shared polynomial.
}
type acssImpl struct {
suite suites.Suite
n int
f int
me gpa.NodeID
mySK kyber.Scalar
myPK kyber.Point
myIdx int
dealer gpa.NodeID // A node that is recognized as a dealer.
dealCB func(int, []byte) []byte // Callback to be called on the encrypted deals (for tests actually).
peerPKs map[gpa.NodeID]kyber.Point // Peer public keys.
peerIdx []gpa.NodeID // Particular order of the nodes (position in the polynomial).
rbc gpa.GPA // RBC to share `C||E`.
rbcOut *crypto.Deal // Deal broadcasted by the dealer.
voteOKRecv map[gpa.NodeID]bool // A set of received OK votes.
voteREADYRecv map[gpa.NodeID]bool // A set of received READY votes.
voteREADYSent bool // Have we sent our READY vote?
pendingIRMsgs []*msgImplicateRecover // I/R messages are buffered, if the RBC is not completed yet.
implicateRecv map[gpa.NodeID]bool // To check, that implicate only received once from a node.
recoverRecv map[gpa.NodeID]*share.PriShare // Private shares from the RECOVER messages.
outS *share.PriShare // Our share of the secret (decrypted from rbcOutE).
output bool
msgWrapper *gpa.MsgWrapper
log *logger.Logger
}
var _ gpa.GPA = &acssImpl{}
func New(
suite suites.Suite, // Ed25519
peers []gpa.NodeID, // Participating nodes in a specific order.
peerPKs map[gpa.NodeID]kyber.Point, // Public keys for all the peers.
f int, // Max number of expected faulty nodes.
me gpa.NodeID, // ID of this node.
mySK kyber.Scalar, // Secret Key of this node.
dealer gpa.NodeID, // The dealer node for this protocol instance.
dealCB func(int, []byte) []byte, // For tests only: interceptor for the deal to be shared.
log *logger.Logger, // A logger to use.
) gpa.GPA {
n := len(peers)
if dealCB == nil {
dealCB = func(i int, b []byte) []byte { return b }
}
a := acssImpl{
suite: suite,
n: n,
f: f,
me: me,
mySK: mySK,
myPK: peerPKs[me],
myIdx: -1, // Updated below.
dealer: dealer,
dealCB: dealCB,
peerPKs: peerPKs,
peerIdx: peers,
rbc: rbc.New(peers, f, me, dealer, math.MaxInt, func(b []byte) bool { return true }), // TODO: Provide meaningful maxMsgSize
rbcOut: nil, // Will be set on output from the RBC.
voteOKRecv: map[gpa.NodeID]bool{},
voteREADYRecv: map[gpa.NodeID]bool{},
voteREADYSent: false,
pendingIRMsgs: []*msgImplicateRecover{},
implicateRecv: map[gpa.NodeID]bool{},
recoverRecv: map[gpa.NodeID]*share.PriShare{},
outS: nil,
output: false,
log: log,
}
a.msgWrapper = gpa.NewMsgWrapper(msgTypeWrapped, func(subsystem byte, index int) (gpa.GPA, error) {
if subsystem == subsystemRBC {
if index != 0 {
return nil, fmt.Errorf("unknown rbc index: %v", index)
}
return a.rbc, nil
}
return nil, fmt.Errorf("unknown subsystem: %v", subsystem)
})
if a.myIdx = a.peerIndex(me); a.myIdx == -1 {
panic("i'm not in the peer list")
}
return | r the algorithm is the secret to share.
// It can be provided by the dealer only.
func (a *acssImpl) Input(input gpa.Input) gpa.OutMessages {
if a.me != a.dealer {
panic(errors.New("only dealer can initiate the sharing"))
}
if input == nil {
panic(errors.New("we expect kyber.Scalar as input"))
}
return a.handleInput(input.(kyber.Scalar))
}
// Receive all the messages and route them to the appropriate handlers.
func (a *acssImpl) Message(msg gpa.Message) gpa.OutMessages {
switch m := msg.(type) {
case *gpa.WrappingMsg:
switch m.Subsystem() {
case subsystemRBC:
return a.handleRBCMessage(m)
default:
panic(fmt.Errorf("unexpected wrapped message: %+v", m))
}
case *msgVote:
switch m.kind {
case msgVoteOK:
return a.handleVoteOK(m)
case msgVoteREADY:
return a.handleVoteREADY(m)
default:
panic(fmt.Errorf("unexpected vote message: %+v", m))
}
case *msgImplicateRecover:
return a.handleImplicateRecoverReceived(m)
default:
panic(fmt.Errorf("unexpected message: %+v", msg))
}
}
// > // dealer with input s
// > sample random polynomial ϕ such that ϕ(0) = s
// > C, S := VSS.Share(ϕ, f+1, n)
// > E := [PKI.Enc(S[i], pkᵢ) for each party i]
// >
// > // party i (including the dealer)
// > RBC(C||E)
func (a *acssImpl) handleInput(secretToShare kyber.Scalar) gpa.OutMessages {
pubKeys := make([]kyber.Point, 0)
for _, peerID := range a.peerIdx {
pubKeys = append(pubKeys, a.peerPKs[peerID])
}
deal := crypto.NewDeal(a.suite, pubKeys, secretToShare)
data, err := deal.MarshalBinary()
if err != nil {
panic(fmt.Sprintf("acss: internal error: %v", err))
}
// > RBC(C||E)
rbcCEPayloadBytes := rwutil.WriteToBytes(&msgRBCCEPayload{suite: a.suite, data: data})
msgs := a.msgWrapper.WrapMessages(subsystemRBC, 0, a.rbc.Input(rbcCEPayloadBytes))
return a.tryHandleRBCTermination(false, msgs)
}
// Delegate received messages to the RBC and handle its output.
//
// > // party i (including the dealer)
// > RBC(C||E)
func (a *acssImpl) handleRBCMessage(m *gpa.WrappingMsg) gpa.OutMessages {
wasOut := a.rbc.Output() != nil // To send the msgRBCCEOutput message once (for perf reasons).
msgs := a.msgWrapper.WrapMessages(subsystemRBC, 0, a.rbc.Message(m.Wrapped()))
return a.tryHandleRBCTermination(wasOut, msgs)
}
func (a *acssImpl) tryHandleRBCTermination(wasOut bool, msgs gpa.OutMessages) gpa.OutMessages {
if out := a.rbc.Output(); !wasOut && out != nil {
// Send the result for self as a message (maybe the code will look nicer this way).
outParsed, err := rwutil.ReadFromBytes(out.([]byte), &msgRBCCEPayload{suite: a.suite})
if err != nil {
panic(fmt.Errorf("cannot unmarshal msgRBCCEPayload: %w", err))
}
msgs.AddAll(a.handleRBCOutput(outParsed))
}
return msgs
}
// Upon receiving the RBC output...
//
// > sᵢ := PKI.Dec(eᵢ, skᵢ)
// > if decrypt fails or VSS.Verify(C, i, sᵢ) == false:
// > send <IMPLICATE, i, skᵢ> to all parties
// > else:
// > send <OK>
func (a *acssImpl) handleRBCOutput(rbcOutput *msgRBCCEPayload) gpa.OutMessages {
if a.outS != nil || a.rbcOut != nil {
// Take the first RBC output only.
return nil
}
//
// Store the broadcast result and process pending IMPLICATE/RECOVER messages, if any.
deal, err := crypto.DealUnmarshalBinary(a.suite, a.n, rbcOutput.data)
if err != nil {
panic(errors.New("cannot unmarshal msgRBCCEPayload.data"))
}
a.rbcOut = deal
msgs := a.handleImplicateRecoverPending(gpa.NoMessages())
//
// Process the RBC output, as described above.
secret := crypto.Secret(a.suite, a.rbcOut.PubKey, a.mySK)
myShare, err := crypto.DecryptShare(a.suite, a.rbcOut, a.myIdx, secret)
if err != nil {
return a.broadcastImplicate(err, msgs)
}
a.outS = myShare
a.tryOutput() // Maybe the READY messages are already received.
return a.handleImplicateRecoverPending(a.broadcastVote(msgVoteOK, msgs))
}
// > on receiving <OK> from n-f parties:
// > send <READY> to all parties
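// (For example, with n = 4 and f = 1, the READY vote is sent once OK votes from 3 distinct peers have arrived; the numbers are only illustrative.)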
func (a *acssImpl) handleVoteOK(msg *msgVote) gpa.OutMessages {
a.voteOKRecv[msg.Sender()] = true
count := len(a.voteOKRecv)
if !a.voteREADYSent && count >= (a.n-a.f) {
a.voteREADYSent = true
return a.broadcastVote(msgVoteREADY, gpa.NoMessages())
}
return nil
}
// > on receiving <READY> from f+1 parties:
// > send <READY> to all parties
// >
// > on receiving <READY> from n-f parties:
// > if sᵢ is valid:
// > out = true
// > output sᵢ
func (a *acssImpl) handleVoteREADY(msg *msgVote) gpa.OutMessages {
a.voteREADYRecv[msg.Sender()] = true
count := len(a.voteREADYRecv)
msgs := gpa.NoMessages()
if !a.voteREADYSent && count >= (a.f+1) {
msgs = a.broadcastVote(msgVoteREADY, msgs)
a.voteREADYSent = true
}
a.tryOutput()
return a.handleImplicateRecoverPending(msgs)
}
// It is possible that we receive IMPLICATE/RECOVER messages before our RBC is completed.
// If the RBC is not done yet, we buffer these messages for later; otherwise we process them immediately.
func (a *acssImpl) handleImplicateRecoverReceived(msg *msgImplicateRecover) gpa.OutMessages {
if a.rbcOut == nil {
a.pendingIRMsgs = append(a.pendingIRMsgs, msg)
return nil
}
switch msg.kind {
case msgImplicateRecoverKindIMPLICATE:
return a.handleImplicate(msg)
case msgImplicateRecoverKindRECOVER:
return a.handleRecover(msg)
default:
panic(fmt.Errorf("handleImplicateRecoverReceived: unexpected msgImplicateRecover.kind=%v, message: %+v", msg.kind, msg))
}
}
func (a *acssImpl) handleImplicateRecoverPending(msgs gpa.OutMessages) gpa.OutMessages {
//
// Only process the IMPLICATE/RECOVER messages, if this node has RBC completed.
if a.rbcOut == nil {
return msgs
}
postponedIRMsgs := []*msgImplicateRecover{}
for _, m := range a.pendingIRMsgs {
switch m.kind {
case msgImplicateRecoverKindIMPLICATE:
// Only handle the IMPLICATE messages when output is already produced to implement the following:
//
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
//
if a.output {
msgs.AddAll(a.handleImplicate(m))
} else {
postponedIRMsgs = append(postponedIRMsgs, m)
}
case msgImplicateRecoverKindRECOVER:
msgs.AddAll(a.handleRecover(m))
default:
panic(fmt.Errorf("handleImplicateRecoverReceived: unexpected msgImplicateRecover.kind=%v, message: %+v", m.kind, m))
}
}
a.pendingIRMsgs = postponedIRMsgs
return msgs
}
// Here the RBC is assumed to be completed already, OUT is set and the private key is checked.
//
// > on receiving <IMPLICATE, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if decrypt fails or VSS.Verify(C, j, sⱼ) == false:
// > if out == true:
// > send <RECOVER, i, skᵢ> to all parties
// > return
//
// NOTE: We assume `if out == true:` means waiting until that condition holds.
func (a *acssImpl) handleImplicate(msg *msgImplicateRecover) gpa.OutMessages {
peerIndex := a.peerIndex(msg.sender)
if peerIndex == -1 {
a.log.Warnf("implicate received from unknown peer: %v", msg.sender)
return nil
}
//
// Check message duplicates.
if _, ok := a.implicateRecv[msg.sender]; ok {
// Received the implicate before, just ignore it.
return nil
}
a.implicateRecv[msg.sender] = true
//
// Check implicate.
secret, err := crypto.CheckImplicate(a.suite, a.rbcOut.PubKey, a.peerPKs[msg.sender], msg.data)
if err != nil {
a.log.Warnf("Invalid implication received: %v", err)
return nil
}
_, err = crypto.DecryptShare(a.suite, a.rbcOut, peerIndex, secret)
if err == nil {
// If we are able to decrypt the share, the IMPLICATE claim is not valid.
a.log.Warn("encrypted share is valid")
return nil
}
//
// Create the reveal message.
return a.broadcastRecover(gpa.NoMessages())
}
// Here the RBC is assumed to be completed already and the private key is checked.
//
// > on receiving <RECOVER, j, skⱼ>:
// > sⱼ := PKI.Dec(eⱼ, skⱼ)
// > if VSS.Verify(C, j, sⱼ): T = T ∪ {sⱼ}
// >
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
func (a *acssImpl) handleRecover(msg *msgImplicateRecover) gpa.OutMessages {
if a.output {
// Ignore the RECOVER messages, if we are done with the output.
return nil
}
peerIndex := a.peerIndex(msg.sender)
if peerIndex == -1 {
a.log.Warnf("Recover received from unexpected sender: %v", msg.sender)
return nil
}
if _, ok := a.recoverRecv[msg.sender]; ok {
a.log.Warnf("Recover was already received from %v", msg.sender)
return nil
}
peerSecret, err := crypto.DecryptShare(a.suite, a.rbcOut, peerIndex, msg.data)
if err != nil {
a.log.Warn("invalid secret revealed")
return nil
}
a.recoverRecv[msg.sender] = peerSecret
// > wait until len(T) >= f+1:
// > sᵢ = SSS.Recover(T, f+1, n)(i)
// > out = true
// > output sᵢ
if len(a.recoverRecv) >= a.f+1 {
priShares := []*share.PriShare{}
for i := range a.recoverRecv {
priShares = append(priShares, a.recoverRecv[i])
}
myPriShare, err := crypto.InterpolateShare(a.suite, priShares, a.n, a.myIdx)
if err != nil {
a.log.Warnf("Failed to recover pri-poly: %v", err)
}
a.outS = myPriShare
a.output = true
return nil
}
return nil
}
func (a *acssImpl) broadcastVote(voteKind msgVoteKind, msgs gpa.OutMessages) gpa.OutMessages {
for i := range a.peerIdx {
msg := &msgVote{
BasicMessage: gpa.NewBasicMessage(a.peerIdx[i]),
kind: voteKind,
}
msg.SetSender(a.me)
msgs.Add(msg)
}
return msgs
}
func (a *acssImpl) broadcastImplicate(reason error, msgs gpa.OutMessages) gpa.OutMessages {
a.log.Warnf("Sending implicate because of: %v", reason)
implicate := crypto.Implicate(a.suite, a.rbcOut.PubKey, a.mySK)
return a.broadcastImplicateRecover(msgImplicateRecoverKindIMPLICATE, implicate, msgs)
}
func (a *acssImpl) broadcastRecover(msgs gpa.OutMessages) gpa.OutMessages {
secret := crypto.Secret(a.suite, a.rbcOut.PubKey, a.mySK)
return a.broadcastImplicateRecover(msgImplicateRecoverKindRECOVER, secret, msgs)
}
func (a *acssImpl) broadcastImplicateRecover(kind msgImplicateKind, data []byte, msgs gpa.OutMessages) gpa.OutMessages {
for i := range a.peerIdx {
msgs.Add(&msgImplicateRecover{kind: kind, recipient: a.peerIdx[i], i: a.myIdx, data: data})
}
return msgs
}
func (a *acssImpl) tryOutput() {
count := len(a.voteREADYRecv)
if count >= (a.n-a.f) && a.outS != nil {
a.output = true
}
}
func (a *acssImpl) peerIndex(peer gpa.NodeID) int {
for i := range a.peerIdx {
if a.peerIdx[i] == peer {
return i
}
}
return -1
}
func (a *acssImpl) Output() gpa.Output {
if a.output {
return &Output{
PriShare: a.outS,
Commits: a.rbcOut.Commits,
}
}
return nil
}
func (a *acssImpl) StatusString() string {
return fmt.Sprintf("{ACSS, output=%v, rbc=%v}", a.output, a.rbc.StatusString())
}
| gpa.NewOwnHandler(me, &a)
}
// Input fo | conditional_block |
discord.go | package dolphin
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/diamondburned/arikawa/api"
"github.com/diamondburned/arikawa/discord"
"github.com/diamondburned/arikawa/gateway"
"github.com/diamondburned/arikawa/state"
"github.com/diamondburned/arikawa/webhook"
"gitlab.com/EbonJaeger/dolphin/rcon"
)
var webhookRegex = regexp.MustCompile("https://discordapp.com/api/webhooks/(.*)/(.*)")
// NewDiscordBot creates a new DiscordBot with a MinecraftWatcher and
// connects to Discord.
func NewDiscordBot() (*DiscordBot, error) {
bot := &DiscordBot{}
var discordErr error
// Create Discord session
s, discordErr := state.New("Bot " + Config.Discord.BotToken)
if discordErr != nil {
return nil, discordErr
}
bot.state = s
// Add our Discord handlers
bot.state.AddHandler(bot.onReady)
bot.state.AddHandler(bot.onGuildCreate)
bot.state.AddHandler(bot.onMessageCreate)
// Connect to Discord websocket
if discordErr = bot.state.Open(); discordErr != nil {
return nil, discordErr
}
// Get our Discord user
self, discordErr := bot.state.Me()
if discordErr != nil {
return nil, discordErr
}
// Set our data and create the Minecraft watcher
bot.id = self.ID
bot.name = self.Username
bot.avatarURL = self.AvatarURL()
if Config.Discord.ChannelID != "" {
snowflake, discordErr := discord.ParseSnowflake(Config.Discord.ChannelID)
if discordErr != nil |
bot.channel = discord.ChannelID(snowflake)
} else {
return nil, errors.New("no channel ID configured")
}
bot.watcher = NewWatcher(self.Username, *Config.Minecraft.CustomDeathKeywords)
return bot, discordErr
}
// Close cleans up the watcher and closes the Discord session.
func (bot *DiscordBot) Close() error {
var closeErr error
if err := bot.watcher.Close(); err != nil {
closeErr = err
}
if err := bot.state.Session.Close(); err != nil {
closeErr = err
}
return closeErr
}
// WaitForMessages starts the Minecraft log watcher and waits for messages
// on a messages channel.
func (bot *DiscordBot) WaitForMessages() {
// Make our messages channel
mc := make(chan *MinecraftMessage)
// Start our Minecraft watcher
go bot.watcher.Watch(mc)
for {
// Read message from the channel
msg := <-mc
Log.Debugf("Received a line from Minecraft: Username='%s', Text='%s'\n", msg.Username, msg.Message)
// Don't send messages that are disabled
switch msg.Type {
case AdvancementMessage:
{
if !Config.Discord.MessageOptions.ShowAdvancements {
continue
}
}
case DeathMessage:
{
if !Config.Discord.MessageOptions.ShowDeaths {
continue
}
}
case JoinLeaveMessage:
{
if !Config.Discord.MessageOptions.ShowJoinsLeaves {
continue
}
}
}
// Send the message to the Discord channel
bot.sendToDiscord(msg)
}
}
// onReady sets the bot's Discord status.
func (bot *DiscordBot) onReady(e *gateway.ReadyEvent) {
// Set the bot gaming status
err := bot.state.Gateway.UpdateStatus(gateway.UpdateStatusData{
Game: &discord.Activity{
Name: "Bridging the Minecraft/Discord gap",
},
})
if err != nil {
Log.Errorf("Unable to update Discord status: %s\n", err)
}
}
// onGuildCreate handles when the bot joins or connects to a Guild.
func (bot *DiscordBot) onGuildCreate(e *gateway.GuildCreateEvent) {
// Make sure the guild is available
if e.Unavailable {
Log.Warnf("Attempted to join Guild '%s', but it was unavailable\n", e.Guild.Name)
return
}
if bot.guildID.String() != "" {
Log.Warnf("Received a Guild join event for '%s', but we've already joined one\n", e.Guild.Name)
return
}
Log.Infof("Connected to guild named '%s'\n", e.Guild.Name)
bot.guildID = e.Guild.ID
}
// onMessageCreate handles messages that the bot receives, and sends them
// to Minecraft via RCON.
func (bot *DiscordBot) onMessageCreate(e *gateway.MessageCreateEvent) {
// Ignore messages from ourselves
if e.Author.ID != bot.id && e.Message.WebhookID.String() == "" {
// Check if the message is a bot command
if strings.HasPrefix(e.Message.Content, "!") {
c := make(chan bool)
go parser.Parse(e.Message, bot.state, c)
// Don't go any further if the command was found and run
if <-c {
return
}
}
// Not a command, so ignore messages from other channels
if e.ChannelID.String() == Config.Discord.ChannelID {
Log.Debugln("Received a message from Discord")
// Get the name to use
var name string
if Config.Discord.UseMemberNicks {
name = bot.getNickname(e.Author.ID)
} else {
name = e.Author.Username
}
// Print the URL if the message contains an attachment but no message content
if len(e.Message.Attachments) > 0 {
if len(e.Content) == 0 {
e.Content = e.Message.Attachments[0].URL
if err := sendToMinecraft(e.Content, name); err != nil {
Log.Errorf("Error sending command to RCON: %s\n", err)
}
return
}
}
content := formatMessage(bot.state, e.Message)
lines := strings.Split(content, "\n")
// Send a separate message for each line
for i := 0; i < len(lines); i++ {
line := lines[i]
// Split long lines into additional messages
if len(line) > 100 {
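// Grow the slice by one and shift the tail right, so the overflow can be spliced in as the next element and picked up on the following iteration.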
lines = append(lines, "")
copy(lines[i+2:], lines[i+1:])
lines[i+1] = line[100:]
line = line[:100]
}
if err := sendToMinecraft(line, name); err != nil {
Log.Errorf("Error sending command to RCON: %s\n", err)
}
}
}
}
}
// sendToDiscord sends a message from Minecraft to the configured
// Discord channel.
func (bot *DiscordBot) sendToDiscord(m *MinecraftMessage) {
// Insert Discord mentions if configured and present
if Config.Discord.AllowMentions {
// Insert Discord mentions
m.Message = bot.insertMentions(m.Message)
}
// Send the message to Discord either via webhook or normal channel message
if Config.Discord.Webhook.Enabled {
// Get the configured webhook
id, token := matchWebhookURL(Config.Discord.Webhook.URL)
if id == "" || token == "" {
Log.Warnln("Invalid or undefined Discord webhook URL")
return
}
// Attempt to get the webhook
snowflake, err := discord.ParseSnowflake(id)
if err != nil {
Log.Errorf("Error parsing Webhook Snowflake: %s\n", err.Error())
}
webhookID := discord.WebhookID(snowflake)
// Form our webhook params
params := bot.setWebhookParams(m)
// Send to the webhook
Log.Debugf("Sending to webhook: id='%s', token='%s'\n", id, token)
if err := webhook.Execute(webhookID, token, params); err != nil {
Log.Errorf("Error sending data to Discord webhook: %s\n", err.Error())
}
} else {
// Format the message for Discord
formatted := fmt.Sprintf("**%s**: %s", m.Username, m.Message)
// Send to the configured Discord channel
if _, err := bot.state.Client.SendMessage(bot.channel, formatted, nil); err != nil {
Log.Errorf("Error sending a message to Discord: %s\n", err.Error())
}
}
}
// getNickname gets the nickname of a Discord user in a Guild.
func (bot *DiscordBot) getNickname(id discord.UserID) string {
var m *discord.Member
// Look in the cached state for the Member
m, _ = bot.state.Member(bot.guildID, id)
// Make sure we do have a user
if m == nil {
return ""
}
if m.Nick == "" {
return m.User.Username
}
return m.Nick
}
// getUserFromName gets the Discord user for the given username (without the @).
// The comparison is case-insensitive and requires an exact username match.
func (bot *DiscordBot) getUserFromName(text string) (target *discord.User) {
// Look through all guild members in the state
members, _ := bot.state.Members(bot.guildID)
for _, u := range members {
// Check if the name matches, case-insensitive
if strings.EqualFold(u.User.Username, text) {
target = &u.User
break
}
}
return target
}
// insertMentions looks for potential Discord mentions in a Minecraft chat
// message. If there are any, we will attempt to get the user being mentioned
// to get their mention string to put into the chat message.
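// For example, "hi @Steve" would become something like "hi <@123456789>" if a guild member named Steve is found (the ID here is made up).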
func (bot *DiscordBot) insertMentions(msg string) string {
// Split the message into words
words := strings.Split(msg, " ")
// Iterate over each word
for _, word := range words {
// Check if the word might be a mention
if strings.HasPrefix(word, "@") {
// Attempt to get the user
user := bot.getUserFromName(word[1:])
if user != nil {
// Replace the word with the mention
msg = strings.Replace(msg, word, user.Mention(), 1)
}
}
}
return msg
}
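// matchWebhookURL extracts the webhook ID and token from the configured webhook URL.
// For example, "https://discordapp.com/api/webhooks/1234/abcd" would yield ("1234", "abcd"); the values are illustrative.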
func matchWebhookURL(url string) (string, string) {
wm := webhookRegex.FindStringSubmatch(url)
// Make sure we have the correct number of parts (ID and token)
if len(wm) != 3 {
return "", ""
}
// Return the webhook ID and token
return wm[1], wm[2]
}
// setWebhookParams sets the avatar, username, and message for a webhook request.
func (bot *DiscordBot) setWebhookParams(m *MinecraftMessage) api.ExecuteWebhookData {
// Get the avatar to use for this message
var avatarURL string
if m.Username == bot.name {
// Use the bot's avatar
avatarURL = bot.avatarURL
} else {
// Player's Minecraft head as the avatar
avatarURL = fmt.Sprintf("https://minotar.net/helm/%s/256.png", m.Username)
}
return api.ExecuteWebhookData{
Content: m.Message,
Username: m.Username,
AvatarURL: avatarURL,
}
}
func formatMessage(state *state.State, message discord.Message) string {
content := message.Content
// Replace mentions
for _, word := range strings.Split(content, " ") {
if strings.HasPrefix(word, "<#") && strings.HasSuffix(word, ">") {
// Get the ID from the mention string
id := word[2 : len(word)-1]
snowflake, _ := discord.ParseSnowflake(id)
channelID := discord.ChannelID(snowflake)
channel, err := state.Channel(channelID)
if err != nil {
Log.Warnf("Error while getting channel from Discord: %s\n", err)
continue
}
content = strings.Replace(content, fmt.Sprintf("<#%s>", id), fmt.Sprintf("#%s", channel.Name), -1)
}
}
for _, member := range message.Mentions {
content = strings.Replace(content, fmt.Sprintf("<@!%s>", member.ID), fmt.Sprintf("@%s", member.Username), -1)
}
// Escape quote characters
content = strings.Replace(content, "\"", "\\\"", -1)
return content
}
func sendToMinecraft(content, username string) error {
// Format command to send to the Minecraft server
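// For example, with a tellraw template such as {"text":"<%username%> %message%"} (purely illustrative),
// a chat line "hello" from Steve would become: tellraw @a {"text":"<Steve> hello"}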
command := fmt.Sprintf("tellraw @a %s", Config.Minecraft.TellrawTemplate)
command = strings.Replace(command, "%username%", username, -1)
command = strings.Replace(command, "%message%", content, -1)
// Create RCON connection
conn, err := rcon.Dial(Config.Minecraft.RconIP, Config.Minecraft.RconPort, Config.Minecraft.RconPassword)
if err != nil {
return err
}
defer conn.Close()
// Authenticate to RCON
if err = conn.Authenticate(); err != nil {
return err
}
// Send the command to Minecraft
if _, err := conn.SendCommand(command); err != nil {
return err
}
return nil
}
| {
return nil, discordErr
} | conditional_block |
discord.go | package dolphin
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/diamondburned/arikawa/api"
"github.com/diamondburned/arikawa/discord"
"github.com/diamondburned/arikawa/gateway"
"github.com/diamondburned/arikawa/state"
"github.com/diamondburned/arikawa/webhook"
"gitlab.com/EbonJaeger/dolphin/rcon"
)
var webhookRegex = regexp.MustCompile("https://discordapp.com/api/webhooks/(.*)/(.*)")
// NewDiscordBot creates a new DiscordBot with a MinecraftWatcher and
// connects to Discord.
func NewDiscordBot() (*DiscordBot, error) {
bot := &DiscordBot{}
var discordErr error
// Create Discord session
s, discordErr := state.New("Bot " + Config.Discord.BotToken)
if discordErr != nil {
return nil, discordErr
}
bot.state = s
// Add our Discord handlers
bot.state.AddHandler(bot.onReady)
bot.state.AddHandler(bot.onGuildCreate)
bot.state.AddHandler(bot.onMessageCreate)
// Connect to Discord websocket
if discordErr = bot.state.Open(); discordErr != nil {
return nil, discordErr
}
// Get our Discord user
self, discordErr := bot.state.Me()
if discordErr != nil {
return nil, discordErr
}
// Set our data and create the Minecraft watcher
bot.id = self.ID
bot.name = self.Username
bot.avatarURL = self.AvatarURL()
if Config.Discord.ChannelID != "" {
snowflake, discordErr := discord.ParseSnowflake(Config.Discord.ChannelID)
if discordErr != nil {
return nil, discordErr
}
bot.channel = discord.ChannelID(snowflake)
} else {
return nil, errors.New("no channel ID configured")
}
bot.watcher = NewWatcher(self.Username, *Config.Minecraft.CustomDeathKeywords)
return bot, discordErr
}
// Close cleans up the watcher and closes the Discord session.
func (bot *DiscordBot) Close() error {
var closeErr error
if err := bot.watcher.Close(); err != nil {
closeErr = err
}
if err := bot.state.Session.Close(); err != nil {
closeErr = err
}
return closeErr
}
// WaitForMessages starts the Minecraft log watcher and waits for messages
// on a messages channel.
func (bot *DiscordBot) | () {
// Make our messages channel
mc := make(chan *MinecraftMessage)
// Start our Minecraft watcher
go bot.watcher.Watch(mc)
for {
// Read message from the channel
msg := <-mc
Log.Debugf("Received a line from Minecraft: Username='%s', Text='%s'\n", msg.Username, msg.Message)
// Don't send messages that are disabled
switch msg.Type {
case AdvancementMessage:
{
if !Config.Discord.MessageOptions.ShowAdvancements {
continue
}
}
case DeathMessage:
{
if !Config.Discord.MessageOptions.ShowDeaths {
continue
}
}
case JoinLeaveMessage:
{
if !Config.Discord.MessageOptions.ShowJoinsLeaves {
continue
}
}
}
// Send the message to the Discord channel
bot.sendToDiscord(msg)
}
}
// onReady sets the bot's Discord status.
func (bot *DiscordBot) onReady(e *gateway.ReadyEvent) {
// Set the bot gaming status
err := bot.state.Gateway.UpdateStatus(gateway.UpdateStatusData{
Game: &discord.Activity{
Name: "Bridging the Minecraft/Discord gap",
},
})
if err != nil {
Log.Errorf("Unable to update Discord status: %s\n", err)
}
}
// onGuildCreate handles when the bot joins or connects to a Guild.
func (bot *DiscordBot) onGuildCreate(e *gateway.GuildCreateEvent) {
// Make sure the guild is available
if e.Unavailable {
Log.Warnf("Attempted to join Guild '%s', but it was unavailable\n", e.Guild.Name)
return
}
if bot.guildID.String() != "" {
Log.Warnf("Received a Guild join event for '%s', but we've already joined one\n", e.Guild.Name)
return
}
Log.Infof("Connected to guild named '%s'\n", e.Guild.Name)
bot.guildID = e.Guild.ID
}
// onMessageCreate handles messages that the bot receives, and sends them
// to Minecraft via RCON.
func (bot *DiscordBot) onMessageCreate(e *gateway.MessageCreateEvent) {
// Ignore messages from ourselves
if e.Author.ID != bot.id && e.Message.WebhookID.String() == "" {
// Check if the message is a bot command
if strings.HasPrefix(e.Message.Content, "!") {
c := make(chan bool)
go parser.Parse(e.Message, bot.state, c)
// Don't go any further if the command was found and run
if <-c {
return
}
}
// Not a command, so ignore messages from other channels
if e.ChannelID.String() == Config.Discord.ChannelID {
Log.Debugln("Received a message from Discord")
// Get the name to use
var name string
if Config.Discord.UseMemberNicks {
name = bot.getNickname(e.Author.ID)
} else {
name = e.Author.Username
}
// Print the URL if the message contains an attachment but no message content
if len(e.Message.Attachments) > 0 {
if len(e.Content) == 0 {
e.Content = e.Message.Attachments[0].URL
if err := sendToMinecraft(e.Content, name); err != nil {
Log.Errorf("Error sending command to RCON: %s\n", err)
}
return
}
}
content := formatMessage(bot.state, e.Message)
lines := strings.Split(content, "\n")
// Send a separate message for each line
for i := 0; i < len(lines); i++ {
line := lines[i]
// Split long lines into additional messages
if len(line) > 100 {
lines = append(lines, "")
copy(lines[i+2:], lines[i+1:])
lines[i+1] = line[100:]
line = line[:100]
}
if err := sendToMinecraft(line, name); err != nil {
Log.Errorf("Error sending command to RCON: %s\n", err)
}
}
}
}
}
// sendToDiscord sends a message from Minecraft to the configured
// Discord channel.
func (bot *DiscordBot) sendToDiscord(m *MinecraftMessage) {
// Insert Discord mentions if configured and present
if Config.Discord.AllowMentions {
// Insert Discord mentions
m.Message = bot.insertMentions(m.Message)
}
// Send the message to Discord either via webhook or normal channel message
if Config.Discord.Webhook.Enabled {
// Get the configured webhook
id, token := matchWebhookURL(Config.Discord.Webhook.URL)
if id == "" || token == "" {
Log.Warnln("Invalid or undefined Discord webhook URL")
return
}
// Attempt to get the webhook
snowflake, err := discord.ParseSnowflake(id)
if err != nil {
Log.Errorf("Error parsing Webhook Snowflake: %s\n", err.Error())
}
webhookID := discord.WebhookID(snowflake)
// Form our webhook params
params := bot.setWebhookParams(m)
// Send to the webhook
Log.Debugf("Sending to webhook: id='%s', token='%s'\n", id, token)
if err := webhook.Execute(webhookID, token, params); err != nil {
Log.Errorf("Error sending data to Discord webhook: %s\n", err.Error())
}
} else {
// Format the message for Discord
formatted := fmt.Sprintf("**%s**: %s", m.Username, m.Message)
// Send to the configured Discord channel
if _, err := bot.state.Client.SendMessage(bot.channel, formatted, nil); err != nil {
Log.Errorf("Error sending a message to Discord: %s\n", err.Error())
}
}
}
// getNickname gets the nickname of a Discord user in a Guild.
func (bot *DiscordBot) getNickname(id discord.UserID) string {
var m *discord.Member
// Look in the cached state for the Member
m, _ = bot.state.Member(bot.guildID, id)
// Make sure we do have a user
if m == nil {
return ""
}
if m.Nick == "" {
return m.User.Username
}
return m.Nick
}
// getUserFromName gets the Discord user for the given username (without the @).
// The comparison is case-insensitive and requires an exact username match.
func (bot *DiscordBot) getUserFromName(text string) (target *discord.User) {
// Look through all guild members in the state
members, _ := bot.state.Members(bot.guildID)
for _, u := range members {
// Check if the name matches, case-insensitive
if strings.EqualFold(u.User.Username, text) {
target = &u.User
break
}
}
return target
}
// insertMentions looks for potential Discord mentions in a Minecraft chat
// message. If there are any, we will attempt to get the user being mentioned
// to get their mention string to put into the chat message.
func (bot *DiscordBot) insertMentions(msg string) string {
// Split the message into words
words := strings.Split(msg, " ")
// Iterate over each word
for _, word := range words {
// Check if the word might be a mention
if strings.HasPrefix(word, "@") {
// Attempt to get the user
user := bot.getUserFromName(word[1:])
if user != nil {
// Replace the word with the mention
msg = strings.Replace(msg, word, user.Mention(), 1)
}
}
}
return msg
}
func matchWebhookURL(url string) (string, string) {
wm := webhookRegex.FindStringSubmatch(url)
// Make sure we have the correct number of parts (ID and token)
if len(wm) != 3 {
return "", ""
}
// Return the webhook ID and token
return wm[1], wm[2]
}
// setWebhookParams sets the avatar, username, and message for a webhook request.
func (bot *DiscordBot) setWebhookParams(m *MinecraftMessage) api.ExecuteWebhookData {
// Get the avatar to use for this message
var avatarURL string
if m.Username == bot.name {
// Use the bot's avatar
avatarURL = bot.avatarURL
} else {
// Player's Minecraft head as the avatar
avatarURL = fmt.Sprintf("https://minotar.net/helm/%s/256.png", m.Username)
}
return api.ExecuteWebhookData{
Content: m.Message,
Username: m.Username,
AvatarURL: avatarURL,
}
}
func formatMessage(state *state.State, message discord.Message) string {
content := message.Content
// Replace mentions
for _, word := range strings.Split(content, " ") {
if strings.HasPrefix(word, "<#") && strings.HasSuffix(word, ">") {
// Get the ID from the mention string
id := word[2 : len(word)-1]
snowflake, _ := discord.ParseSnowflake(id)
channelID := discord.ChannelID(snowflake)
channel, err := state.Channel(channelID)
if err != nil {
Log.Warnf("Error while getting channel from Discord: %s\n", err)
continue
}
content = strings.Replace(content, fmt.Sprintf("<#%s>", id), fmt.Sprintf("#%s", channel.Name), -1)
}
}
for _, member := range message.Mentions {
content = strings.Replace(content, fmt.Sprintf("<@!%s>", member.ID), fmt.Sprintf("@%s", member.Username), -1)
}
// Escape quote characters
content = strings.Replace(content, "\"", "\\\"", -1)
return content
}
func sendToMinecraft(content, username string) error {
// Format command to send to the Minecraft server
command := fmt.Sprintf("tellraw @a %s", Config.Minecraft.TellrawTemplate)
command = strings.Replace(command, "%username%", username, -1)
command = strings.Replace(command, "%message%", content, -1)
// Create RCON connection
conn, err := rcon.Dial(Config.Minecraft.RconIP, Config.Minecraft.RconPort, Config.Minecraft.RconPassword)
if err != nil {
return err
}
defer conn.Close()
// Authenticate to RCON
if err = conn.Authenticate(); err != nil {
return err
}
// Send the command to Minecraft
if _, err := conn.SendCommand(command); err != nil {
return err
}
return nil
}
| WaitForMessages | identifier_name |
discord.go | package dolphin
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/diamondburned/arikawa/api"
"github.com/diamondburned/arikawa/discord"
"github.com/diamondburned/arikawa/gateway"
"github.com/diamondburned/arikawa/state"
"github.com/diamondburned/arikawa/webhook"
"gitlab.com/EbonJaeger/dolphin/rcon"
)
var webhookRegex = regexp.MustCompile("https://discordapp.com/api/webhooks/(.*)/(.*)")
// NewDiscordBot creates a new DiscordBot with a MinecraftWatcher and
// connects to Discord.
func NewDiscordBot() (*DiscordBot, error) {
bot := &DiscordBot{}
var discordErr error
// Create Discord session
s, discordErr := state.New("Bot " + Config.Discord.BotToken)
if discordErr != nil {
return nil, discordErr
}
bot.state = s
// Add our Discord handlers
bot.state.AddHandler(bot.onReady)
bot.state.AddHandler(bot.onGuildCreate)
bot.state.AddHandler(bot.onMessageCreate)
// Connect to Discord websocket
if discordErr = bot.state.Open(); discordErr != nil {
return nil, discordErr
}
// Get our Discord user
self, discordErr := bot.state.Me()
if discordErr != nil {
return nil, discordErr
}
// Set our data and create the Minecraft watcher
bot.id = self.ID
bot.name = self.Username
bot.avatarURL = self.AvatarURL()
if Config.Discord.ChannelID != "" {
snowflake, discordErr := discord.ParseSnowflake(Config.Discord.ChannelID)
if discordErr != nil {
return nil, discordErr
}
bot.channel = discord.ChannelID(snowflake)
} else {
return nil, errors.New("no channel ID configured")
}
bot.watcher = NewWatcher(self.Username, *Config.Minecraft.CustomDeathKeywords)
return bot, discordErr
}
// Close cleans up the watcher and closes the Discord session.
func (bot *DiscordBot) Close() error {
var closeErr error
if err := bot.watcher.Close(); err != nil {
closeErr = err
}
if err := bot.state.Session.Close(); err != nil {
closeErr = err
}
return closeErr
}
// WaitForMessages starts the Minecraft log watcher and waits for messages
// on a messages channel.
func (bot *DiscordBot) WaitForMessages() {
// Make our messages channel
mc := make(chan *MinecraftMessage)
// Start our Minecraft watcher
go bot.watcher.Watch(mc)
for {
// Read message from the channel
msg := <-mc
Log.Debugf("Received a line from Minecraft: Username='%s', Text='%s'\n", msg.Username, msg.Message)
// Don't send messages that are disabled
switch msg.Type {
case AdvancementMessage:
{
if !Config.Discord.MessageOptions.ShowAdvancements {
continue
}
}
case DeathMessage:
{
if !Config.Discord.MessageOptions.ShowDeaths {
continue
}
}
case JoinLeaveMessage:
{
if !Config.Discord.MessageOptions.ShowJoinsLeaves {
continue
}
}
}
// Send the message to the Discord channel
bot.sendToDiscord(msg)
}
}
// onReady sets the bot's Discord status.
func (bot *DiscordBot) onReady(e *gateway.ReadyEvent) {
// Set the bot gaming status
err := bot.state.Gateway.UpdateStatus(gateway.UpdateStatusData{
Game: &discord.Activity{
Name: "Bridging the Minecraft/Discord gap",
},
})
if err != nil {
Log.Errorf("Unable to update Discord status: %s\n", err)
}
}
// onGuildCreate handles when the bot joins or connects to a Guild.
func (bot *DiscordBot) onGuildCreate(e *gateway.GuildCreateEvent) {
// Make sure the guild is available
if e.Unavailable {
Log.Warnf("Attempted to join Guild '%s', but it was unavailable\n", e.Guild.Name)
return
}
if bot.guildID.String() != "" {
Log.Warnf("Received a Guild join event for '%s', but we've already joined one\n", e.Guild.Name)
return
}
Log.Infof("Connected to guild named '%s'\n", e.Guild.Name)
bot.guildID = e.Guild.ID
}
// onMessageCreate handles messages that the bot receives, and sends them
// to Minecraft via RCON.
func (bot *DiscordBot) onMessageCreate(e *gateway.MessageCreateEvent) {
// Ignore messages from ourselves
if e.Author.ID != bot.id && e.Message.WebhookID.String() == "" {
// Check if the message is a bot command
if strings.HasPrefix(e.Message.Content, "!") {
c := make(chan bool)
go parser.Parse(e.Message, bot.state, c)
// Don't go any further if the command was found and run
if <-c {
return
}
}
// Not a command, so ignore messages from other channels
if e.ChannelID.String() == Config.Discord.ChannelID {
Log.Debugln("Received a message from Discord")
// Get the name to use
var name string
if Config.Discord.UseMemberNicks {
name = bot.getNickname(e.Author.ID)
} else {
name = e.Author.Username
}
// Print the URL if the message contains an attachment but no message content
if len(e.Message.Attachments) > 0 {
if len(e.Content) == 0 {
e.Content = e.Message.Attachments[0].URL
if err := sendToMinecraft(e.Content, name); err != nil {
Log.Errorf("Error sending command to RCON: %s\n", err)
}
return
}
}
content := formatMessage(bot.state, e.Message)
lines := strings.Split(content, "\n")
// Send a separate message for each line
for i := 0; i < len(lines); i++ {
line := lines[i]
// Split long lines into additional messages
if len(line) > 100 {
lines = append(lines, "")
copy(lines[i+2:], lines[i+1:])
lines[i+1] = line[100:]
line = line[:100]
}
if err := sendToMinecraft(line, name); err != nil {
Log.Errorf("Error sending command to RCON: %s\n", err)
}
}
}
}
}
// sendToDiscord sends a message from Minecraft to the configured
// Discord channel.
func (bot *DiscordBot) sendToDiscord(m *MinecraftMessage) {
// Insert Discord mentions if configured and present
if Config.Discord.AllowMentions {
// Insert Discord mentions
m.Message = bot.insertMentions(m.Message)
}
// Send the message to Discord either via webhook or normal channel message
if Config.Discord.Webhook.Enabled {
// Get the configured webhook
id, token := matchWebhookURL(Config.Discord.Webhook.URL)
if id == "" || token == "" {
Log.Warnln("Invalid or undefined Discord webhook URL")
return
}
// Attempt to get the webhook
snowflake, err := discord.ParseSnowflake(id)
if err != nil {
Log.Errorf("Error parsing Webhook Snowflake: %s\n", err.Error())
}
webhookID := discord.WebhookID(snowflake)
// Form our webhook params
params := bot.setWebhookParams(m)
// Send to the webhook
Log.Debugf("Sending to webhook: id='%s', token='%s'\n", id, token)
if err := webhook.Execute(webhookID, token, params); err != nil {
Log.Errorf("Error sending data to Discord webhook: %s\n", err.Error())
}
} else {
// Format the message for Discord
formatted := fmt.Sprintf("**%s**: %s", m.Username, m.Message)
// Send to the configured Discord channel
if _, err := bot.state.Client.SendMessage(bot.channel, formatted, nil); err != nil {
Log.Errorf("Error sending a message to Discord: %s\n", err.Error())
}
}
}
// getNickname gets the nickname of a Discord user in a Guild.
func (bot *DiscordBot) getNickname(id discord.UserID) string {
var m *discord.Member
// Look in the cached state for the Member
m, _ = bot.state.Member(bot.guildID, id)
// Make sure we do have a user
if m == nil {
return ""
}
if m.Nick == "" {
return m.User.Username
}
return m.Nick
}
// getUserFromName gets the Discord user for the given username (without the @).
// The comparison is case-insensitive and requires an exact username match.
func (bot *DiscordBot) getUserFromName(text string) (target *discord.User) {
// Look through all guild members in the state
members, _ := bot.state.Members(bot.guildID)
for _, u := range members {
// Check if the name matches, case-insensitive
if strings.EqualFold(u.User.Username, text) {
target = &u.User
break
}
}
return target
}
// insertMentions looks for potential Discord mentions in a Minecraft chat
// message. If there are any, we will attempt to get the user being mentioned
// to get their mention string to put into the chat message.
func (bot *DiscordBot) insertMentions(msg string) string {
// Split the message into words
words := strings.Split(msg, " ")
// Iterate over each word
for _, word := range words {
// Check if the word might be a mention
if strings.HasPrefix(word, "@") {
// Attempt to get the user
user := bot.getUserFromName(word[1:])
if user != nil {
// Replace the word with the mention
msg = strings.Replace(msg, word, user.Mention(), 1)
}
}
}
return msg
}
func matchWebhookURL(url string) (string, string) {
wm := webhookRegex.FindStringSubmatch(url)
// Make sure we have the correct number of parts (ID and token)
if len(wm) != 3 {
return "", ""
}
// Return the webhook ID and token
return wm[1], wm[2]
}
// setWebhookParams sets the avatar, username, and message for a webhook request.
func (bot *DiscordBot) setWebhookParams(m *MinecraftMessage) api.ExecuteWebhookData {
// Get the avatar to use for this message
var avatarURL string
if m.Username == bot.name {
// Use the bot's avatar
avatarURL = bot.avatarURL
} else {
// Player's Minecraft head as the avatar
avatarURL = fmt.Sprintf("https://minotar.net/helm/%s/256.png", m.Username)
}
return api.ExecuteWebhookData{
Content: m.Message,
Username: m.Username,
AvatarURL: avatarURL,
}
}
func formatMessage(state *state.State, message discord.Message) string {
content := message.Content
// Replace mentions
for _, word := range strings.Split(content, " ") {
if strings.HasPrefix(word, "<#") && strings.HasSuffix(word, ">") {
// Get the ID from the mention string
id := word[2 : len(word)-1]
snowflake, _ := discord.ParseSnowflake(id)
channelID := discord.ChannelID(snowflake)
channel, err := state.Channel(channelID)
if err != nil {
Log.Warnf("Error while getting channel from Discord: %s\n", err)
continue
}
content = strings.Replace(content, fmt.Sprintf("<#%s>", id), fmt.Sprintf("#%s", channel.Name), -1)
}
}
for _, member := range message.Mentions {
content = strings.Replace(content, fmt.Sprintf("<@!%s>", member.ID), fmt.Sprintf("@%s", member.Username), -1)
}
// Escape quote characters
content = strings.Replace(content, "\"", "\\\"", -1)
return content
}
func sendToMinecraft(content, username string) error | {
// Format command to send to the Minecraft server
command := fmt.Sprintf("tellraw @a %s", Config.Minecraft.TellrawTemplate)
command = strings.Replace(command, "%username%", username, -1)
command = strings.Replace(command, "%message%", content, -1)
// Create RCON connection
conn, err := rcon.Dial(Config.Minecraft.RconIP, Config.Minecraft.RconPort, Config.Minecraft.RconPassword)
if err != nil {
return err
}
defer conn.Close()
// Authenticate to RCON
if err = conn.Authenticate(); err != nil {
return err
}
// Send the command to Minecraft
if _, err := conn.SendCommand(command); err != nil {
return err
}
return nil
} | identifier_body |
|
discord.go | package dolphin
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/diamondburned/arikawa/api"
"github.com/diamondburned/arikawa/discord"
"github.com/diamondburned/arikawa/gateway"
"github.com/diamondburned/arikawa/state"
"github.com/diamondburned/arikawa/webhook"
"gitlab.com/EbonJaeger/dolphin/rcon"
)
var webhookRegex = regexp.MustCompile("https://discordapp.com/api/webhooks/(.*)/(.*)")
// NewDiscordBot creates a new DiscordBot with a MinecraftWatcher and
// connects to Discord.
func NewDiscordBot() (*DiscordBot, error) {
bot := &DiscordBot{}
var discordErr error
// Create Discord session
s, discordErr := state.New("Bot " + Config.Discord.BotToken)
if discordErr != nil {
return nil, discordErr
}
bot.state = s
// Add our Discord handlers
bot.state.AddHandler(bot.onReady)
bot.state.AddHandler(bot.onGuildCreate)
bot.state.AddHandler(bot.onMessageCreate)
// Connect to Discord websocket
if discordErr = bot.state.Open(); discordErr != nil {
return nil, discordErr
}
// Get our Discord user
self, discordErr := bot.state.Me()
if discordErr != nil {
return nil, discordErr
}
// Set our data and create the Minecraft watcher
bot.id = self.ID
bot.name = self.Username
bot.avatarURL = self.AvatarURL()
if Config.Discord.ChannelID != "" {
snowflake, discordErr := discord.ParseSnowflake(Config.Discord.ChannelID)
if discordErr != nil {
return nil, discordErr
}
bot.channel = discord.ChannelID(snowflake)
} else {
return nil, errors.New("no channel ID configured")
}
bot.watcher = NewWatcher(self.Username, *Config.Minecraft.CustomDeathKeywords)
return bot, discordErr
}
// Close cleans up the watcher and closes the Discord session.
func (bot *DiscordBot) Close() error {
var closeErr error
if err := bot.watcher.Close(); err != nil {
closeErr = err
}
if err := bot.state.Session.Close(); err != nil {
closeErr = err
}
return closeErr
}
// WaitForMessages starts the Minecraft log watcher and waits for messages
// on a messages channel.
func (bot *DiscordBot) WaitForMessages() {
// Make our messages channel
mc := make(chan *MinecraftMessage)
// Start our Minecraft watcher
go bot.watcher.Watch(mc)
for {
// Read message from the channel
msg := <-mc
Log.Debugf("Received a line from Minecraft: Username='%s', Text='%s'\n", msg.Username, msg.Message)
// Don't send messages that are disabled
switch msg.Type {
case AdvancementMessage:
{
if !Config.Discord.MessageOptions.ShowAdvancements {
continue
}
}
case DeathMessage:
{
if !Config.Discord.MessageOptions.ShowDeaths {
continue
}
}
case JoinLeaveMessage:
{
if !Config.Discord.MessageOptions.ShowJoinsLeaves {
continue
}
}
}
// Send the message to the Discord channel
bot.sendToDiscord(msg)
}
}
// onReady sets the bot's Discord status.
func (bot *DiscordBot) onReady(e *gateway.ReadyEvent) {
// Set the bot gaming status
err := bot.state.Gateway.UpdateStatus(gateway.UpdateStatusData{
Game: &discord.Activity{
Name: "Bridging the Minecraft/Discord gap",
},
})
if err != nil {
Log.Errorf("Unable to update Discord status: %s\n", err)
}
}
// onGuildCreate handles when the bot joins or connects to a Guild.
func (bot *DiscordBot) onGuildCreate(e *gateway.GuildCreateEvent) {
// Make sure the guild is available
if e.Unavailable {
Log.Warnf("Attempted to join Guild '%s', but it was unavailable\n", e.Guild.Name)
return
}
if bot.guildID.String() != "" {
Log.Warnf("Received a Guild join event for '%s', but we've already joined one\n", e.Guild.Name)
return
}
Log.Infof("Connected to guild named '%s'\n", e.Guild.Name)
bot.guildID = e.Guild.ID
}
// onMessageCreate handles messages that the bot receives, and sends them
// to Minecraft via RCON.
func (bot *DiscordBot) onMessageCreate(e *gateway.MessageCreateEvent) {
// Ignore messages from ourselves
if e.Author.ID != bot.id && e.Message.WebhookID.String() == "" {
// Check if the message is a bot command
if strings.HasPrefix(e.Message.Content, "!") {
c := make(chan bool)
go parser.Parse(e.Message, bot.state, c)
// Don't go any further if the command was found and run
if <-c {
return
}
}
// Not a command, so ignore messages from other channels
if e.ChannelID.String() == Config.Discord.ChannelID {
Log.Debugln("Received a message from Discord")
// Get the name to use
var name string
if Config.Discord.UseMemberNicks {
name = bot.getNickname(e.Author.ID)
} else {
name = e.Author.Username
}
// Print the URL if the message contains an attachment but no message content
if len(e.Message.Attachments) > 0 {
if len(e.Content) == 0 {
e.Content = e.Message.Attachments[0].URL
if err := sendToMinecraft(e.Content, name); err != nil {
Log.Errorf("Error sending command to RCON: %s\n", err)
}
return
}
}
content := formatMessage(bot.state, e.Message)
lines := strings.Split(content, "\n")
// Send a separate message for each line
for i := 0; i < len(lines); i++ {
line := lines[i]
// Split long lines into additional messages
if len(line) > 100 {
lines = append(lines, "")
copy(lines[i+2:], lines[i+1:])
lines[i+1] = line[100:]
line = line[:100]
}
if err := sendToMinecraft(line, name); err != nil {
Log.Errorf("Error sending command to RCON: %s\n", err)
}
}
}
}
}
// sendToDiscord sends a message from Minecraft to the configured
// Discord channel.
func (bot *DiscordBot) sendToDiscord(m *MinecraftMessage) {
// Insert Discord mentions if configured and present
if Config.Discord.AllowMentions {
// Insert Discord mentions
m.Message = bot.insertMentions(m.Message)
}
// Send the message to Discord either via webhook or normal channel message
if Config.Discord.Webhook.Enabled {
// Get the configured webhook
id, token := matchWebhookURL(Config.Discord.Webhook.URL)
if id == "" || token == "" {
Log.Warnln("Invalid or undefined Discord webhook URL")
return
}
// Attempt to get the webhook
snowflake, err := discord.ParseSnowflake(id)
if err != nil {
Log.Errorf("Error parsing Webhook Snowflake: %s\n", err.Error())
}
webhookID := discord.WebhookID(snowflake)
// Form our webhook params
params := bot.setWebhookParams(m)
// Send to the webhook
Log.Debugf("Sending to webhook: id='%s', token='%s'\n", id, token)
if err := webhook.Execute(webhookID, token, params); err != nil {
Log.Errorf("Error sending data to Discord webhook: %s\n", err.Error())
}
} else {
// Format the message for Discord
formatted := fmt.Sprintf("**%s**: %s", m.Username, m.Message)
// Send to the configured Discord channel
if _, err := bot.state.Client.SendMessage(bot.channel, formatted, nil); err != nil {
Log.Errorf("Error sending a message to Discord: %s\n", err.Error())
}
}
}
// getNickname gets the nickname of a Discord user in a Guild.
func (bot *DiscordBot) getNickname(id discord.UserID) string {
var m *discord.Member
// Look in the cached state for the Member
m, _ = bot.state.Member(bot.guildID, id)
// Make sure we do have a user
if m == nil {
return ""
}
if m.Nick == "" {
return m.User.Username
}
return m.Nick
}
// getUserFromName gets the Discord user for the given username (without the @).
// The comparison is case-insensitive and requires an exact username match.
func (bot *DiscordBot) getUserFromName(text string) (target *discord.User) {
// Look through all guild members in the state
members, _ := bot.state.Members(bot.guildID)
for _, u := range members {
// Check if the name matches, case-insensitive
if strings.EqualFold(u.User.Username, text) {
target = &u.User
break
}
}
return target
}
// insertMentions looks for potential Discord mentions in a Minecraft chat
// message. If there are any, we will attempt to get the user being mentioned
// to get their mention string to put into the chat message.
func (bot *DiscordBot) insertMentions(msg string) string {
// Split the message into words
words := strings.Split(msg, " ")
// Iterate over each word
for _, word := range words {
// Check if the word might be a mention
if strings.HasPrefix(word, "@") {
// Attempt to get the user
user := bot.getUserFromName(word[1:])
if user != nil {
// Replace the word with the mention
msg = strings.Replace(msg, word, user.Mention(), 1)
}
}
}
return msg
}
func matchWebhookURL(url string) (string, string) {
wm := webhookRegex.FindStringSubmatch(url)
// Make sure we have the correct number of parts (ID and token)
if len(wm) != 3 {
return "", ""
}
// Return the webhook ID and token
return wm[1], wm[2]
}
// setWebhookParams sets the avatar, username, and message for a webhook request.
func (bot *DiscordBot) setWebhookParams(m *MinecraftMessage) api.ExecuteWebhookData {
// Get the avatar to use for this message
var avatarURL string
if m.Username == bot.name {
// Use the bot's avatar
avatarURL = bot.avatarURL
} else {
// Player's Minecraft head as the avatar
avatarURL = fmt.Sprintf("https://minotar.net/helm/%s/256.png", m.Username)
}
return api.ExecuteWebhookData{
Content: m.Message,
Username: m.Username,
AvatarURL: avatarURL,
}
}
func formatMessage(state *state.State, message discord.Message) string {
content := message.Content
// Replace mentions
for _, word := range strings.Split(content, " ") {
if strings.HasPrefix(word, "<#") && strings.HasSuffix(word, ">") {
// Get the ID from the mention string
id := word[2 : len(word)-1]
snowflake, _ := discord.ParseSnowflake(id)
channelID := discord.ChannelID(snowflake)
channel, err := state.Channel(channelID)
if err != nil {
Log.Warnf("Error while getting channel from Discord: %s\n", err)
continue
} | }
for _, member := range message.Mentions {
content = strings.Replace(content, fmt.Sprintf("<@!%s>", member.ID), fmt.Sprintf("@%s", member.Username), -1)
}
// Escape quote characters
content = strings.Replace(content, "\"", "\\\"", -1)
return content
}
func sendToMinecraft(content, username string) error {
// Format command to send to the Minecraft server
command := fmt.Sprintf("tellraw @a %s", Config.Minecraft.TellrawTemplate)
command = strings.Replace(command, "%username%", username, -1)
command = strings.Replace(command, "%message%", content, -1)
// Create RCON connection
conn, err := rcon.Dial(Config.Minecraft.RconIP, Config.Minecraft.RconPort, Config.Minecraft.RconPassword)
if err != nil {
return err
}
defer conn.Close()
// Authenticate to RCON
if err = conn.Authenticate(); err != nil {
return err
}
// Send the command to Minecraft
if _, err := conn.SendCommand(command); err != nil {
return err
}
return nil
} |
content = strings.Replace(content, fmt.Sprintf("<#%s>", id), fmt.Sprintf("#%s", channel.Name), -1)
} | random_line_split |
mod.rs | use std::cell::RefCell;
use std::collections::HashMap;
use std::fs::{self, File};
use std::path::Path;
use glium;
use glium::backend::Facade;
use image::{self, DynamicImage, GenericImage, Rgba};
use texture_packer::Rect;
use texture_packer::SkylinePacker;
use texture_packer::{TexturePacker, TexturePackerConfig};
use texture_packer::importer::ImageImporter;
use texture_packer::exporter::ImageExporter;
mod config;
pub mod font;
pub mod texture_atlas;
use self::config::TileAtlasConfig;
pub type Texture2d = glium::texture::CompressedSrgbTexture2d;
type AnimFrames = u64;
type AnimMillisDelay = u64;
#[derive(Serialize, Deserialize, Clone)]
pub enum TileKind {
Static,
Animated(AnimFrames, AnimMillisDelay),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AtlasRect {
pub x: u32,
pub y: u32,
pub w: u32,
pub h: u32,
}
impl From<Rect> for AtlasRect {
fn from(rect: Rect) -> AtlasRect {
AtlasRect {
x: rect.x,
y: rect.y,
w: rect.w,
h: rect.h,
}
}
}
pub type AtlasTextureRegion = (f32, f32, f32, f32);
pub enum TileShape {
Static,
Autotile,
Wall,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasTileData {
pub offset: (u32, u32),
pub is_autotile: bool,
pub tile_kind: TileKind,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasTile {
pub data: AtlasTileData,
pub cached_rect: RefCell<Option<AtlasTextureRegion>>,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasFrame {
tile_size: (u32, u32),
texture_idx: usize,
rect: AtlasRect,
offsets: HashMap<String, AtlasTile>,
}
impl AtlasFrame {
pub fn new(texture_idx: usize, rect: Rect, tile_size: (u32, u32)) -> Self {
AtlasFrame {
tile_size: tile_size,
texture_idx: texture_idx,
rect: AtlasRect::from(rect),
offsets: HashMap::new(),
}
}
}
pub type TilePacker<'a> = TexturePacker<'a, DynamicImage, SkylinePacker<Rgba<u8>>>;
pub struct TileAtlas {
config: TileAtlasConfig,
textures: Vec<Texture2d>,
indices: Vec<String>,
}
pub struct TileAtlasBuilder<'a> {
locations: HashMap<String, String>,
frames: HashMap<String, AtlasFrame>,
packers: Vec<TilePacker<'a>>,
pub file_hash: String,
}
impl <'a> TileAtlasBuilder<'a> {
pub fn new() -> Self {
let mut builder = TileAtlasBuilder {
locations: HashMap::new(),
frames: HashMap::new(),
packers: Vec::new(),
file_hash: String::new(),
};
builder.add_packer();
builder
}
pub fn add_tile(&mut self, path_str: &str, index: String, tile_data: AtlasTileData) {
let key = path_str.to_string();
assert!(self.frames.contains_key(&path_str.to_string()));
{
let mut frame = self.frames.get_mut(&key).unwrap();
assert!(!frame.offsets.contains_key(&index));
let tile = AtlasTile {
data: tile_data,
cached_rect: RefCell::new(None),
};
frame.offsets.insert(index.clone(), tile);
self.locations.insert(index, key);
}
}
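    /// Imports the image at `path_string` and packs it into the first packer
    /// that has room for it, adding a new packer when none can fit it, then
    /// records the resulting frame under that path.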
pub fn add_frame(&mut self, path_string: &str, tile_size: (u32, u32)) {
if self.frames.contains_key(path_string) {
return;
}
let path = Path::new(&path_string);
let texture = ImageImporter::import_from_file(path).unwrap();
for (idx, packer) in self.packers.iter_mut().enumerate() {
if packer.can_pack(&texture) {
packer.pack_own(path_string.to_string(), texture).unwrap();
let rect = packer.get_frame(path_string).unwrap().frame;
self.frames.insert(path_string.to_string(), AtlasFrame::new(idx, rect, tile_size));
// cannot return self here, since self already borrowed, so
// cannot use builder pattern.
return;
}
}
self.add_packer();
{
// complains that borrow doesn't last long enough
            // let mut packer = self.newest_packer_mut();
let packer_idx = self.packers.len() - 1;
let mut packer = &mut self.packers[packer_idx];
packer.pack_own(path_string.to_string(), texture).unwrap();
let rect = packer.get_frame(&path_string).unwrap().frame;
self.frames.insert(path_string.to_string(), AtlasFrame::new(packer_idx, rect, tile_size));
}
}
fn add_packer(&mut self) {
let config = TexturePackerConfig {
max_width: 2048,
max_height: 2048,
allow_rotation: false,
texture_outlines: false,
trim: false,
texture_padding: 0,
..Default::default()
};
self.packers.push(TexturePacker::new_skyline(config));
}
pub fn build<F: Facade>(&self, display: &F, packed_tex_folder: &str) -> TileAtlas {
let mut textures = Vec::new();
let packed_folder_path = config::get_config_cache_path(packed_tex_folder);
if Path::exists(packed_folder_path.as_path()) {
fs::remove_dir_all(packed_folder_path.as_path()).unwrap();
}
fs::create_dir_all(packed_folder_path.as_path()).unwrap();
for (idx, packer) in self.packers.iter().enumerate() {
let image = ImageExporter::export(packer).unwrap();
let mut file_path = packed_folder_path.clone();
file_path.push(&format!("{}.png", idx));
let mut file = File::create(file_path).unwrap();
image.save(&mut file, image::PNG).unwrap();
textures.push(make_texture(display, image));
}
println!("Saved {}", packed_tex_folder);
let config = TileAtlasConfig {
locations: self.locations.clone(),
frames: self.frames.clone(),
file_hash: self.file_hash.clone(),
};
config::write_tile_atlas_config(&config, packed_tex_folder);
TileAtlas::new(config, textures)
}
}
impl TileAtlas {
pub fn new(config: TileAtlasConfig, textures: Vec<Texture2d>) -> Self {
let mut atlas = TileAtlas {
config: config,
textures: textures,
indices: Vec::new(),
};
atlas.cache_tile_regions();
atlas
}
/// Precalculates the UV rectangles for individual tiles to avoid the
    /// overhead of recalculating them on lookup. It must be done before the
/// tile atlas can be used.
fn cache_tile_regions(&mut self) {
for frame in self.config.frames.values() {
let (frame_w, frame_h) = self.frame_size(frame);
for (tile_type, tile) in frame.offsets.iter() {
let tex_ratio = self.get_sprite_tex_ratio(tile_type);
let add_offset = get_add_offset(&frame.rect, &frame.tile_size);
let ratio = if tile.data.is_autotile {
2
} else {
1
};
let tx = ((tile.data.offset.0 + add_offset.0) * ratio) as f32 * tex_ratio[0];
let ty = ((tile.data.offset.1 + add_offset.1) * ratio) as f32 * tex_ratio[1];
let tw = (frame.tile_size.0 * ratio) as f32 / frame_w as f32;
let th = (frame.tile_size.1 * ratio) as f32 / frame_h as f32;
*tile.cached_rect.borrow_mut() = Some((tx, ty, tw, th));
}
}
self.indices = self.config.locations.keys().map(|l| l.to_string()).collect();
}
fn frame_size(&self, frame: &AtlasFrame) -> (u32, u32) |
fn texture_size(&self, texture_idx: usize) -> (u32, u32) {
self.textures[texture_idx].dimensions()
}
fn get_frame(&self, tile_type: &str) -> &AtlasFrame {
let tex_name = &self.config.locations[tile_type];
&self.config.frames[tex_name]
}
pub fn get_tile_texture_idx(&self, tile_type: &str) -> usize {
self.get_frame(tile_type).texture_idx
}
pub fn get_tilemap_tex_ratio(&self, texture_idx: usize) -> [f32; 2] {
let dimensions = self.texture_size(texture_idx);
let cols: u32 = dimensions.0 / 24;
let rows: u32 = dimensions.1 / 24;
[1.0 / cols as f32, 1.0 / rows as f32]
}
pub fn get_sprite_tex_ratio(&self, tile_type: &str) -> [f32; 2] {
let frame = self.get_frame(tile_type);
let (mut sx, mut sy) = frame.tile_size;
if frame.offsets[tile_type].data.is_autotile {
// divide the autotile into 24x24 from 48x48
sx /= 2;
sy /= 2;
}
let dimensions = self.frame_size(frame);
let cols: f32 = dimensions.0 as f32 / sx as f32;
let rows: f32 = dimensions.1 as f32 / sy as f32;
[1.0 / cols, 1.0 / rows]
}
pub fn get_tile_texture_size(&self, tile_type: &str) -> (u32, u32) {
self.get_frame(tile_type).tile_size
}
pub fn get_tile(&self, tile_type: &str) -> &AtlasTile {
let frame = self.get_frame(tile_type);
&frame.offsets[tile_type]
}
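    /// Returns the cached (u, v) texture offset for a tile, advancing the u
    /// coordinate by whole frames for animated tiles based on the elapsed milliseconds.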
pub fn get_texture_offset(&self, tile_type: &str, msecs: u64) -> (f32, f32) {
let frame = self.get_frame(tile_type);
let tile = &frame.offsets[tile_type];
let (mut tx, ty, tw, _) = tile.cached_rect.borrow()
.expect("Texture atlas regions weren't cached yet.");
match tile.data.tile_kind {
TileKind::Static => (),
TileKind::Animated(frame_count, delay) => {
let current_frame = msecs / delay;
let x_index_offset = current_frame % frame_count;
tx += x_index_offset as f32 * tw;
}
}
(tx, ty)
}
pub fn get_tile_index(&self, tile_kind: &str) -> usize {
self.indices.iter().enumerate().find(|&(_, i)| i == tile_kind).unwrap().0
}
fn get_tile_kind_indexed(&self, tile_idx: usize) -> &String {
&self.indices[tile_idx]
}
pub fn get_texture_offset_indexed(&self, tile_idx: usize, msecs: u64) -> (f32, f32) {
let kind = self.get_tile_kind_indexed(tile_idx);
self.get_texture_offset(kind, msecs)
}
pub fn get_texture(&self, idx: usize) -> &Texture2d {
&self.textures[idx]
}
pub fn passes(&self) -> usize {
self.textures.len()
}
}
fn get_add_offset(rect: &AtlasRect, tile_size: &(u32, u32)) -> (u32, u32) {
let ceil = |a, b| (a + b - 1) / b;
let cols: u32 = ceil(rect.x, tile_size.0);
let rows: u32 = ceil(rect.y, tile_size.1);
(cols, rows)
}
pub fn make_texture<F: Facade>(display: &F, image: DynamicImage) -> Texture2d {
let dimensions = image.dimensions();
let image = glium::texture::RawImage2d::from_raw_rgba_reversed(image.to_rgba().into_raw(), dimensions);
Texture2d::new(display, image).unwrap()
}
| {
self.texture_size(frame.texture_idx)
} | identifier_body |
mod.rs | use std::cell::RefCell;
use std::collections::HashMap;
use std::fs::{self, File};
use std::path::Path;
use glium;
use glium::backend::Facade;
use image::{self, DynamicImage, GenericImage, Rgba};
use texture_packer::Rect;
use texture_packer::SkylinePacker;
use texture_packer::{TexturePacker, TexturePackerConfig};
use texture_packer::importer::ImageImporter;
use texture_packer::exporter::ImageExporter;
mod config;
pub mod font;
pub mod texture_atlas;
use self::config::TileAtlasConfig;
pub type Texture2d = glium::texture::CompressedSrgbTexture2d;
type AnimFrames = u64;
type AnimMillisDelay = u64;
#[derive(Serialize, Deserialize, Clone)]
pub enum TileKind {
Static,
Animated(AnimFrames, AnimMillisDelay),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AtlasRect {
pub x: u32,
pub y: u32,
pub w: u32,
pub h: u32,
}
impl From<Rect> for AtlasRect {
fn from(rect: Rect) -> AtlasRect {
AtlasRect {
x: rect.x,
y: rect.y,
w: rect.w,
h: rect.h,
}
}
}
pub type AtlasTextureRegion = (f32, f32, f32, f32);
pub enum TileShape {
Static,
Autotile,
Wall,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasTileData {
pub offset: (u32, u32),
pub is_autotile: bool,
pub tile_kind: TileKind,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasTile {
pub data: AtlasTileData,
pub cached_rect: RefCell<Option<AtlasTextureRegion>>,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasFrame {
tile_size: (u32, u32),
texture_idx: usize,
rect: AtlasRect,
offsets: HashMap<String, AtlasTile>,
}
impl AtlasFrame {
pub fn new(texture_idx: usize, rect: Rect, tile_size: (u32, u32)) -> Self {
AtlasFrame {
tile_size: tile_size,
texture_idx: texture_idx,
rect: AtlasRect::from(rect),
offsets: HashMap::new(),
}
}
}
pub type TilePacker<'a> = TexturePacker<'a, DynamicImage, SkylinePacker<Rgba<u8>>>;
pub struct TileAtlas {
config: TileAtlasConfig,
textures: Vec<Texture2d>,
indices: Vec<String>,
}
pub struct TileAtlasBuilder<'a> {
locations: HashMap<String, String>,
frames: HashMap<String, AtlasFrame>,
packers: Vec<TilePacker<'a>>,
pub file_hash: String,
}
impl <'a> TileAtlasBuilder<'a> {
pub fn new() -> Self {
let mut builder = TileAtlasBuilder {
locations: HashMap::new(),
frames: HashMap::new(),
packers: Vec::new(),
file_hash: String::new(),
};
builder.add_packer();
builder
}
pub fn add_tile(&mut self, path_str: &str, index: String, tile_data: AtlasTileData) {
let key = path_str.to_string();
assert!(self.frames.contains_key(&path_str.to_string()));
{
let mut frame = self.frames.get_mut(&key).unwrap();
assert!(!frame.offsets.contains_key(&index));
let tile = AtlasTile {
data: tile_data,
cached_rect: RefCell::new(None),
};
frame.offsets.insert(index.clone(), tile);
self.locations.insert(index, key);
}
}
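    /// Imports the image at `path_string` and packs it into the first packer
    /// that has room for it, adding a new packer when none can fit it, then
    /// records the resulting frame under that path.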
pub fn add_frame(&mut self, path_string: &str, tile_size: (u32, u32)) {
if self.frames.contains_key(path_string) {
return;
}
let path = Path::new(&path_string);
let texture = ImageImporter::import_from_file(path).unwrap();
for (idx, packer) in self.packers.iter_mut().enumerate() {
if packer.can_pack(&texture) {
packer.pack_own(path_string.to_string(), texture).unwrap();
let rect = packer.get_frame(path_string).unwrap().frame;
self.frames.insert(path_string.to_string(), AtlasFrame::new(idx, rect, tile_size));
// cannot return self here, since self already borrowed, so
// cannot use builder pattern.
return;
}
}
self.add_packer();
{
// complains that borrow doesn't last long enough
            // let mut packer = self.newest_packer_mut();
let packer_idx = self.packers.len() - 1;
let mut packer = &mut self.packers[packer_idx];
packer.pack_own(path_string.to_string(), texture).unwrap();
let rect = packer.get_frame(&path_string).unwrap().frame;
self.frames.insert(path_string.to_string(), AtlasFrame::new(packer_idx, rect, tile_size));
}
}
fn add_packer(&mut self) {
let config = TexturePackerConfig {
max_width: 2048,
max_height: 2048,
allow_rotation: false,
texture_outlines: false,
trim: false,
texture_padding: 0,
..Default::default()
};
self.packers.push(TexturePacker::new_skyline(config));
}
pub fn build<F: Facade>(&self, display: &F, packed_tex_folder: &str) -> TileAtlas {
let mut textures = Vec::new();
let packed_folder_path = config::get_config_cache_path(packed_tex_folder);
if Path::exists(packed_folder_path.as_path()) {
fs::remove_dir_all(packed_folder_path.as_path()).unwrap();
}
fs::create_dir_all(packed_folder_path.as_path()).unwrap();
for (idx, packer) in self.packers.iter().enumerate() {
let image = ImageExporter::export(packer).unwrap();
let mut file_path = packed_folder_path.clone();
file_path.push(&format!("{}.png", idx));
let mut file = File::create(file_path).unwrap();
image.save(&mut file, image::PNG).unwrap();
textures.push(make_texture(display, image));
}
println!("Saved {}", packed_tex_folder);
let config = TileAtlasConfig {
locations: self.locations.clone(),
frames: self.frames.clone(),
file_hash: self.file_hash.clone(),
};
config::write_tile_atlas_config(&config, packed_tex_folder);
TileAtlas::new(config, textures)
}
}
impl TileAtlas {
pub fn new(config: TileAtlasConfig, textures: Vec<Texture2d>) -> Self {
let mut atlas = TileAtlas {
config: config,
textures: textures,
indices: Vec::new(),
};
atlas.cache_tile_regions();
atlas
}
/// Precalculates the UV rectangles for individual tiles to avoid the
    /// overhead of recalculating them on lookup. It must be done before the
/// tile atlas can be used.
fn cache_tile_regions(&mut self) {
for frame in self.config.frames.values() {
let (frame_w, frame_h) = self.frame_size(frame);
for (tile_type, tile) in frame.offsets.iter() {
let tex_ratio = self.get_sprite_tex_ratio(tile_type);
let add_offset = get_add_offset(&frame.rect, &frame.tile_size);
let ratio = if tile.data.is_autotile {
2
} else {
1
};
let tx = ((tile.data.offset.0 + add_offset.0) * ratio) as f32 * tex_ratio[0];
let ty = ((tile.data.offset.1 + add_offset.1) * ratio) as f32 * tex_ratio[1];
let tw = (frame.tile_size.0 * ratio) as f32 / frame_w as f32;
let th = (frame.tile_size.1 * ratio) as f32 / frame_h as f32;
*tile.cached_rect.borrow_mut() = Some((tx, ty, tw, th));
}
}
self.indices = self.config.locations.keys().map(|l| l.to_string()).collect();
}
fn frame_size(&self, frame: &AtlasFrame) -> (u32, u32) {
self.texture_size(frame.texture_idx)
}
fn texture_size(&self, texture_idx: usize) -> (u32, u32) {
self.textures[texture_idx].dimensions()
}
fn get_frame(&self, tile_type: &str) -> &AtlasFrame {
let tex_name = &self.config.locations[tile_type];
&self.config.frames[tex_name]
}
pub fn get_tile_texture_idx(&self, tile_type: &str) -> usize {
self.get_frame(tile_type).texture_idx
}
pub fn get_tilemap_tex_ratio(&self, texture_idx: usize) -> [f32; 2] {
let dimensions = self.texture_size(texture_idx);
let cols: u32 = dimensions.0 / 24;
let rows: u32 = dimensions.1 / 24;
[1.0 / cols as f32, 1.0 / rows as f32]
}
pub fn get_sprite_tex_ratio(&self, tile_type: &str) -> [f32; 2] {
let frame = self.get_frame(tile_type);
let (mut sx, mut sy) = frame.tile_size;
if frame.offsets[tile_type].data.is_autotile {
// divide the autotile into 24x24 from 48x48
sx /= 2;
sy /= 2;
}
let dimensions = self.frame_size(frame);
let cols: f32 = dimensions.0 as f32 / sx as f32;
let rows: f32 = dimensions.1 as f32 / sy as f32;
[1.0 / cols, 1.0 / rows]
}
pub fn get_tile_texture_size(&self, tile_type: &str) -> (u32, u32) {
self.get_frame(tile_type).tile_size
}
pub fn get_tile(&self, tile_type: &str) -> &AtlasTile {
let frame = self.get_frame(tile_type);
&frame.offsets[tile_type]
}
pub fn get_texture_offset(&self, tile_type: &str, msecs: u64) -> (f32, f32) {
let frame = self.get_frame(tile_type);
let tile = &frame.offsets[tile_type];
let (mut tx, ty, tw, _) = tile.cached_rect.borrow()
.expect("Texture atlas regions weren't cached yet.");
match tile.data.tile_kind {
TileKind::Static => (),
TileKind::Animated(frame_count, delay) => {
let current_frame = msecs / delay;
let x_index_offset = current_frame % frame_count;
tx += x_index_offset as f32 * tw;
}
}
(tx, ty)
}
pub fn get_tile_index(&self, tile_kind: &str) -> usize {
self.indices.iter().enumerate().find(|&(_, i)| i == tile_kind).unwrap().0
}
fn get_tile_kind_indexed(&self, tile_idx: usize) -> &String {
&self.indices[tile_idx]
}
pub fn | (&self, tile_idx: usize, msecs: u64) -> (f32, f32) {
let kind = self.get_tile_kind_indexed(tile_idx);
self.get_texture_offset(kind, msecs)
}
pub fn get_texture(&self, idx: usize) -> &Texture2d {
&self.textures[idx]
}
pub fn passes(&self) -> usize {
self.textures.len()
}
}
fn get_add_offset(rect: &AtlasRect, tile_size: &(u32, u32)) -> (u32, u32) {
let ceil = |a, b| (a + b - 1) / b;
let cols: u32 = ceil(rect.x, tile_size.0);
let rows: u32 = ceil(rect.y, tile_size.1);
(cols, rows)
}
pub fn make_texture<F: Facade>(display: &F, image: DynamicImage) -> Texture2d {
let dimensions = image.dimensions();
let image = glium::texture::RawImage2d::from_raw_rgba_reversed(image.to_rgba().into_raw(), dimensions);
Texture2d::new(display, image).unwrap()
}
| get_texture_offset_indexed | identifier_name |
mod.rs | use std::cell::RefCell;
use std::collections::HashMap;
use std::fs::{self, File};
use std::path::Path;
use glium;
use glium::backend::Facade;
use image::{self, DynamicImage, GenericImage, Rgba};
use texture_packer::Rect;
use texture_packer::SkylinePacker;
use texture_packer::{TexturePacker, TexturePackerConfig};
use texture_packer::importer::ImageImporter;
use texture_packer::exporter::ImageExporter;
mod config;
pub mod font;
pub mod texture_atlas;
use self::config::TileAtlasConfig;
pub type Texture2d = glium::texture::CompressedSrgbTexture2d;
type AnimFrames = u64;
type AnimMillisDelay = u64;
#[derive(Serialize, Deserialize, Clone)]
pub enum TileKind {
Static,
Animated(AnimFrames, AnimMillisDelay),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AtlasRect {
pub x: u32,
pub y: u32,
pub w: u32,
pub h: u32,
}
impl From<Rect> for AtlasRect {
fn from(rect: Rect) -> AtlasRect {
AtlasRect {
x: rect.x,
y: rect.y,
w: rect.w,
h: rect.h,
}
}
}
pub type AtlasTextureRegion = (f32, f32, f32, f32);
pub enum TileShape {
Static,
Autotile,
Wall,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasTileData {
pub offset: (u32, u32),
pub is_autotile: bool,
pub tile_kind: TileKind,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasTile {
pub data: AtlasTileData,
pub cached_rect: RefCell<Option<AtlasTextureRegion>>,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AtlasFrame {
tile_size: (u32, u32),
texture_idx: usize,
rect: AtlasRect, | pub fn new(texture_idx: usize, rect: Rect, tile_size: (u32, u32)) -> Self {
AtlasFrame {
tile_size: tile_size,
texture_idx: texture_idx,
rect: AtlasRect::from(rect),
offsets: HashMap::new(),
}
}
}
pub type TilePacker<'a> = TexturePacker<'a, DynamicImage, SkylinePacker<Rgba<u8>>>;
pub struct TileAtlas {
config: TileAtlasConfig,
textures: Vec<Texture2d>,
indices: Vec<String>,
}
pub struct TileAtlasBuilder<'a> {
locations: HashMap<String, String>,
frames: HashMap<String, AtlasFrame>,
packers: Vec<TilePacker<'a>>,
pub file_hash: String,
}
impl <'a> TileAtlasBuilder<'a> {
pub fn new() -> Self {
let mut builder = TileAtlasBuilder {
locations: HashMap::new(),
frames: HashMap::new(),
packers: Vec::new(),
file_hash: String::new(),
};
builder.add_packer();
builder
}
pub fn add_tile(&mut self, path_str: &str, index: String, tile_data: AtlasTileData) {
let key = path_str.to_string();
assert!(self.frames.contains_key(&path_str.to_string()));
{
let mut frame = self.frames.get_mut(&key).unwrap();
assert!(!frame.offsets.contains_key(&index));
let tile = AtlasTile {
data: tile_data,
cached_rect: RefCell::new(None),
};
frame.offsets.insert(index.clone(), tile);
self.locations.insert(index, key);
}
}
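    /// Imports the image at `path_string` and packs it into the first packer
    /// that has room for it, adding a new packer when none can fit it, then
    /// records the resulting frame under that path.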
pub fn add_frame(&mut self, path_string: &str, tile_size: (u32, u32)) {
if self.frames.contains_key(path_string) {
return;
}
let path = Path::new(&path_string);
let texture = ImageImporter::import_from_file(path).unwrap();
for (idx, packer) in self.packers.iter_mut().enumerate() {
if packer.can_pack(&texture) {
packer.pack_own(path_string.to_string(), texture).unwrap();
let rect = packer.get_frame(path_string).unwrap().frame;
self.frames.insert(path_string.to_string(), AtlasFrame::new(idx, rect, tile_size));
// cannot return self here, since self already borrowed, so
// cannot use builder pattern.
return;
}
}
self.add_packer();
{
// complains that borrow doesn't last long enough
            // let mut packer = self.newest_packer_mut();
let packer_idx = self.packers.len() - 1;
let mut packer = &mut self.packers[packer_idx];
packer.pack_own(path_string.to_string(), texture).unwrap();
let rect = packer.get_frame(&path_string).unwrap().frame;
self.frames.insert(path_string.to_string(), AtlasFrame::new(packer_idx, rect, tile_size));
}
}
fn add_packer(&mut self) {
let config = TexturePackerConfig {
max_width: 2048,
max_height: 2048,
allow_rotation: false,
texture_outlines: false,
trim: false,
texture_padding: 0,
..Default::default()
};
self.packers.push(TexturePacker::new_skyline(config));
}
pub fn build<F: Facade>(&self, display: &F, packed_tex_folder: &str) -> TileAtlas {
let mut textures = Vec::new();
let packed_folder_path = config::get_config_cache_path(packed_tex_folder);
if Path::exists(packed_folder_path.as_path()) {
fs::remove_dir_all(packed_folder_path.as_path()).unwrap();
}
fs::create_dir_all(packed_folder_path.as_path()).unwrap();
for (idx, packer) in self.packers.iter().enumerate() {
let image = ImageExporter::export(packer).unwrap();
let mut file_path = packed_folder_path.clone();
file_path.push(&format!("{}.png", idx));
let mut file = File::create(file_path).unwrap();
image.save(&mut file, image::PNG).unwrap();
textures.push(make_texture(display, image));
}
println!("Saved {}", packed_tex_folder);
let config = TileAtlasConfig {
locations: self.locations.clone(),
frames: self.frames.clone(),
file_hash: self.file_hash.clone(),
};
config::write_tile_atlas_config(&config, packed_tex_folder);
TileAtlas::new(config, textures)
}
}
impl TileAtlas {
pub fn new(config: TileAtlasConfig, textures: Vec<Texture2d>) -> Self {
let mut atlas = TileAtlas {
config: config,
textures: textures,
indices: Vec::new(),
};
atlas.cache_tile_regions();
atlas
}
/// Precalculates the UV rectangles for individual tiles to avoid the
    /// overhead of recalculating them on lookup. It must be done before the
/// tile atlas can be used.
fn cache_tile_regions(&mut self) {
for frame in self.config.frames.values() {
let (frame_w, frame_h) = self.frame_size(frame);
for (tile_type, tile) in frame.offsets.iter() {
let tex_ratio = self.get_sprite_tex_ratio(tile_type);
let add_offset = get_add_offset(&frame.rect, &frame.tile_size);
let ratio = if tile.data.is_autotile {
2
} else {
1
};
let tx = ((tile.data.offset.0 + add_offset.0) * ratio) as f32 * tex_ratio[0];
let ty = ((tile.data.offset.1 + add_offset.1) * ratio) as f32 * tex_ratio[1];
let tw = (frame.tile_size.0 * ratio) as f32 / frame_w as f32;
let th = (frame.tile_size.1 * ratio) as f32 / frame_h as f32;
*tile.cached_rect.borrow_mut() = Some((tx, ty, tw, th));
}
}
self.indices = self.config.locations.keys().map(|l| l.to_string()).collect();
}
fn frame_size(&self, frame: &AtlasFrame) -> (u32, u32) {
self.texture_size(frame.texture_idx)
}
fn texture_size(&self, texture_idx: usize) -> (u32, u32) {
self.textures[texture_idx].dimensions()
}
fn get_frame(&self, tile_type: &str) -> &AtlasFrame {
let tex_name = &self.config.locations[tile_type];
&self.config.frames[tex_name]
}
pub fn get_tile_texture_idx(&self, tile_type: &str) -> usize {
self.get_frame(tile_type).texture_idx
}
pub fn get_tilemap_tex_ratio(&self, texture_idx: usize) -> [f32; 2] {
let dimensions = self.texture_size(texture_idx);
let cols: u32 = dimensions.0 / 24;
let rows: u32 = dimensions.1 / 24;
[1.0 / cols as f32, 1.0 / rows as f32]
}
pub fn get_sprite_tex_ratio(&self, tile_type: &str) -> [f32; 2] {
let frame = self.get_frame(tile_type);
let (mut sx, mut sy) = frame.tile_size;
if frame.offsets[tile_type].data.is_autotile {
// divide the autotile into 24x24 from 48x48
sx /= 2;
sy /= 2;
}
let dimensions = self.frame_size(frame);
let cols: f32 = dimensions.0 as f32 / sx as f32;
let rows: f32 = dimensions.1 as f32 / sy as f32;
[1.0 / cols, 1.0 / rows]
}
pub fn get_tile_texture_size(&self, tile_type: &str) -> (u32, u32) {
self.get_frame(tile_type).tile_size
}
pub fn get_tile(&self, tile_type: &str) -> &AtlasTile {
let frame = self.get_frame(tile_type);
&frame.offsets[tile_type]
}
pub fn get_texture_offset(&self, tile_type: &str, msecs: u64) -> (f32, f32) {
let frame = self.get_frame(tile_type);
let tile = &frame.offsets[tile_type];
let (mut tx, ty, tw, _) = tile.cached_rect.borrow()
.expect("Texture atlas regions weren't cached yet.");
match tile.data.tile_kind {
TileKind::Static => (),
TileKind::Animated(frame_count, delay) => {
let current_frame = msecs / delay;
let x_index_offset = current_frame % frame_count;
tx += x_index_offset as f32 * tw;
}
}
(tx, ty)
}
pub fn get_tile_index(&self, tile_kind: &str) -> usize {
self.indices.iter().enumerate().find(|&(_, i)| i == tile_kind).unwrap().0
}
fn get_tile_kind_indexed(&self, tile_idx: usize) -> &String {
&self.indices[tile_idx]
}
pub fn get_texture_offset_indexed(&self, tile_idx: usize, msecs: u64) -> (f32, f32) {
let kind = self.get_tile_kind_indexed(tile_idx);
self.get_texture_offset(kind, msecs)
}
pub fn get_texture(&self, idx: usize) -> &Texture2d {
&self.textures[idx]
}
pub fn passes(&self) -> usize {
self.textures.len()
}
}
fn get_add_offset(rect: &AtlasRect, tile_size: &(u32, u32)) -> (u32, u32) {
let ceil = |a, b| (a + b - 1) / b;
let cols: u32 = ceil(rect.x, tile_size.0);
let rows: u32 = ceil(rect.y, tile_size.1);
(cols, rows)
}
pub fn make_texture<F: Facade>(display: &F, image: DynamicImage) -> Texture2d {
let dimensions = image.dimensions();
let image = glium::texture::RawImage2d::from_raw_rgba_reversed(image.to_rgba().into_raw(), dimensions);
Texture2d::new(display, image).unwrap()
} | offsets: HashMap<String, AtlasTile>,
}
impl AtlasFrame { | random_line_split |
compensation_disp.py | from Propagation import selectMode
from Animation import Anim_dyspersji
import matplotlib.pyplot as plt
import numpy as np
def find_accurate_len(actual_len, factor=8):
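    """Return the smallest power of two (at least 256) that is >= factor * actual_len."""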
min_len = factor * actual_len
estimated_len = 256
while estimated_len < min_len:
estimated_len *= 2
return estimated_len
def pad_timetraces_zeroes(time_vector, signal_vector, multi=8):
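    """Zero-pad the signal to the power-of-two length given by find_accurate_len and
    extend the time vector with the same sampling step; returns [time, signal]."""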
actual_len = len(signal_vector)
estimated_len = find_accurate_len(actual_len, multi)
dt = time_vector[-1]/len(time_vector)
len_to_add = estimated_len - actual_len
new_signal = []
new_time_vector = []
for i in range(len(signal_vector)):
new_signal.append(signal_vector[i])
new_time_vector.append((time_vector[i]))
for i in range(len_to_add):
new_signal.append(0)
new_time_vector.append(new_time_vector[-1] + dt)
return [new_time_vector, new_signal]
def calculate_n(k_Nyquista, delta_k, factor=1.1):
    # Computes the desired number of samples in the distance vector of the final trace;
    # it should be close to the length of the time vector.
    # n > 2*(k_Nyquist/delta_k)
if factor <= 1:
print("Współczynnik musi być większy od 1, przyjęta wartość wynosi 1,1")
factor = 1.1
return int(factor * 2 * (k_Nyquista/delta_k))
def calculate_k_nyquist(dispercion_curves, dt, factor=1.1):
    # k_Nyquist should be >= k(f_Nyquist), where f_Nyquist is 1/(2*delta_t).
    # The k(f) relation is stored in the dispersion curves.
if factor <= 1:
print("Podany współczynnik musi być większy od 1, przyjęto wartość 1,1")
factor = 1.1
    f_Nyq = 1/(2*dt) # this is in Hz
f_Nyq_kHz = f_Nyq/1000
max_k_Nyq = 0
for mode in dispercion_curves.AllModes.modeTable:
k_temp = Anim_dyspersji.curve_sampling(mode.all_omega_khz, dispercion_curves.k_v, [f_Nyq_kHz])
if k_temp > max_k_Nyq:
max_k_Nyq = k_temp
    return factor*max_k_Nyq[0] # the returned value is in rad/m
def calculate_delta_k(max_v_gr, signal_duration, factor=0.9):
    # delta_k should equal 1/(n*delta_x) and be smaller than 1/(m*delta_t*v_gr_max); m*delta_t equals the signal duration :)
if signal_duration <= 0:
print("Długość sygnału musi być większa od 0")
exit(0)
if factor >= 1:
print("Współczynnik musi być mniejszy od 1, przyjęta wartość to 0,9")
factor = 0.9
delta_k = factor/(signal_duration * max_v_gr)
    return delta_k # delta_k is returned in rad/m
def calculate_delta_x(k_Nyquista):
    return 1/(2*k_Nyquista) # in metres
def find_max_k(mode, k_vect, max_omega_kHz):
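    """Wavenumber of `mode` at max_omega_kHz, interpolated from its dispersion curve
    (extrapolated above the last stored point, -1 below the mode's cut-off)."""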
if max_omega_kHz > mode.all_omega_khz[-1]:
max_k = mode.findPoint([mode.points[-2], mode.points[-1]], max_omega_kHz)
elif max_omega_kHz < mode.minOmega:
max_k = -1
else:
P1 = selectMode.Point()
P2 = selectMode.Point()
for ind in range(len(mode.points)-1):
if mode.points[ind].w < max_omega_kHz and mode.points[ind+1].w > max_omega_kHz:
P1 = mode.points[ind]
P2 = mode.points[ind+1]
break
max_k = mode.findPoint([P1, P2], max_omega_kHz)
return max_k
def find_omega_in_dispercion_curves(mode, temp_k, k_vect):
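    """Frequency in kHz of `mode` at wavenumber temp_k, interpolated from the
    dispersion curve and extrapolated beyond its ends."""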
omega = mode.points[0].w
if temp_k > k_vect[-1]:
omega = mode.findPointWithGivenK([mode.points[-2], mode.points[-1]], temp_k)
elif temp_k < k_vect[0]:
if mode.points[0].w < 5:
temp_point = selectMode.Point()
omega = mode.findPointWithGivenK([temp_point, mode.points[0]], temp_k)
else:
omega = mode.points[0].w
else:
for ind in range(len(k_vect)-1):
if k_vect[ind] < temp_k and k_vect[ind + 1] > temp_k:
omega = mode.findPointWithGivenK([mode.points[ind], mode.points[ind+1]], temp_k)
return omega
def find_omega_in_dispercion_curves_rad_s(mode, temp_k, k_vect):
omega = mode.points[0].wkat_complex
if temp_k > k_vect[-1]:
omega = mode.findPointWithGivenK_rad_s([mode.points[-2], mode.points[-1]], temp_k)
elif temp_k < k_vect[0]:
if mode.points[0].w < 5:
temp_point = selectMode.Point()
omega = mode.findPointWithGivenK_rad_s([temp_point, mode.points[0]], temp_k)
else:
omega = mode.points[0].wkat_complex
else:
for ind in range(len(k_vect)-1):
if k_ve | w, freq_sampling_kHz, omega):
value = -1
for ind in range(len(freq_sampling_kHz)-1):
if freq_sampling_kHz[ind] == omega:
value = G_w[ind]
break
elif freq_sampling_kHz[ind] < omega and freq_sampling_kHz[ind + 1] > omega:
a = (G_w[ind] - G_w[ind+1])/(freq_sampling_kHz[ind] - freq_sampling_kHz[ind +1])
b = G_w[ind] - a * freq_sampling_kHz[ind]
value = a* omega + b
break
if value == -1:
if omega == freq_sampling_kHz[-1]:
value = G_w[-1]
return value
def calculate_group_velocity(mode, k_sampling_rad_m, ind, k_vect):
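    """Group velocity estimated as the finite difference d(omega)/dk between
    samples ind and ind+1 of the resampled wavenumber axis (omega in rad/s)."""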
k1 = k_sampling_rad_m[ind + 1]
k2 = k_sampling_rad_m[ind]
om1 = find_omega_in_dispercion_curves_rad_s(mode, k1, k_vect)
om2 = find_omega_in_dispercion_curves_rad_s(mode, k2, k_vect)
group_velocity = (om1 - om2)/(k1 - k2)
return group_velocity
def calculate_mean_mode(dispercion_curves, numbers_of_propagated_modes):
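    """Average the k(omega) curves of the selected modes into a single mean mode;
    returns [mean_mode, mean_k_vector]."""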
modes = []
for ind in numbers_of_propagated_modes:
modes.append(dispercion_curves.getMode(ind))
mean_mode = selectMode.Mode()
mean_k_vector = []
omegs = modes[0].all_omega_khz
for ind in range(len(omegs)):
mean_k = modes[0].points[ind].k
for mode_ind in range(len(modes)-1):
calc_k = Anim_dyspersji.curve_sampling_new(modes[mode_ind + 1].all_omega_khz, dispercion_curves.k_v, [omegs[ind]])[0]
mean_k = mean_k + calc_k
mean_k = mean_k/(len(modes))
mean_mode.addPoint(modes[0].points[ind])
mean_mode.points[ind].k = mean_k
mean_k_vector.append(mean_k)
return [mean_mode, mean_k_vector]
def mapping_from_time_to_distance(dispersion, dispercion_curves, propagated_modes, need_to_pad = False):
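    """Wilcox-style dispersion compensation: FFT the (optionally zero-padded) time trace,
    resample its spectrum onto a uniform wavenumber axis through the (mean) dispersion curve,
    weight by group velocity and inverse-transform; returns [distance, h_x]."""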
if need_to_pad:
signal_to_fft = pad_timetraces_zeroes(dispersion[0], dispersion[1])
else:
signal_to_fft = dispersion
signal_after_fft = np.fft.rfft(signal_to_fft[1])
time = signal_to_fft[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal_to_fft[1]), d=dt)*1e-3
new_freq_sampling_kHz = frequency_from_numpy
modes = []
for ind in range(len(propagated_modes)):
modes.append(dispercion_curves.getMode(propagated_modes[ind]))
k_vect = dispercion_curves.k_v
G_w = np.sqrt(signal_after_fft.real**2 + signal_after_fft.imag**2)
if len(modes) > 1:
mean_data = calculate_mean_mode(dispercion_curves, propagated_modes)
mean_mode = mean_data[0]
mean_k_vector = mean_data[1]
else:
mean_mode = modes[0]
mean_k_vector = dispercion_curves.k_v
mode_0 = mean_mode
k_vect = mean_k_vector
v_gr_max = 0
for ind in range(len(k_vect) - 1):
print("Indeks wynosi " + str(ind))
value = (mode_0.points[ind + 1].wkat_complex - mode_0.points[ind].wkat_complex)/(k_vect[ind+1]-k_vect[ind])
if value > v_gr_max:
v_gr_max = value
    #------------------ Computing the constraints --------------------------------
k_nyq = calculate_k_nyquist(dispercion_curves, dt)
delta_x = calculate_delta_x(k_nyq)
delta_k = calculate_delta_k(v_gr_max.real, time[-1])
    n = calculate_n(k_nyq, delta_k) # n is the length of the x vector, i.e. the number of samples along distance
max_k = find_max_k(mode_0, k_vect, new_freq_sampling_kHz[-1])
new_k_sampling_rad_m = []
while max_k/delta_k > 40000:
delta_k = delta_k * 5
k = 0
while k < max_k:
new_k_sampling_rad_m.append(k)
k += delta_k
G_k = []
ind = 0
for temp_k in new_k_sampling_rad_m:
om = find_omega_in_dispercion_curves(mode_0, temp_k, k_vect)
val = find_value_by_omega_in_G_w(signal_after_fft, new_freq_sampling_kHz, om)
G_k.append(val)
ind += 1
v_gr = []
for ind in range(len(new_k_sampling_rad_m) - 1):
value = calculate_group_velocity(mode_0, new_k_sampling_rad_m, ind, k_vect)
v_gr.append(value)
v_gr.append(v_gr[-1])
H_k = []
for ind in range(len(v_gr)):
H_k.append(G_k[ind] * v_gr[ind])
h_x = np.fft.ifft(H_k) / (2000 * np.pi)
    distance = 1/delta_k # in metres
n = len(h_x)
dx = distance/n
dist_vect = []
for i in range(n):
dist_vect.append(i*dx*2*np.pi/len(propagated_modes))
return [dist_vect, h_x]
def wave_length_propagation(signal, numbers_of_modes, disp_curves, distance_m, F_PADZEROS, mult=8):
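    """Numerically propagate the signal over distance_m by applying exp(-1j*k(omega)*distance_m)
    in the frequency domain, with the wavenumber summed over the selected modes; returns [time, signal]."""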
modes_table = []
for mode_number in numbers_of_modes:
modes_table.append(disp_curves.getMode(mode_number))
if F_PADZEROS:
signal_to_fft = pad_timetraces_zeroes(signal[0], signal[1], mult)
else:
signal_to_fft = signal
signal_after_fft = np.fft.rfft(signal_to_fft[1])
time = signal_to_fft[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal_to_fft[1]), d=dt)*1e-3#*1e4
k_vect = []
new_signal_after_fft = []
for ind, f in enumerate(frequency_from_numpy):
k_vect.append(0)
for mode in modes_table:
k_vect[-1] += mode.findKWithGivenOmega_kHz(f)
new_signal_after_fft.append(signal_after_fft[ind] * np.exp(-1j * k_vect[ind] * distance_m))
propagated_signal = np.fft.irfft(new_signal_after_fft) #/distance_m
new_time = np.linspace(time[0], time[-1], len(propagated_signal))
return [new_time, propagated_signal]
def time_reverse(signal):
time_vector = signal[0]
new_signal = []
for s in signal[1]:
new_signal.append(s)
new_signal.reverse()
return [time_vector, new_signal]
def time_reverse_compensation(signal, distance, numbers_of_modes, disp_curves):
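    """Time-reversal compensation: propagate the signal over `distance` and return the time-reversed trace."""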
signal_temp = wave_length_propagation(signal, numbers_of_modes, disp_curves, distance, True, 100)
return time_reverse(signal_temp)
def linear_mapping_compensation(signal, number_of_modes, disp_curves):
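    """Linear-mapping compensation: resample the spectrum through the mode's k(omega) relation
    linearised around the dominant frequency w_0 (k_lin = k_0 + k_1*(omega - w_0)) and inverse-transform."""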
signal_after_fft = np.fft.rfft(signal[1])
time = signal[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal[1]), d=dt)*1e-3
G_w = np.sqrt(signal_after_fft.real**2 + signal_after_fft.imag**2)
    # find the strongest/mean omega
max_g = G_w[0]
max_ind = 0
for ind, g in enumerate(G_w):
if g>max_g:
max_g = g
max_ind = ind
w_0 = frequency_from_numpy[max_ind]
mean_mode = disp_curves.getMode(number_of_modes)
mean_k_vector = disp_curves.k_v
k_vect = []
for ind, f in enumerate(frequency_from_numpy):
k_vect.append(mean_mode.findKWithGivenOmega_kHz(f))
G_k = []
ind = 0
for temp_k in k_vect:
om = find_omega_in_dispercion_curves(mean_mode, temp_k, mean_k_vector)
val = find_value_by_omega_in_G_w(signal_after_fft, frequency_from_numpy, om)
G_k.append(val)
k_0 = mean_mode.findKWithGivenOmega_kHz(w_0)
k_1 = 0
point1 = selectMode.Point()
point2 = selectMode.Point()
point3 = selectMode.Point()
if w_0 < mean_mode.minOmega:
k_1 = 0
if w_0 > mean_mode.points[-1].w:
point1 = mean_mode.points[-2]
point2 = mean_mode.points[-1]
for ind in range(len(mean_mode.points) - 1):
if mean_mode.points[ind].w == w_0:
point1 = mean_mode.points[ind-1]
point2 = mean_mode.points[ind]
point3 = mean_mode.points[ind+1]
break
if mean_mode.points[ind].w > w_0:
continue
if mean_mode.points[ind].w < w_0 and mean_mode.points[ind+1].w > w_0:
point1 = mean_mode.points[ind]
point2 = mean_mode.points[ind+1]
break
if point3.k == 0:
k_1 = (point1.k - point2.k)/(point1.w - point2.w)
else:
k_1_left = (point1.k - point2.k)/(point1.w - point2.w)
k_1_right = (point2.k - point3.k)/(point2.w - point3.w)
        k_1 = (k_1_right + k_1_left)/2
new_G_w = []
for ind, f in enumerate(frequency_from_numpy):
print(len(frequency_from_numpy)-ind)
k_lin = k_0 + k_1*(f - w_0)
val = find_value_by_omega_in_G_w(G_k, frequency_from_numpy, k_lin)
new_G_w.append(val)
new_g_t = np.fft.ifft(new_G_w)
new_time = np.linspace(time[0], time[-1], len(new_g_t))
return [new_time, new_g_t]
if __name__ == "__main__":
KD3 = selectMode.SelectedMode('../../../Dane/25mm_stal/Node4_25_8_8/kvect', '../../../Dane/25mm_stal/Node4_25_8_8/omega')
KD3.selectMode()
KD3.plot_modes(30)
    dist = 2 # in metres
signal_array3, time_x_freq3 = Anim_dyspersji.get_chirp()
signal3 = wave_length_propagation([time_x_freq3[0], signal_array3[3]], [1, 2, 3], KD3, dist, True, 100)
wilcox3 = mapping_from_time_to_distance(signal3, KD3, [1, 2, 3])
plt.figure("Wilcox")
plt.subplot(311)
plt.plot(time_x_freq3[0], signal_array3[3])
plt.title("Sygnał wejściowy")
plt.xlabel("distence [m]")
plt.ylabel("Amplitude [-]")
plt.subplot(312)
plt.plot(signal3[0], signal3[1])
plt.title("Rozproszony sygnał")
plt.xlabel("time [s]")
plt.ylabel("Amplitude [-]")
plt.subplot(313)
plt.plot(wilcox3[0], wilcox3[1])
plt.title("Skompensowany sygnał")
plt.xlabel("distence [m]")
plt.ylabel("Amplitude [-]")
plt.show()
exit(0)
| ct[ind] < temp_k and k_vect[ind + 1] > temp_k:
omega = mode.findPointWithGivenK_rad_s([mode.points[ind], mode.points[ind+1]], temp_k)
break
return omega
def find_value_by_omega_in_G_w(G_ | conditional_block |
compensation_disp.py | from Propagation import selectMode
from Animation import Anim_dyspersji
import matplotlib.pyplot as plt
import numpy as np
def find_accurate_len(actual_len, factor=8):
min_len = factor * actual_len
estimated_len = 256
while estimated_len < min_len:
estimated_len *= 2
return estimated_len
def pad_timetraces_zeroes(time_vector, signal_vector, multi=8):
actual_len = len(signal_vector)
estimated_len = find_accurate_len(actual_len, multi)
dt = time_vector[-1]/len(time_vector)
len_to_add = estimated_len - actual_len
new_signal = []
new_time_vector = []
for i in range(len(signal_vector)):
new_signal.append(signal_vector[i])
new_time_vector.append((time_vector[i]))
for i in range(len_to_add):
new_signal.append(0)
new_time_vector.append(new_time_vector[-1] + dt)
return [new_time_vector, new_signal]
def calculate_n(k_Nyquista, delta_k, factor=1.1):
    # Computes the desired number of samples in the distance vector of the final trace;
    # it should be close to the length of the time vector.
    # n > 2*(k_Nyquist/delta_k)
if factor <= 1:
print("Współczynnik musi być większy od 1, przyjęta wartość wynosi 1,1")
factor = 1.1
return int(factor * 2 * (k_Nyquista/delta_k))
def calculate_k_nyquist(dispercion_curves, dt, factor=1.1):
    # k_Nyquist should be >= k(f_Nyquist), where f_Nyquist is 1/(2*delta_t).
    # The k(f) relation is stored in the dispersion curves.
if factor <= 1:
print("Podany współczynnik musi być większy od 1, przyjęto wartość 1,1")
factor = 1.1
    f_Nyq = 1/(2*dt) # this is in Hz
f_Nyq_kHz = f_Nyq/1000
max_k_Nyq = 0
for mode in dispercion_curves.AllModes.modeTable:
k_temp = Anim_dyspersji.curve_sampling(mode.all_omega_khz, dispercion_curves.k_v, [f_Nyq_kHz])
if k_temp > max_k_Nyq:
max_k_Nyq = k_temp
    return factor*max_k_Nyq[0] # the returned value is in rad/m
def calculate_delta_k(max_v_gr, signal_duration, factor=0.9):
    # delta_k should equal 1/(n*delta_x) and be smaller than 1/(m*delta_t*v_gr_max); m*delta_t equals the signal duration :)
if signal_duration <= 0:
print("Długość sygnału musi być większa od 0")
exit(0)
if factor >= 1:
print("Współczynnik musi być mniejszy od 1, przyjęta wartość to 0,9")
factor = 0.9
delta_k = factor/(signal_duration * max_v_gr)
    return delta_k # delta_k is returned in rad/m
def calculate_delta_x(k_Nyquista):
    return 1/(2*k_Nyquista) # in metres
def find_max_k(mode, k_vect, max_omega_kHz):
if max_omega_kHz > mode.all_omega_khz[-1]:
max_k = mode.findPoint([mode.points[-2], mode.points[-1]], max_omega_kHz)
elif max_omega_kHz < mode.minOmega:
max_k = -1
else:
P1 = selectMode.Point()
P2 = selectMode.Point()
for ind in range(len(mode.points)-1):
if mode.points[ind].w < max_omega_kHz and mode.points[ind+1].w > max_omega_kHz:
P1 = mode.points[ind]
P2 = mode.points[ind+1]
break
max_k = mode.findPoint([P1, P2], max_omega_kHz)
return max_k
def find_omega_in_dispercion_curves(mode, temp_k, k_vect):
omega = mode.points[0].w
if temp_k > k_vect[-1]:
omega = mode.findPointWithGivenK([mode.points[-2], mode.points[-1]], temp_k)
elif temp_k < k_vect[0]:
if mode.points[0].w < 5:
temp_point = selectMode.Point()
omega = mode.findPointWithGivenK([temp_point, mode.points[0]], temp_k)
else:
omega = mode.points[0].w
else:
for ind in range(len(k_vect)-1):
if k_vect[ind] < temp_k and k_vect[ind + 1] > temp_k:
omega = mode.findPointWithGivenK([mode.points[ind], mode.points[ind+1]], temp_k)
return omega
def find_omega_in_dispercion_curves_rad_s(mode, temp_k, k_vect):
omega = mode.points[0].wkat_complex
if temp_k > k_vect[-1]:
omega = mode.findPointWithGivenK_rad_s([mode.points[-2], mode.points[-1]], temp_k)
elif temp_k < k_vect[0]:
if mode.points[0].w < 5:
temp_point = selectMode.Point()
omega = mode.findPointWithGivenK_rad_s([temp_point, mode.points[0]], temp_k)
else:
omega = mode.points[0].wkat_complex
else:
for ind in range(len(k_vect)-1):
if k_vect[ind] < temp_k and k_vect[ind + 1] > temp_k:
omega = mode.findPointWithGivenK_rad_s([mode.points[ind], mode.points[ind+1]], temp_k)
break
return omega
def find_value_by_omega_in_G_w(G_w, freq_sampling_kHz, omega):
value = -1
for ind in range(len(freq_sampling_kHz)-1):
if freq_sampling_kHz[ind] == omega:
value = G_w[ind]
break
elif freq_sampling_kHz[ind] < omega and freq_sampling_kHz[ind + 1] > omega:
a = (G_w[ind] - G_w[ind+1])/(freq_sampling_kHz[ind] - freq_sampling_kHz[ind +1])
b = G_w[ind] - a * freq_sampling_kHz[ind]
value = a* omega + b
break
if value == -1:
if omega == freq_sampling_kHz[-1]:
value = G_w[-1]
return value
def calculate_group_velocity(mode, k_sampling_rad_m, ind, k_vect):
k1 = k_sampling_rad_m[ind + 1]
k2 = k_sampling_rad_m[ind]
om1 = find_omega_in_dispercion_curves_rad_s(mode, k1, k_vect)
om2 = find_omega_in_dispercion_curves_rad_s(mode, k2, k_vect)
group_velocity = (om1 - om2)/(k1 - k2)
return group_velocity
def calculate_mean_mode(dispercion_curves, numbers_of_propagated_modes):
modes = []
for ind in numbers_of_propagated_modes:
modes.append(dispercion_curves.getMode(ind))
mean_mode = selectMode.Mode()
mean_k_vector = []
omegs = modes[0].all_omega_khz
for ind in range(len(omegs)):
mean_k = modes[0].points[ind].k
for mode_ind in range(len(modes)-1):
calc_k = Anim_dyspersji.curve_sampling_new(modes[mode_ind + 1].all_omega_khz, dispercion_curves.k_v, [omegs[ind]])[0]
mean_k = mean_k + calc_k
mean_k = mean_k/(len(modes))
mean_mode.addPoint(modes[0].points[ind])
mean_mode.points[ind].k = mean_k
mean_k_vector.append(mean_k)
return [mean_mode, mean_k_vector]
def mapping_from_time_to_distance(dispersion, dispercion_curves, propagated_modes, need_to_pad = False):
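    """Wilcox-style dispersion compensation: FFT the (optionally zero-padded) time trace,
    resample its spectrum onto a uniform wavenumber axis through the (mean) dispersion curve,
    weight by group velocity and inverse-transform; returns [distance, h_x]."""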
if need_to_pad:
signal_to_fft = pad_timetraces_zeroes(dispersion[0], dispersion[1])
else:
signal_to_fft = dispersion
signal_after_fft = np.fft.rfft(signal_to_fft[1])
time = signal_to_fft[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal_to_fft[1]), d=dt)*1e-3
new_freq_sampling_kHz = frequency_from_numpy
modes = []
for ind in range(len(propagated_modes)):
modes.append(dispercion_curves.getMode(propagated_modes[ind]))
k_vect = dispercion_curves.k_v
G_w = np.sqrt(signal_after_fft.real**2 + signal_after_fft.imag**2)
if len(modes) > 1:
mean_data = calculate_mean_mode(dispercion_curves, propagated_modes)
mean_mode = mean_data[0]
mean_k_vector = mean_data[1]
else:
mean_mode = modes[0]
mean_k_vector = dispercion_curves.k_v
mode_0 = mean_mode
k_vect = mean_k_vector
v_gr_max = 0
for ind in range(len(k_vect) - 1):
print("Indeks wynosi " + str(ind))
value = (mode_0.points[ind + 1].wkat_complex - mode_0.points[ind].wkat_complex)/(k_vect[ind+1]-k_vect[ind])
if value > v_gr_max:
v_gr_max = value
    #------------------ Computing the constraints --------------------------------
k_nyq = calculate_k_nyquist(dispercion_curves, dt)
delta_x = calculate_delta_x(k_nyq)
delta_k = calculate_delta_k(v_gr_max.real, time[-1])
    n = calculate_n(k_nyq, delta_k) # n is the length of the x vector, i.e. the number of samples along distance
max_k = find_max_k(mode_0, k_vect, new_freq_sampling_kHz[-1])
new_k_sampling_rad_m = []
while max_k/delta_k > 40000:
delta_k = delta_k * 5
k = 0
while k < max_k:
new_k_sampling_rad_m.append(k)
k += delta_k
G_k = []
ind = 0
for temp_k in new_k_sampling_rad_m:
om = find_omega_in_dispercion_curves(mode_0, temp_k, k_vect)
val = find_value_by_omega_in_G_w(signal_after_fft, new_freq_sampling_kHz, om)
G_k.append(val)
ind += 1
v_gr = []
for ind in range(len(new_k_sampling_rad_m) - 1):
value = calculate_group_velocity(mode_0, new_k_sampling_rad_m, ind, k_vect)
v_gr.append(value)
v_gr.append(v_gr[-1])
H_k = []
for ind in range(len(v_gr)):
H_k.append(G_k[ind] * v_gr[ind])
h_x = np.fft.ifft(H_k) / (2000 * np.pi)
    distance = 1/delta_k # in metres
n = len(h_x)
dx = distance/n
dist_vect = []
for i in range(n):
dist_vect.append(i*dx*2*np.pi/len(propagated_modes))
return [dist_vect, h_x]
def wave_length_propagation(signal, numbers_of_modes, disp_curves, distance_m, F_PADZEROS, mult=8):
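    """Numerically propagate the signal over distance_m by applying exp(-1j*k(omega)*distance_m)
    in the frequency domain, with the wavenumber summed over the selected modes; returns [time, signal]."""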
modes_table = []
for mode_number in numbers_of_modes:
modes_table.append(disp_curves.getMode(mode_number))
if F_PADZEROS:
signal_to_fft = pad_timetraces_zeroes(signal[0], signal[1], mult)
else:
signal_to_fft = signal
signal_after_fft = np.fft.rfft(signal_to_fft[1])
time = signal_to_fft[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal_to_fft[1]), d=dt)*1e-3#*1e4
k_vect = []
new_signal_after_fft = []
for ind, f in enumerate(frequency_from_numpy):
k_vect.append(0)
for mode in modes_table:
k_vect[-1] += mode.findKWithGivenOmega_kHz(f)
new_signal_after_fft.append(signal_after_fft[ind] * np.exp(-1j * k_vect[ind] * distance_m))
propagated_signal = np.fft.irfft(new_signal_after_fft) #/distance_m
new_time = np.linspace(time[0], time[-1], len(propagated_signal))
return [new_time, propagated_signal]
def time_reverse(signal):
time_vector = signal[0]
new_signal = []
for s in signal[1]:
new_signal.append(s)
new_signal.reverse()
return [time_vector, new_signal]
def time_reverse_compensation(signal, distance, numbers_of_modes, disp_curves):
signal_temp = wave_length_propagation(signal, numbers_of_modes, disp_curves, distance, True, 100)
return time_reverse(signal_temp)
def linear_mapping_compensation(signal, number_of_modes, disp_curves):
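    """Linear-mapping compensation: resample the spectrum through the mode's k(omega) relation
    linearised around the dominant frequency w_0 (k_lin = k_0 + k_1*(omega - w_0)) and inverse-transform."""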
signal_after_fft = np.fft.rfft(signal[1])
time = signal | edMode('../../../Dane/25mm_stal/Node4_25_8_8/kvect', '../../../Dane/25mm_stal/Node4_25_8_8/omega')
KD3.selectMode()
KD3.plot_modes(30)
    dist = 2 # in metres
signal_array3, time_x_freq3 = Anim_dyspersji.get_chirp()
signal3 = wave_length_propagation([time_x_freq3[0], signal_array3[3]], [1, 2, 3], KD3, dist, True, 100)
wilcox3 = mapping_from_time_to_distance(signal3, KD3, [1, 2, 3])
plt.figure("Wilcox")
plt.subplot(311)
plt.plot(time_x_freq3[0], signal_array3[3])
plt.title("Sygnał wejściowy")
plt.xlabel("distence [m]")
plt.ylabel("Amplitude [-]")
plt.subplot(312)
plt.plot(signal3[0], signal3[1])
plt.title("Rozproszony sygnał")
plt.xlabel("time [s]")
plt.ylabel("Amplitude [-]")
plt.subplot(313)
plt.plot(wilcox3[0], wilcox3[1])
plt.title("Skompensowany sygnał")
plt.xlabel("distence [m]")
plt.ylabel("Amplitude [-]")
plt.show()
exit(0)
| [0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal[1]), d=dt)*1e-3
G_w = np.sqrt(signal_after_fft.real**2 + signal_after_fft.imag**2)
    # find the strongest/mean omega
max_g = G_w[0]
max_ind = 0
for ind, g in enumerate(G_w):
if g>max_g:
max_g = g
max_ind = ind
w_0 = frequency_from_numpy[max_ind]
mean_mode = disp_curves.getMode(number_of_modes)
mean_k_vector = disp_curves.k_v
k_vect = []
for ind, f in enumerate(frequency_from_numpy):
k_vect.append(mean_mode.findKWithGivenOmega_kHz(f))
G_k = []
ind = 0
for temp_k in k_vect:
om = find_omega_in_dispercion_curves(mean_mode, temp_k, mean_k_vector)
val = find_value_by_omega_in_G_w(signal_after_fft, frequency_from_numpy, om)
G_k.append(val)
k_0 = mean_mode.findKWithGivenOmega_kHz(w_0)
k_1 = 0
point1 = selectMode.Point()
point2 = selectMode.Point()
point3 = selectMode.Point()
if w_0 < mean_mode.minOmega:
k_1 = 0
if w_0 > mean_mode.points[-1].w:
point1 = mean_mode.points[-2]
point2 = mean_mode.points[-1]
for ind in range(len(mean_mode.points) - 1):
if mean_mode.points[ind].w == w_0:
point1 = mean_mode.points[ind-1]
point2 = mean_mode.points[ind]
point3 = mean_mode.points[ind+1]
break
if mean_mode.points[ind].w > w_0:
continue
if mean_mode.points[ind].w < w_0 and mean_mode.points[ind+1].w > w_0:
point1 = mean_mode.points[ind]
point2 = mean_mode.points[ind+1]
break
if point3.k == 0:
k_1 = (point1.k - point2.k)/(point1.w - point2.w)
else:
k_1_left = (point1.k - point2.k)/(point1.w - point2.w)
k_1_right = (point2.k - point3.k)/(point2.w - point3.w)
        k_1 = (k_1_right + k_1_left)/2
new_G_w = []
for ind, f in enumerate(frequency_from_numpy):
print(len(frequency_from_numpy)-ind)
k_lin = k_0 + k_1*(f - w_0)
val = find_value_by_omega_in_G_w(G_k, frequency_from_numpy, k_lin)
new_G_w.append(val)
new_g_t = np.fft.ifft(new_G_w)
new_time = np.linspace(time[0], time[-1], len(new_g_t))
return [new_time, new_g_t]
if __name__ == "__main__":
KD3 = selectMode.Select | identifier_body |
compensation_disp.py | from Propagation import selectMode
from Animation import Anim_dyspersji
import matplotlib.pyplot as plt
import numpy as np
def find_accurate_len(actual_len, factor=8):
min_len = factor * actual_len
estimated_len = 256
while estimated_len < min_len:
estimated_len *= 2
return estimated_len
def pad_timetraces_zeroes(time_vector, signal_vector, multi=8):
actual_len = len(signal_vector)
estimated_len = find_accurate_len(actual_len, multi)
dt = time_vector[-1]/len(time_vector)
len_to_add = estimated_len - actual_len
new_signal = []
new_time_vector = []
for i in range(len(signal_vector)):
new_signal.append(signal_vector[i])
new_time_vector.append((time_vector[i]))
for i in range(len_to_add):
new_signal.append(0)
new_time_vector.append(new_time_vector[-1] + dt)
return [new_time_vector, new_signal]
def calculate_n(k_Nyquista, delta_k, factor=1.1):
    # Computes the desired number of samples in the distance vector of the final trace;
    # it should be close to the length of the time vector.
    # n > 2*(k_Nyquist/delta_k)
if factor <= 1:
print("Współczynnik musi być większy od 1, przyjęta wartość wynosi 1,1")
factor = 1.1
return int(factor * 2 * (k_Nyquista/delta_k))
def calculate_k_nyquist(dispercion_curves, dt, factor=1.1):
    # k_Nyquist should be >= k(f_Nyquist), where f_Nyquist is 1/(2*delta_t).
    # The k(f) relation is stored in the dispersion curves.
if factor <= 1:
print("Podany współczynnik musi być większy od 1, przyjęto wartość 1,1")
factor = 1.1
    f_Nyq = 1/(2*dt) # this is in Hz
f_Nyq_kHz = f_Nyq/1000
max_k_Nyq = 0
for mode in dispercion_curves.AllModes.modeTable: | k_temp = Anim_dyspersji.curve_sampling(mode.all_omega_khz, dispercion_curves.k_v, [f_Nyq_kHz])
if k_temp > max_k_Nyq:
max_k_Nyq = k_temp
    return factor*max_k_Nyq[0] # the returned value is in rad/m
def calculate_delta_k(max_v_gr, signal_duration, factor=0.9):
    # delta_k should equal 1/(n*delta_x) and be smaller than 1/(m*delta_t*v_gr_max); m*delta_t equals the signal duration :)
if signal_duration <= 0:
print("Długość sygnału musi być większa od 0")
exit(0)
if factor >= 1:
print("Współczynnik musi być mniejszy od 1, przyjęta wartość to 0,9")
factor = 0.9
delta_k = factor/(signal_duration * max_v_gr)
    return delta_k # delta_k is returned in rad/m
def calculate_delta_x(k_Nyquista):
    return 1/(2*k_Nyquista) # in metres
def find_max_k(mode, k_vect, max_omega_kHz):
if max_omega_kHz > mode.all_omega_khz[-1]:
max_k = mode.findPoint([mode.points[-2], mode.points[-1]], max_omega_kHz)
elif max_omega_kHz < mode.minOmega:
max_k = -1
else:
P1 = selectMode.Point()
P2 = selectMode.Point()
for ind in range(len(mode.points)-1):
if mode.points[ind].w < max_omega_kHz and mode.points[ind+1].w > max_omega_kHz:
P1 = mode.points[ind]
P2 = mode.points[ind+1]
break
max_k = mode.findPoint([P1, P2], max_omega_kHz)
return max_k
def find_omega_in_dispercion_curves(mode, temp_k, k_vect):
omega = mode.points[0].w
if temp_k > k_vect[-1]:
omega = mode.findPointWithGivenK([mode.points[-2], mode.points[-1]], temp_k)
elif temp_k < k_vect[0]:
if mode.points[0].w < 5:
temp_point = selectMode.Point()
omega = mode.findPointWithGivenK([temp_point, mode.points[0]], temp_k)
else:
omega = mode.points[0].w
else:
for ind in range(len(k_vect)-1):
if k_vect[ind] < temp_k and k_vect[ind + 1] > temp_k:
omega = mode.findPointWithGivenK([mode.points[ind], mode.points[ind+1]], temp_k)
return omega
def find_omega_in_dispercion_curves_rad_s(mode, temp_k, k_vect):
omega = mode.points[0].wkat_complex
if temp_k > k_vect[-1]:
omega = mode.findPointWithGivenK_rad_s([mode.points[-2], mode.points[-1]], temp_k)
elif temp_k < k_vect[0]:
if mode.points[0].w < 5:
temp_point = selectMode.Point()
omega = mode.findPointWithGivenK_rad_s([temp_point, mode.points[0]], temp_k)
else:
omega = mode.points[0].wkat_complex
else:
for ind in range(len(k_vect)-1):
if k_vect[ind] < temp_k and k_vect[ind + 1] > temp_k:
omega = mode.findPointWithGivenK_rad_s([mode.points[ind], mode.points[ind+1]], temp_k)
break
return omega
def find_value_by_omega_in_G_w(G_w, freq_sampling_kHz, omega):
value = -1
for ind in range(len(freq_sampling_kHz)-1):
if freq_sampling_kHz[ind] == omega:
value = G_w[ind]
break
elif freq_sampling_kHz[ind] < omega and freq_sampling_kHz[ind + 1] > omega:
a = (G_w[ind] - G_w[ind+1])/(freq_sampling_kHz[ind] - freq_sampling_kHz[ind +1])
b = G_w[ind] - a * freq_sampling_kHz[ind]
value = a* omega + b
break
if value == -1:
if omega == freq_sampling_kHz[-1]:
value = G_w[-1]
return value
def calculate_group_velocity(mode, k_sampling_rad_m, ind, k_vect):
k1 = k_sampling_rad_m[ind + 1]
k2 = k_sampling_rad_m[ind]
om1 = find_omega_in_dispercion_curves_rad_s(mode, k1, k_vect)
om2 = find_omega_in_dispercion_curves_rad_s(mode, k2, k_vect)
group_velocity = (om1 - om2)/(k1 - k2)
return group_velocity
def calculate_mean_mode(dispercion_curves, numbers_of_propagated_modes):
modes = []
for ind in numbers_of_propagated_modes:
modes.append(dispercion_curves.getMode(ind))
mean_mode = selectMode.Mode()
mean_k_vector = []
omegs = modes[0].all_omega_khz
for ind in range(len(omegs)):
mean_k = modes[0].points[ind].k
for mode_ind in range(len(modes)-1):
calc_k = Anim_dyspersji.curve_sampling_new(modes[mode_ind + 1].all_omega_khz, dispercion_curves.k_v, [omegs[ind]])[0]
mean_k = mean_k + calc_k
mean_k = mean_k/(len(modes))
mean_mode.addPoint(modes[0].points[ind])
mean_mode.points[ind].k = mean_k
mean_k_vector.append(mean_k)
return [mean_mode, mean_k_vector]
def mapping_from_time_to_distance(dispersion, dispercion_curves, propagated_modes, need_to_pad = False):
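    """Wilcox-style dispersion compensation: FFT the (optionally zero-padded) time trace,
    resample its spectrum onto a uniform wavenumber axis through the (mean) dispersion curve,
    weight by group velocity and inverse-transform; returns [distance, h_x]."""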
if need_to_pad:
signal_to_fft = pad_timetraces_zeroes(dispersion[0], dispersion[1])
else:
signal_to_fft = dispersion
signal_after_fft = np.fft.rfft(signal_to_fft[1])
time = signal_to_fft[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal_to_fft[1]), d=dt)*1e-3
new_freq_sampling_kHz = frequency_from_numpy
modes = []
for ind in range(len(propagated_modes)):
modes.append(dispercion_curves.getMode(propagated_modes[ind]))
k_vect = dispercion_curves.k_v
G_w = np.sqrt(signal_after_fft.real**2 + signal_after_fft.imag**2)
if len(modes) > 1:
mean_data = calculate_mean_mode(dispercion_curves, propagated_modes)
mean_mode = mean_data[0]
mean_k_vector = mean_data[1]
else:
mean_mode = modes[0]
mean_k_vector = dispercion_curves.k_v
mode_0 = mean_mode
k_vect = mean_k_vector
v_gr_max = 0
for ind in range(len(k_vect) - 1):
print("Indeks wynosi " + str(ind))
value = (mode_0.points[ind + 1].wkat_complex - mode_0.points[ind].wkat_complex)/(k_vect[ind+1]-k_vect[ind])
if value > v_gr_max:
v_gr_max = value
    #------------------ Computing the constraints --------------------------------
k_nyq = calculate_k_nyquist(dispercion_curves, dt)
delta_x = calculate_delta_x(k_nyq)
delta_k = calculate_delta_k(v_gr_max.real, time[-1])
    n = calculate_n(k_nyq, delta_k) # n is the length of the x vector, i.e. the number of samples along distance
max_k = find_max_k(mode_0, k_vect, new_freq_sampling_kHz[-1])
new_k_sampling_rad_m = []
while max_k/delta_k > 40000:
delta_k = delta_k * 5
k = 0
while k < max_k:
new_k_sampling_rad_m.append(k)
k += delta_k
G_k = []
ind = 0
for temp_k in new_k_sampling_rad_m:
om = find_omega_in_dispercion_curves(mode_0, temp_k, k_vect)
val = find_value_by_omega_in_G_w(signal_after_fft, new_freq_sampling_kHz, om)
G_k.append(val)
ind += 1
v_gr = []
for ind in range(len(new_k_sampling_rad_m) - 1):
value = calculate_group_velocity(mode_0, new_k_sampling_rad_m, ind, k_vect)
v_gr.append(value)
v_gr.append(v_gr[-1])
H_k = []
for ind in range(len(v_gr)):
H_k.append(G_k[ind] * v_gr[ind])
h_x = np.fft.ifft(H_k) / (2000 * np.pi)
    distance = 1/delta_k # in metres
n = len(h_x)
dx = distance/n
dist_vect = []
for i in range(n):
dist_vect.append(i*dx*2*np.pi/len(propagated_modes))
return [dist_vect, h_x]
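# wave_length_propagation (below) simulates dispersive propagation over distance_m:
# every frequency bin of the input spectrum is multiplied by exp(-1j * k(w) * d),
# with k(w) summed over the selected dispersion-curve modes, and the inverse
# real FFT then gives the dispersed time trace at the new position.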
def wave_length_propagation(signal, numbers_of_modes, disp_curves, distance_m, F_PADZEROS, mult=8):
modes_table = []
for mode_number in numbers_of_modes:
modes_table.append(disp_curves.getMode(mode_number))
if F_PADZEROS:
signal_to_fft = pad_timetraces_zeroes(signal[0], signal[1], mult)
else:
signal_to_fft = signal
signal_after_fft = np.fft.rfft(signal_to_fft[1])
time = signal_to_fft[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal_to_fft[1]), d=dt)*1e-3#*1e4
k_vect = []
new_signal_after_fft = []
for ind, f in enumerate(frequency_from_numpy):
k_vect.append(0)
for mode in modes_table:
k_vect[-1] += mode.findKWithGivenOmega_kHz(f)
new_signal_after_fft.append(signal_after_fft[ind] * np.exp(-1j * k_vect[ind] * distance_m))
propagated_signal = np.fft.irfft(new_signal_after_fft) #/distance_m
new_time = np.linspace(time[0], time[-1], len(propagated_signal))
return [new_time, propagated_signal]
def time_reverse(signal):
time_vector = signal[0]
new_signal = []
for s in signal[1]:
new_signal.append(s)
new_signal.reverse()
return [time_vector, new_signal]
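# time_reverse_compensation (below) propagates the signal numerically over the given
# distance (with heavy zero padding, mult=100) and flips the result in time;
# re-emitting that waveform should, in principle, recompress the dispersed wave packet.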
def time_reverse_compensation(signal, distance, numbers_of_modes, disp_curves):
signal_temp = wave_length_propagation(signal, numbers_of_modes, disp_curves, distance, True, 100)
return time_reverse(signal_temp)
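# linear_mapping_compensation (below) removes only first-order dispersion: it picks the
# strongest spectral component w_0, linearises the dispersion curve there as
# k_lin(w) = k_0 + k_1*(w - w_0), and resamples the wavenumber-domain spectrum through
# that line before transforming back to time.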
def linear_mapping_compensation(signal, number_of_modes, disp_curves):
signal_after_fft = np.fft.rfft(signal[1])
time = signal[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal[1]), d=dt)*1e-3
G_w = np.sqrt(signal_after_fft.real**2 + signal_after_fft.imag**2)
	# find the strongest / mean omega
max_g = G_w[0]
max_ind = 0
for ind, g in enumerate(G_w):
if g>max_g:
max_g = g
max_ind = ind
w_0 = frequency_from_numpy[max_ind]
mean_mode = disp_curves.getMode(number_of_modes)
mean_k_vector = disp_curves.k_v
k_vect = []
for ind, f in enumerate(frequency_from_numpy):
k_vect.append(mean_mode.findKWithGivenOmega_kHz(f))
G_k = []
ind = 0
for temp_k in k_vect:
om = find_omega_in_dispercion_curves(mean_mode, temp_k, mean_k_vector)
val = find_value_by_omega_in_G_w(signal_after_fft, frequency_from_numpy, om)
G_k.append(val)
k_0 = mean_mode.findKWithGivenOmega_kHz(w_0)
k_1 = 0
point1 = selectMode.Point()
point2 = selectMode.Point()
point3 = selectMode.Point()
if w_0 < mean_mode.minOmega:
k_1 = 0
if w_0 > mean_mode.points[-1].w:
point1 = mean_mode.points[-2]
point2 = mean_mode.points[-1]
for ind in range(len(mean_mode.points) - 1):
if mean_mode.points[ind].w == w_0:
point1 = mean_mode.points[ind-1]
point2 = mean_mode.points[ind]
point3 = mean_mode.points[ind+1]
break
if mean_mode.points[ind].w > w_0:
continue
if mean_mode.points[ind].w < w_0 and mean_mode.points[ind+1].w > w_0:
point1 = mean_mode.points[ind]
point2 = mean_mode.points[ind+1]
break
if point3.k == 0:
k_1 = (point1.k - point2.k)/(point1.w - point2.w)
else:
k_1_left = (point1.k - point2.k)/(point1.w - point2.w)
k_1_right = (point2.k - point3.k)/(point2.w - point3.w)
		k_1 = (k_1_right + k_1_left)/2
new_G_w = []
for ind, f in enumerate(frequency_from_numpy):
print(len(frequency_from_numpy)-ind)
k_lin = k_0 + k_1*(f - w_0)
val = find_value_by_omega_in_G_w(G_k, frequency_from_numpy, k_lin)
new_G_w.append(val)
new_g_t = np.fft.ifft(new_G_w)
new_time = np.linspace(time[0], time[-1], len(new_g_t))
return [new_time, new_g_t]
if __name__ == "__main__":
KD3 = selectMode.SelectedMode('../../../Dane/25mm_stal/Node4_25_8_8/kvect', '../../../Dane/25mm_stal/Node4_25_8_8/omega')
KD3.selectMode()
KD3.plot_modes(30)
	dist = 2 # in metres
signal_array3, time_x_freq3 = Anim_dyspersji.get_chirp()
signal3 = wave_length_propagation([time_x_freq3[0], signal_array3[3]], [1, 2, 3], KD3, dist, True, 100)
wilcox3 = mapping_from_time_to_distance(signal3, KD3, [1, 2, 3])
plt.figure("Wilcox")
plt.subplot(311)
plt.plot(time_x_freq3[0], signal_array3[3])
plt.title("Sygnał wejściowy")
plt.xlabel("distence [m]")
plt.ylabel("Amplitude [-]")
plt.subplot(312)
plt.plot(signal3[0], signal3[1])
plt.title("Rozproszony sygnał")
plt.xlabel("time [s]")
plt.ylabel("Amplitude [-]")
plt.subplot(313)
plt.plot(wilcox3[0], wilcox3[1])
plt.title("Skompensowany sygnał")
plt.xlabel("distence [m]")
plt.ylabel("Amplitude [-]")
plt.show()
exit(0) | random_line_split |
|
compensation_disp.py | from Propagation import selectMode
from Animation import Anim_dyspersji
import matplotlib.pyplot as plt
import numpy as np
def find_accurate_len(actual_len, factor=8):
min_len = factor * actual_len
estimated_len = 256
while estimated_len < min_len:
estimated_len *= 2
return estimated_len
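# pad_timetraces_zeroes (below) extends the trace with zeros up to a power-of-two
# length at least `multi` times the original, which refines the frequency grid of
# the subsequent FFT; the time vector is extended with the same sampling step dt.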
def pad_timetraces_zeroes(time_vector, signal_vector, multi=8):
actual_len = len(signal_vector)
estimated_len = find_accurate_len(actual_len, multi)
dt = time_vector[-1]/len(time_vector)
len_to_add = estimated_len - actual_len
new_signal = []
new_time_vector = []
for i in range(len(signal_vector)):
new_signal.append(signal_vector[i])
new_time_vector.append((time_vector[i]))
for i in range(len_to_add):
new_signal.append(0)
new_time_vector.append(new_time_vector[-1] + dt)
return [new_time_vector, new_signal]
def calculate_n(k_Nyquista, delta_k, factor=1.1):
	# Function computing the desired number of samples in the distance vector of the final trace
	# it should be close to the length of the time vector
	# n > 2*(k_Nyquista/delta_k)
	if factor <= 1:
		print("The factor must be greater than 1; using the default value of 1.1")
factor = 1.1
return int(factor * 2 * (k_Nyquista/delta_k))
def calculate_k_nyquist(dispercion_curves, dt, factor=1.1):
	# k_Nyquist should be >= k(f_Nyquist), where f_Nyquist is 1/(2*delta_t)
	# The k(f) relation is stored in the dispersion curves
	if factor <= 1:
		print("The given factor must be greater than 1; using the default value of 1.1")
		factor = 1.1
	f_Nyq = 1/(2*dt) # this is in Hz
f_Nyq_kHz = f_Nyq/1000
max_k_Nyq = 0
for mode in dispercion_curves.AllModes.modeTable:
k_temp = Anim_dyspersji.curve_sampling(mode.all_omega_khz, dispercion_curves.k_v, [f_Nyq_kHz])
if k_temp > max_k_Nyq:
max_k_Nyq = k_temp
	return factor*max_k_Nyq[0] # The returned value is in rad/m
def calculate_delta_k(max_v_gr, signal_duration, factor=0.9):
	# delta_k should be = 1/(n*delta_x) and smaller than 1/(m*delta_t*v_gr_max); m*delta_t equals the signal duration
	if signal_duration <= 0:
		print("The signal duration must be greater than 0")
		exit(0)
	if factor >= 1:
		print("The factor must be smaller than 1; using the default value of 0.9")
		factor = 0.9
	delta_k = factor/(signal_duration * max_v_gr)
	return delta_k # delta_k is returned in rad/m
def calculate_delta_x(k_Nyquista):
	return 1/(2*k_Nyquista) # in metres
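# Illustrative numbers only (not taken from any measurement): with a sampling step
# dt = 1e-6 s, f_Nyq = 1/(2*dt) = 500 kHz and k_Nyq is ~1.1 * k(500 kHz) from the
# dispersion curves. For a 1 ms record and v_gr_max = 5000 m/s,
# delta_k = 0.9/(1e-3 * 5000) = 0.18 rad/m, delta_x = 1/(2*k_Nyq) metres and the
# distance trace needs n ~ 1.1 * 2 * k_Nyq/delta_k samples.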
def find_max_k(mode, k_vect, max_omega_kHz):
if max_omega_kHz > mode.all_omega_khz[-1]:
max_k = mode.findPoint([mode.points[-2], mode.points[-1]], max_omega_kHz)
elif max_omega_kHz < mode.minOmega:
max_k = -1
else:
P1 = selectMode.Point()
P2 = selectMode.Point()
for ind in range(len(mode.points)-1):
if mode.points[ind].w < max_omega_kHz and mode.points[ind+1].w > max_omega_kHz:
P1 = mode.points[ind]
P2 = mode.points[ind+1]
break
max_k = mode.findPoint([P1, P2], max_omega_kHz)
return max_k
def find_omega_in_dispercion_curves(mode, temp_k, k_vect):
omega = mode.points[0].w
if temp_k > k_vect[-1]:
omega = mode.findPointWithGivenK([mode.points[-2], mode.points[-1]], temp_k)
elif temp_k < k_vect[0]:
if mode.points[0].w < 5:
temp_point = selectMode.Point()
omega = mode.findPointWithGivenK([temp_point, mode.points[0]], temp_k)
else:
omega = mode.points[0].w
else:
for ind in range(len(k_vect)-1):
if k_vect[ind] < temp_k and k_vect[ind + 1] > temp_k:
omega = mode.findPointWithGivenK([mode.points[ind], mode.points[ind+1]], temp_k)
return omega
def find_omega_in_dispercion_curves_rad_s(mode, temp_k, k_vect):
omega = mode.points[0].wkat_complex
if temp_k > k_vect[-1]:
omega = mode.findPointWithGivenK_rad_s([mode.points[-2], mode.points[-1]], temp_k)
elif temp_k < k_vect[0]:
if mode.points[0].w < 5:
temp_point = selectMode.Point()
omega = mode.findPointWithGivenK_rad_s([temp_point, mode.points[0]], temp_k)
else:
omega = mode.points[0].wkat_complex
else:
for ind in range(len(k_vect)-1):
if k_vect[ind] < temp_k and k_vect[ind + 1] > temp_k:
omega = mode.findPointWithGivenK_rad_s([mode.points[ind], mode.points[ind+1]], temp_k)
break
return omega
def find_value_by_omega_in_G_w(G_w, freq_sampling_kHz, omega):
value = -1
for ind in range(len(freq_sampling_kHz)-1):
if freq_sampling_kHz[ind] == omega:
value = G_w[ind]
break
elif freq_sampling_kHz[ind] < omega and freq_sampling_kHz[ind + 1] > omega:
a = (G_w[ind] - G_w[ind+1])/(freq_sampling_kHz[ind] - freq_sampling_kHz[ind +1])
b = G_w[ind] - a * freq_sampling_kHz[ind]
value = a* omega + b
break
if value == -1:
if omega == freq_sampling_kHz[-1]:
value = G_w[-1]
return value
def calculate_group_velocity(mode, k_sampling_rad_m, ind, k_vect):
k1 = k_sampling_rad_m[ind + 1]
k2 = k_sampling_rad_m[ind]
om1 = find_omega_in_dispercion_curves_rad_s(mode, k1, k_vect)
om2 = find_omega_in_dispercion_curves_rad_s(mode, k2, k_vect)
group_velocity = (om1 - om2)/(k1 - k2)
return group_velocity
def calculate_mean_mode(dispercion_curves, numbers_of_propagated_modes):
modes = []
for ind in numbers_of_propagated_modes:
modes.append(dispercion_curves.getMode(ind))
mean_mode = selectMode.Mode()
mean_k_vector = []
omegs = modes[0].all_omega_khz
for ind in range(len(omegs)):
mean_k = modes[0].points[ind].k
for mode_ind in range(len(modes)-1):
calc_k = Anim_dyspersji.curve_sampling_new(modes[mode_ind + 1].all_omega_khz, dispercion_curves.k_v, [omegs[ind]])[0]
mean_k = mean_k + calc_k
mean_k = mean_k/(len(modes))
mean_mode.addPoint(modes[0].points[ind])
mean_mode.points[ind].k = mean_k
mean_k_vector.append(mean_k)
return [mean_mode, mean_k_vector]
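# The function below implements a Wilcox-style time-to-distance mapping:
# 1) FFT the (optionally zero-padded) time trace to get the spectrum G(w),
# 2) resample it along the dispersion curve onto an evenly spaced wavenumber grid G(k),
# 3) weight each sample by the group velocity v_gr(k) = dw/dk,
# 4) inverse-FFT H(k) = G(k) * v_gr(k) to obtain the distance-domain trace h(x).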
def mapping_from_time_to_distance(dispersion, dispercion_curves, propagated_modes, need_to_pad = False):
if need_to_pad:
signal_to_fft = pad_timetraces_zeroes(dispersion[0], dispersion[1])
else:
signal_to_fft = dispersion
signal_after_fft = np.fft.rfft(signal_to_fft[1])
time = signal_to_fft[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal_to_fft[1]), d=dt)*1e-3
new_freq_sampling_kHz = frequency_from_numpy
modes = []
for ind in range(len(propagated_modes)):
modes.append(dispercion_curves.getMode(propagated_modes[ind]))
k_vect = dispercion_curves.k_v
G_w = np.sqrt(signal_after_fft.real**2 + signal_after_fft.imag**2)
if len(modes) > 1:
mean_data = calculate_mean_mode(dispercion_curves, propagated_modes)
mean_mode = mean_data[0]
mean_k_vector = mean_data[1]
else:
mean_mode = modes[0]
mean_k_vector = dispercion_curves.k_v
mode_0 = mean_mode
k_vect = mean_k_vector
v_gr_max = 0
for ind in range(len(k_vect) - 1):
print("Indeks wynosi " + str(ind))
value = (mode_0.points[ind + 1].wkat_complex - mode_0.points[ind].wkat_complex)/(k_vect[ind+1]-k_vect[ind])
if value > v_gr_max:
v_gr_max = value
	#------------------ Calculate the sampling constraints --------------------------------
k_nyq = calculate_k_nyquist(dispercion_curves, dt)
delta_x = calculate_delta_x(k_nyq)
delta_k = calculate_delta_k(v_gr_max.real, time[-1])
	n = calculate_n(k_nyq, delta_k) # n is the length of the x vector, i.e. the number of samples along the distance axis
max_k = find_max_k(mode_0, k_vect, new_freq_sampling_kHz[-1])
new_k_sampling_rad_m = []
while max_k/delta_k > 40000:
delta_k = delta_k * 5
k = 0
while k < max_k:
new_k_sampling_rad_m.append(k)
k += delta_k
G_k = []
ind = 0
for temp_k in new_k_sampling_rad_m:
om = find_omega_in_dispercion_curves(mode_0, temp_k, k_vect)
val = find_value_by_omega_in_G_w(signal_after_fft, new_freq_sampling_kHz, om)
G_k.append(val)
ind += 1
v_gr = []
for ind in range(len(new_k_sampling_rad_m) - 1):
value = calculate_group_velocity(mode_0, new_k_sampling_rad_m, ind, k_vect)
v_gr.append(value)
v_gr.append(v_gr[-1])
H_k = []
for ind in range(len(v_gr)):
H_k.append(G_k[ind] * v_gr[ind])
h_x = np.fft.ifft(H_k) / (2000 * np.pi)
	distance = 1/delta_k # in metres
n = len(h_x)
dx = distance/n
dist_vect = []
for i in range(n):
dist_vect.append(i*dx*2*np.pi/len(propagated_modes))
return [dist_vect, h_x]
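# wave_length_propagation (below) simulates dispersive propagation over distance_m:
# every frequency bin of the input spectrum is multiplied by exp(-1j * k(w) * d),
# with k(w) summed over the selected dispersion-curve modes, and the inverse
# real FFT then gives the dispersed time trace at the new position.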
def wave_length_propagation(signal, numbers_of_modes, disp_curves, distance_m, F_PADZEROS, mult=8):
modes_table = []
for mode_number in numbers_of_modes:
modes_table.append(disp_curves.getMode(mode_number))
if F_PADZEROS:
signal_to_fft = pad_timetraces_zeroes(signal[0], signal[1], mult)
else:
signal_to_fft = signal
signal_after_fft = np.fft.rfft(signal_to_fft[1])
time = signal_to_fft[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal_to_fft[1]), d=dt)*1e-3#*1e4
k_vect = []
new_signal_after_fft = []
for ind, f in enumerate(frequency_from_numpy):
k_vect.append(0)
for mode in modes_table:
k_vect[-1] += mode.findKWithGivenOmega_kHz(f)
new_signal_after_fft.append(signal_after_fft[ind] * np.exp(-1j * k_vect[ind] * distance_m))
propagated_signal = np.fft.irfft(new_signal_after_fft) #/distance_m
new_time = np.linspace(time[0], time[-1], len(propagated_signal))
return [new_time, propagated_signal]
def time_reverse(signal):
time_vector = signal[0]
new_signal = []
for s in signal[1]:
new_signal.append(s)
new_signal.reverse()
return [time_vector, new_signal]
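# time_reverse_compensation (below) propagates the signal numerically over the given
# distance (with heavy zero padding, mult=100) and flips the result in time;
# re-emitting that waveform should, in principle, recompress the dispersed wave packet.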
def time_reverse_compensation(signal, distance, numbers_of_modes, disp_curves):
signal_temp = wave_length_propagation(signal, numbers_of_modes, disp_curves, distance, True, 100)
return time_reverse(signal_temp)
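# linear_mapping_compensation (below) removes only first-order dispersion: it picks the
# strongest spectral component w_0, linearises the dispersion curve there as
# k_lin(w) = k_0 + k_1*(w - w_0), and resamples the wavenumber-domain spectrum through
# that line before transforming back to time.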
def linear_mapping_compensation(signal, number_of_modes, disp_c | ft = np.fft.rfft(signal[1])
time = signal[0]
dt = time[-1]/len(time)
frequency_from_numpy = np.fft.rfftfreq(len(signal[1]), d=dt)*1e-3
G_w = np.sqrt(signal_after_fft.real**2 + signal_after_fft.imag**2)
	# find the strongest / mean omega
max_g = G_w[0]
max_ind = 0
for ind, g in enumerate(G_w):
if g>max_g:
max_g = g
max_ind = ind
w_0 = frequency_from_numpy[max_ind]
mean_mode = disp_curves.getMode(number_of_modes)
mean_k_vector = disp_curves.k_v
k_vect = []
for ind, f in enumerate(frequency_from_numpy):
k_vect.append(mean_mode.findKWithGivenOmega_kHz(f))
G_k = []
ind = 0
for temp_k in k_vect:
om = find_omega_in_dispercion_curves(mean_mode, temp_k, mean_k_vector)
val = find_value_by_omega_in_G_w(signal_after_fft, frequency_from_numpy, om)
G_k.append(val)
k_0 = mean_mode.findKWithGivenOmega_kHz(w_0)
k_1 = 0
point1 = selectMode.Point()
point2 = selectMode.Point()
point3 = selectMode.Point()
if w_0 < mean_mode.minOmega:
k_1 = 0
if w_0 > mean_mode.points[-1].w:
point1 = mean_mode.points[-2]
point2 = mean_mode.points[-1]
for ind in range(len(mean_mode.points) - 1):
if mean_mode.points[ind].w == w_0:
point1 = mean_mode.points[ind-1]
point2 = mean_mode.points[ind]
point3 = mean_mode.points[ind+1]
break
if mean_mode.points[ind].w > w_0:
continue
if mean_mode.points[ind].w < w_0 and mean_mode.points[ind+1].w > w_0:
point1 = mean_mode.points[ind]
point2 = mean_mode.points[ind+1]
break
if point3.k == 0:
k_1 = (point1.k - point2.k)/(point1.w - point2.w)
else:
k_1_left = (point1.k - point2.k)/(point1.w - point2.w)
k_1_right = (point2.k - point3.k)/(point2.w - point3.w)
		k_1 = (k_1_right + k_1_left)/2
new_G_w = []
for ind, f in enumerate(frequency_from_numpy):
print(len(frequency_from_numpy)-ind)
k_lin = k_0 + k_1*(f - w_0)
val = find_value_by_omega_in_G_w(G_k, frequency_from_numpy, k_lin)
new_G_w.append(val)
new_g_t = np.fft.ifft(new_G_w)
new_time = np.linspace(time[0], time[-1], len(new_g_t))
return [new_time, new_g_t]
if __name__ == "__main__":
KD3 = selectMode.SelectedMode('../../../Dane/25mm_stal/Node4_25_8_8/kvect', '../../../Dane/25mm_stal/Node4_25_8_8/omega')
KD3.selectMode()
KD3.plot_modes(30)
	dist = 2 # in metres
signal_array3, time_x_freq3 = Anim_dyspersji.get_chirp()
signal3 = wave_length_propagation([time_x_freq3[0], signal_array3[3]], [1, 2, 3], KD3, dist, True, 100)
wilcox3 = mapping_from_time_to_distance(signal3, KD3, [1, 2, 3])
plt.figure("Wilcox")
plt.subplot(311)
plt.plot(time_x_freq3[0], signal_array3[3])
plt.title("Sygnał wejściowy")
plt.xlabel("distence [m]")
plt.ylabel("Amplitude [-]")
plt.subplot(312)
plt.plot(signal3[0], signal3[1])
plt.title("Rozproszony sygnał")
plt.xlabel("time [s]")
plt.ylabel("Amplitude [-]")
plt.subplot(313)
plt.plot(wilcox3[0], wilcox3[1])
plt.title("Skompensowany sygnał")
plt.xlabel("distence [m]")
plt.ylabel("Amplitude [-]")
plt.show()
exit(0)
| urves):
signal_after_f | identifier_name |
channelamqp.go | /*
Package mqmetric contains a set of routines common to several
commands used to export MQ metrics to different backend
storage mechanisms including Prometheus and InfluxDB.
*/
package mqmetric
/*
Copyright (c) IBM Corporation 2016, 2022
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
Mark Taylor - Initial Contribution
*/
/*
Functions in this file use the DISPLAY CHSTATUS CLIENTID(*) command to extract metrics
about running MQ AMQP channels
*/
import (
_ "fmt"
"strings"
"time"
"github.com/ibm-messaging/mq-golang/v5/ibmmq"
)
const (
// Most of the ATTR_ fields can be inherited from the channel.go module
ATTR_CHL_AMQP_CLIENT_ID = "clientid"
ATTR_CHL_AMQP_MESSAGES_RECEIVED = "messages_rcvd"
ATTR_CHL_AMQP_MESSAGES_SENT = "messages_sent"
ATTR_CHL_AMQP_CONNECTIONS = "connection_count"
)
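// These attributes are populated from the PCF equivalent of the MQSC
// "DISPLAY CHSTATUS ... CLIENTID(*)" command (MQCMD_INQUIRE_CHANNEL_STATUS restricted
// to MQCHT_AMQP channels). The message counts come back from the queue manager as
// cumulative totals, so they are flagged as delta values below and the collector
// reports the difference between successive collections.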
/*
Unlike the statistics produced via a topic, there is no discovery
of the attributes available in object STATUS queries. There is also
no discovery of descriptions for them. So this function hardcodes the
attributes we are going to look for and gives the associated descriptive
text.
*/
func ChannelAMQPInitAttributes() {
traceEntry("ChannelAMQPInitAttributes")
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
if os.init {
traceExit("ChannelAMQPInitAttributes", 1)
return
}
st.Attributes = make(map[string]*StatusAttribute)
// These fields are used to construct the key to the per-channel map values and
// as tags to uniquely identify a channel instance
attr := ATTR_CHL_NAME
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Channel Name")
attr = ATTR_CHL_AMQP_CLIENT_ID
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Client ID")
// Some other fields
attr = ATTR_CHL_CONNNAME
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Connection Name")
// These are the integer status fields that are of interest
attr = ATTR_CHL_AMQP_MESSAGES_RECEIVED
st.Attributes[attr] = newStatusAttribute(attr, "Messages Received", ibmmq.MQIACH_MSGS_RCVD)
st.Attributes[attr].delta = true // We have to manage the differences as MQ reports cumulative values
attr = ATTR_CHL_AMQP_MESSAGES_SENT
st.Attributes[attr] = newStatusAttribute(attr, "Messages Sent", ibmmq.MQIACH_MSGS_SENT)
st.Attributes[attr].delta = true // We have to manage the differences as MQ reports cumulative values
attr = ATTR_CHL_AMQP_CONNECTIONS
st.Attributes[attr] = newStatusAttribute(attr, "Connections", ibmmq.MQIACF_CONNECTION_COUNT)
// This is decoded by MQCHS_* values
attr = ATTR_CHL_STATUS
st.Attributes[attr] = newStatusAttribute(attr, "Channel Status", ibmmq.MQIACH_CHANNEL_STATUS)
attr = ATTR_CHL_SINCE_MSG
st.Attributes[attr] = newStatusAttribute(attr, "Time Since Msg", -1)
// These are not really monitoring metrics but it may enable calculations to be made such as %used for
// the channel instance availability. It's extracted at startup of the program via INQUIRE_CHL and not updated later
// until rediscovery is done based on a separate schedule.
attr = ATTR_CHL_MAX_INST
st.Attributes[attr] = newStatusAttribute(attr, "MaxInst", -1)
attr = ATTR_CHL_MAX_INSTC
st.Attributes[attr] = newStatusAttribute(attr, "MaxInstC", -1)
// Current Instances is treated a bit oddly. Although reported on each channel status,
// it actually refers to the total number of instances of the same name.
attr = ATTR_CHL_CUR_INST
st.Attributes[attr] = newStatusAttribute(attr, "Current Instances", -1)
os.init = true
traceExit("ChannelAMQPInitAttributes", 0)
}
// If we need to list the channels that match a pattern. Not needed for
// the status queries as they (unlike the pub/sub resource stats) accept
// patterns in the PCF command
func InquireAMQPChannels(patterns string) ([]string, error) {
traceEntry("InquireAMQPChannels")
ChannelAMQPInitAttributes()
rc, err := inquireObjectsWithFilter(patterns, ibmmq.MQOT_CHANNEL, OT_CHANNEL_AMQP)
traceExitErr("InquireAMQPChannels", 0, err)
return rc, err
}
func CollectAMQPChannelStatus(patterns string) error {
var err error
traceEntry("CollectAMQPChannelStatus")
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
os.objectSeen = make(map[string]bool) // Record which channels have been seen in this period
ChannelAMQPInitAttributes()
// Empty any collected values
for k := range st.Attributes {
st.Attributes[k].Values = make(map[string]*StatusValue)
}
for k := range amqpInfoMap {
amqpInfoMap[k].AttrCurInst = 0
}
channelPatterns := strings.Split(patterns, ",")
if len(channelPatterns) == 0 {
traceExit("CollectAMQPChannelStatus", 1)
return nil
}
for _, pattern := range channelPatterns {
pattern = strings.TrimSpace(pattern)
if len(pattern) == 0 {
continue
}
// This would allow us to extract SAVED information too
errCurrent := collectAMQPChannelStatus(pattern, ibmmq.MQOT_CURRENT_CHANNEL)
err = errCurrent
}
// Need to clean out the prevValues elements to stop short-lived channels
// building up in the map
for a, _ := range st.Attributes {
if st.Attributes[a].delta {
m := st.Attributes[a].prevValues
for key, _ := range m {
if _, ok := os.objectSeen[key]; ok {
// Leave it in the map
} else {
// need to delete it from the map
delete(m, key)
}
}
}
}
// Set the metrics corresponding to attributes for all the monitored channels
// The current instance count is not, strictly speaking, an attribute but it's a way
// of providing a metric alongside each channel which shows how many there are of that name.
// All instances of the same channel name, regardless of other aspects (eg remote connName)
// are given the same instance count so it could be extracted.
for key, _ := range st.Attributes[ATTR_CHL_NAME].Values {
chlName := st.Attributes[ATTR_CHL_NAME].Values[key].ValueString
if s, ok := amqpInfoMap[chlName]; ok {
maxInstC := s.AttrMaxInstC
st.Attributes[ATTR_CHL_MAX_INSTC].Values[key] = newStatusValueInt64(maxInstC)
maxInst := s.AttrMaxInst
st.Attributes[ATTR_CHL_MAX_INST].Values[key] = newStatusValueInt64(maxInst)
curInst := s.AttrCurInst
st.Attributes[ATTR_CHL_CUR_INST].Values[key] = newStatusValueInt64(curInst)
}
}
traceExitErr("CollectAMQPChannelStatus", 0, err)
return err
}
// Issue the INQUIRE_CHANNEL_STATUS command for a channel or wildcarded channel name
// Collect the responses and build up the statistics. Add CLIENTID(*) to get the actual
// instances instead of an aggregated response
func collectAMQPChannelStatus(pattern string, instanceType int32) error {
var err error
traceEntryF("collectAMQPChannelStatus", "Pattern: %s", pattern)
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
statusClearReplyQ()
putmqmd, pmo, cfh, buf := statusSetCommandHeaders()
// Can allow all the other fields to default
cfh.Command = ibmmq.MQCMD_INQUIRE_CHANNEL_STATUS
// Add the parameters one at a time into a buffer
pcfparm := new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CHANNEL_NAME
pcfparm.String = []string{pattern}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Add the parameters one at a time into a buffer
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER
pcfparm.Parameter = ibmmq.MQIACH_CHANNEL_TYPE
pcfparm.Int64Value = []int64{int64(ibmmq.MQCHT_AMQP)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CLIENT_ID
pcfparm.String = []string{"*"}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Once we know the total number of parameters, put the
// CFH header on the front of the buffer.
buf = append(cfh.Bytes(), buf...)
// And now put the command to the queue
err = ci.si.cmdQObj.Put(putmqmd, pmo, buf)
if err != nil {
traceExitErr("collectAMQPChannelStatus", 1, err)
return err
}
// Now get the responses - loop until all have been received (one
// per channel) or we run out of time
for allReceived := false; !allReceived; {
cfh, buf, allReceived, err = statusGetReply(putmqmd.MsgId)
if buf != nil {
key := parseAMQPChlData(instanceType, cfh, buf)
if key != "" {
os.objectSeen[key] = true
}
}
}
traceExitErr("collectAMQPChannelStatus", 0, err)
return err
}
// Given a PCF response message, parse it to extract the desired statistics
func parseAMQPChlData(instanceType int32, cfh *ibmmq.MQCFH, buf []byte) string {
var elem *ibmmq.PCFParameter
traceEntry("parseAMQPChlData")
ci := getConnection(GetConnectionKey())
//os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
chlName := ""
connName := ""
clientId := ""
key := ""
lastMsgDate := ""
lastMsgTime := ""
parmAvail := true
bytesRead := 0
offset := 0
datalen := len(buf)
if cfh == nil || cfh.ParameterCount == 0 {
traceExit("parseAMQPChlData", 1)
return ""
}
// Parse it once to extract the fields that are needed for the map key
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
switch elem.Parameter {
case ibmmq.MQCACH_CHANNEL_NAME:
chlName = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_CONNECTION_NAME:
connName = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_CLIENT_ID:
clientId = strings.TrimSpace(elem.String[0])
}
}
// Create a unique key for this channel instance
if connName == "" {
connName = DUMMY_STRING
}
if ci.hideAMQPClientId {
clientId = DUMMY_STRING
}
key = chlName + "/" + connName + "/" + clientId
logDebug("AMQP status - key: %s", key)
st.Attributes[ATTR_CHL_NAME].Values[key] = newStatusValueString(chlName)
st.Attributes[ATTR_CHL_CONNNAME].Values[key] = newStatusValueString(connName)
st.Attributes[ATTR_CHL_AMQP_CLIENT_ID].Values[key] = newStatusValueString(clientId)
// And then re-parse the message so we can store the metrics now knowing the map key
parmAvail = true
offset = 0
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
if !statusGetIntAttributes(GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP), elem, key) {
switch elem.Parameter {
case ibmmq.MQCACH_LAST_MSG_TIME:
lastMsgTime = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_LAST_MSG_DATE:
lastMsgDate = strings.TrimSpace(elem.String[0])
}
}
}
now := time.Now()
diff := statusTimeDiff(now, lastMsgDate, lastMsgTime)
st.Attributes[ATTR_CHL_SINCE_MSG].Values[key] = newStatusValueInt64(diff)
// Bump the number of active instances of the channel, treating it a bit like a
// regular config attribute.
if s, ok := amqpInfoMap[chlName]; ok {
s.AttrCurInst++
}
traceExitF("parseAMQPChlData", 0, "Key: %s", key)
return key
}
// Issue the INQUIRE_CHANNEL call for wildcarded channel names and
// extract the required attributes
func inquireAMQPChannelAttributes(objectPatternsList string, infoMap map[string]*ObjInfo) error {
var err error
traceEntry("inquireAMQPChannelAttributes")
ci := getConnection(GetConnectionKey())
statusClearReplyQ()
if objectPatternsList == "" {
traceExitErr("inquireAMQPChannelAttributes", 1, err)
return err
}
objectPatterns := strings.Split(strings.TrimSpace(objectPatternsList), ",")
for i := 0; i < len(objectPatterns) && err == nil; i++ {
var buf []byte
pattern := strings.TrimSpace(objectPatterns[i])
if len(pattern) == 0 {
continue
}
putmqmd, pmo, cfh, buf := statusSetCommandHeaders()
// Can allow all the other fields to default
cfh.Command = ibmmq.MQCMD_INQUIRE_CHANNEL
cfh.ParameterCount = 0
// Add the parameters one at a time into a buffer
pcfparm := new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CHANNEL_NAME
pcfparm.String = []string{pattern}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER
pcfparm.Parameter = ibmmq.MQIACH_CHANNEL_TYPE
pcfparm.Int64Value = []int64{int64(ibmmq.MQCHT_AMQP)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER_LIST
pcfparm.Parameter = ibmmq.MQIACF_CHANNEL_ATTRS
pcfparm.Int64Value = []int64{int64(ibmmq.MQIACH_MAX_INSTANCES), int64(ibmmq.MQIACH_MAX_INSTS_PER_CLIENT), int64(ibmmq.MQCACH_DESC), int64(ibmmq.MQIACH_CHANNEL_TYPE)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Once we know the total number of parameters, put the
// CFH header on the front of the buffer.
buf = append(cfh.Bytes(), buf...)
// And now put the command to the queue
err = ci.si.cmdQObj.Put(putmqmd, pmo, buf)
if err != nil {
traceExitErr("inquireAMQPChannelAttributes", 2, err)
return err
}
for allReceived := false; !allReceived; {
cfh, buf, allReceived, err = statusGetReply(putmqmd.MsgId)
if buf != nil {
parseAMQPChannelAttrData(cfh, buf, infoMap)
}
}
}
traceExit("inquireAMQPChannelAttributes", 0)
return nil
}
func parseAMQPChannelAttrData(cfh *ibmmq.MQCFH, buf []byte, infoMap map[string]*ObjInfo) {
var elem *ibmmq.PCFParameter
var ci *ObjInfo
var ok bool
traceEntry("parseAMQPChannelAttrData")
chlName := ""
parmAvail := true
bytesRead := 0
offset := 0
datalen := len(buf)
if cfh.ParameterCount == 0 {
traceExit("parseAMQPChannelAttrData", 1)
return
}
// Parse it once to extract the fields that are needed for the map key
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
// Only one field needed for channels
switch elem.Parameter {
case ibmmq.MQCACH_CHANNEL_NAME:
chlName = strings.TrimSpace(elem.String[0])
}
}
// And then re-parse the message so we can store the metrics now knowing the map key
parmAvail = true
offset = 0
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
switch elem.Parameter {
case ibmmq.MQIACH_MAX_INSTANCES:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrMaxInst = v
ci.exists = true
}
case ibmmq.MQIACH_MAX_INSTS_PER_CLIENT:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrMaxInstC = v
ci.exists = true
} | v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrChlType = v
ci.exists = true
}
case ibmmq.MQCACH_DESC:
v := elem.String[0]
if v != "" {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.Description = printableStringUTF8(v)
ci.exists = true
}
}
}
traceExit("parseAMQPChannelAttrData", 0)
return
} |
case ibmmq.MQIACH_CHANNEL_TYPE: | random_line_split |
channelamqp.go | /*
Package mqmetric contains a set of routines common to several
commands used to export MQ metrics to different backend
storage mechanisms including Prometheus and InfluxDB.
*/
package mqmetric
/*
Copyright (c) IBM Corporation 2016, 2022
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
Mark Taylor - Initial Contribution
*/
/*
Functions in this file use the DISPLAY CHSTATUS CLIENTID(*) command to extract metrics
about running MQ AMQP channels
*/
import (
_ "fmt"
"strings"
"time"
"github.com/ibm-messaging/mq-golang/v5/ibmmq"
)
const (
// Most of the ATTR_ fields can be inherited from the channel.go module
ATTR_CHL_AMQP_CLIENT_ID = "clientid"
ATTR_CHL_AMQP_MESSAGES_RECEIVED = "messages_rcvd"
ATTR_CHL_AMQP_MESSAGES_SENT = "messages_sent"
ATTR_CHL_AMQP_CONNECTIONS = "connection_count"
)
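// These attributes are populated from the PCF equivalent of the MQSC
// "DISPLAY CHSTATUS ... CLIENTID(*)" command (MQCMD_INQUIRE_CHANNEL_STATUS restricted
// to MQCHT_AMQP channels). The message counts come back from the queue manager as
// cumulative totals, so they are flagged as delta values below and the collector
// reports the difference between successive collections.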
/*
Unlike the statistics produced via a topic, there is no discovery
of the attributes available in object STATUS queries. There is also
no discovery of descriptions for them. So this function hardcodes the
attributes we are going to look for and gives the associated descriptive
text.
*/
func ChannelAMQPInitAttributes() {
traceEntry("ChannelAMQPInitAttributes")
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
if os.init {
traceExit("ChannelAMQPInitAttributes", 1)
return
}
st.Attributes = make(map[string]*StatusAttribute)
// These fields are used to construct the key to the per-channel map values and
// as tags to uniquely identify a channel instance
attr := ATTR_CHL_NAME
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Channel Name")
attr = ATTR_CHL_AMQP_CLIENT_ID
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Client ID")
// Some other fields
attr = ATTR_CHL_CONNNAME
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Connection Name")
// These are the integer status fields that are of interest
attr = ATTR_CHL_AMQP_MESSAGES_RECEIVED
st.Attributes[attr] = newStatusAttribute(attr, "Messages Received", ibmmq.MQIACH_MSGS_RCVD)
st.Attributes[attr].delta = true // We have to manage the differences as MQ reports cumulative values
attr = ATTR_CHL_AMQP_MESSAGES_SENT
st.Attributes[attr] = newStatusAttribute(attr, "Messages Sent", ibmmq.MQIACH_MSGS_SENT)
st.Attributes[attr].delta = true // We have to manage the differences as MQ reports cumulative values
attr = ATTR_CHL_AMQP_CONNECTIONS
st.Attributes[attr] = newStatusAttribute(attr, "Connections", ibmmq.MQIACF_CONNECTION_COUNT)
// This is decoded by MQCHS_* values
attr = ATTR_CHL_STATUS
st.Attributes[attr] = newStatusAttribute(attr, "Channel Status", ibmmq.MQIACH_CHANNEL_STATUS)
attr = ATTR_CHL_SINCE_MSG
st.Attributes[attr] = newStatusAttribute(attr, "Time Since Msg", -1)
// These are not really monitoring metrics but it may enable calculations to be made such as %used for
// the channel instance availability. It's extracted at startup of the program via INQUIRE_CHL and not updated later
// until rediscovery is done based on a separate schedule.
attr = ATTR_CHL_MAX_INST
st.Attributes[attr] = newStatusAttribute(attr, "MaxInst", -1)
attr = ATTR_CHL_MAX_INSTC
st.Attributes[attr] = newStatusAttribute(attr, "MaxInstC", -1)
// Current Instances is treated a bit oddly. Although reported on each channel status,
// it actually refers to the total number of instances of the same name.
attr = ATTR_CHL_CUR_INST
st.Attributes[attr] = newStatusAttribute(attr, "Current Instances", -1)
os.init = true
traceExit("ChannelAMQPInitAttributes", 0)
}
// If we need to list the channels that match a pattern. Not needed for
// the status queries as they (unlike the pub/sub resource stats) accept
// patterns in the PCF command
func InquireAMQPChannels(patterns string) ([]string, error) {
traceEntry("InquireAMQPChannels")
ChannelAMQPInitAttributes()
rc, err := inquireObjectsWithFilter(patterns, ibmmq.MQOT_CHANNEL, OT_CHANNEL_AMQP)
traceExitErr("InquireAMQPChannels", 0, err)
return rc, err
}
func CollectAMQPChannelStatus(patterns string) error {
var err error
traceEntry("CollectAMQPChannelStatus")
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
os.objectSeen = make(map[string]bool) // Record which channels have been seen in this period
ChannelAMQPInitAttributes()
// Empty any collected values
for k := range st.Attributes |
for k := range amqpInfoMap {
amqpInfoMap[k].AttrCurInst = 0
}
channelPatterns := strings.Split(patterns, ",")
if len(channelPatterns) == 0 {
traceExit("CollectAMQPChannelStatus", 1)
return nil
}
for _, pattern := range channelPatterns {
pattern = strings.TrimSpace(pattern)
if len(pattern) == 0 {
continue
}
// This would allow us to extract SAVED information too
errCurrent := collectAMQPChannelStatus(pattern, ibmmq.MQOT_CURRENT_CHANNEL)
err = errCurrent
}
// Need to clean out the prevValues elements to stop short-lived channels
// building up in the map
for a, _ := range st.Attributes {
if st.Attributes[a].delta {
m := st.Attributes[a].prevValues
for key, _ := range m {
if _, ok := os.objectSeen[key]; ok {
// Leave it in the map
} else {
// need to delete it from the map
delete(m, key)
}
}
}
}
// Set the metrics corresponding to attributes for all the monitored channels
// The current instance count is not, strictly speaking, an attribute but it's a way
// of providing a metric alongside each channel which shows how many there are of that name.
// All instances of the same channel name, regardless of other aspects (eg remote connName)
// are given the same instance count so it could be extracted.
for key, _ := range st.Attributes[ATTR_CHL_NAME].Values {
chlName := st.Attributes[ATTR_CHL_NAME].Values[key].ValueString
if s, ok := amqpInfoMap[chlName]; ok {
maxInstC := s.AttrMaxInstC
st.Attributes[ATTR_CHL_MAX_INSTC].Values[key] = newStatusValueInt64(maxInstC)
maxInst := s.AttrMaxInst
st.Attributes[ATTR_CHL_MAX_INST].Values[key] = newStatusValueInt64(maxInst)
curInst := s.AttrCurInst
st.Attributes[ATTR_CHL_CUR_INST].Values[key] = newStatusValueInt64(curInst)
}
}
traceExitErr("CollectAMQPChannelStatus", 0, err)
return err
}
// Issue the INQUIRE_CHANNEL_STATUS command for a channel or wildcarded channel name
// Collect the responses and build up the statistics. Add CLIENTID(*) to get the actual
// instances instead of an aggregated response
func collectAMQPChannelStatus(pattern string, instanceType int32) error {
var err error
traceEntryF("collectAMQPChannelStatus", "Pattern: %s", pattern)
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
statusClearReplyQ()
putmqmd, pmo, cfh, buf := statusSetCommandHeaders()
// Can allow all the other fields to default
cfh.Command = ibmmq.MQCMD_INQUIRE_CHANNEL_STATUS
// Add the parameters one at a time into a buffer
pcfparm := new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CHANNEL_NAME
pcfparm.String = []string{pattern}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Add the parameters one at a time into a buffer
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER
pcfparm.Parameter = ibmmq.MQIACH_CHANNEL_TYPE
pcfparm.Int64Value = []int64{int64(ibmmq.MQCHT_AMQP)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CLIENT_ID
pcfparm.String = []string{"*"}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Once we know the total number of parameters, put the
// CFH header on the front of the buffer.
buf = append(cfh.Bytes(), buf...)
// And now put the command to the queue
err = ci.si.cmdQObj.Put(putmqmd, pmo, buf)
if err != nil {
traceExitErr("collectAMQPChannelStatus", 1, err)
return err
}
// Now get the responses - loop until all have been received (one
// per channel) or we run out of time
for allReceived := false; !allReceived; {
cfh, buf, allReceived, err = statusGetReply(putmqmd.MsgId)
if buf != nil {
key := parseAMQPChlData(instanceType, cfh, buf)
if key != "" {
os.objectSeen[key] = true
}
}
}
traceExitErr("collectAMQPChannelStatus", 0, err)
return err
}
// Given a PCF response message, parse it to extract the desired statistics
func parseAMQPChlData(instanceType int32, cfh *ibmmq.MQCFH, buf []byte) string {
var elem *ibmmq.PCFParameter
traceEntry("parseAMQPChlData")
ci := getConnection(GetConnectionKey())
//os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
chlName := ""
connName := ""
clientId := ""
key := ""
lastMsgDate := ""
lastMsgTime := ""
parmAvail := true
bytesRead := 0
offset := 0
datalen := len(buf)
if cfh == nil || cfh.ParameterCount == 0 {
traceExit("parseAMQPChlData", 1)
return ""
}
// Parse it once to extract the fields that are needed for the map key
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
switch elem.Parameter {
case ibmmq.MQCACH_CHANNEL_NAME:
chlName = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_CONNECTION_NAME:
connName = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_CLIENT_ID:
clientId = strings.TrimSpace(elem.String[0])
}
}
// Create a unique key for this channel instance
if connName == "" {
connName = DUMMY_STRING
}
if ci.hideAMQPClientId {
clientId = DUMMY_STRING
}
key = chlName + "/" + connName + "/" + clientId
logDebug("AMQP status - key: %s", key)
st.Attributes[ATTR_CHL_NAME].Values[key] = newStatusValueString(chlName)
st.Attributes[ATTR_CHL_CONNNAME].Values[key] = newStatusValueString(connName)
st.Attributes[ATTR_CHL_AMQP_CLIENT_ID].Values[key] = newStatusValueString(clientId)
// And then re-parse the message so we can store the metrics now knowing the map key
parmAvail = true
offset = 0
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
if !statusGetIntAttributes(GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP), elem, key) {
switch elem.Parameter {
case ibmmq.MQCACH_LAST_MSG_TIME:
lastMsgTime = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_LAST_MSG_DATE:
lastMsgDate = strings.TrimSpace(elem.String[0])
}
}
}
now := time.Now()
diff := statusTimeDiff(now, lastMsgDate, lastMsgTime)
st.Attributes[ATTR_CHL_SINCE_MSG].Values[key] = newStatusValueInt64(diff)
// Bump the number of active instances of the channel, treating it a bit like a
// regular config attribute.
if s, ok := amqpInfoMap[chlName]; ok {
s.AttrCurInst++
}
traceExitF("parseAMQPChlData", 0, "Key: %s", key)
return key
}
// Issue the INQUIRE_CHANNEL call for wildcarded channel names and
// extract the required attributes
func inquireAMQPChannelAttributes(objectPatternsList string, infoMap map[string]*ObjInfo) error {
var err error
traceEntry("inquireAMQPChannelAttributes")
ci := getConnection(GetConnectionKey())
statusClearReplyQ()
if objectPatternsList == "" {
traceExitErr("inquireAMQPChannelAttributes", 1, err)
return err
}
objectPatterns := strings.Split(strings.TrimSpace(objectPatternsList), ",")
for i := 0; i < len(objectPatterns) && err == nil; i++ {
var buf []byte
pattern := strings.TrimSpace(objectPatterns[i])
if len(pattern) == 0 {
continue
}
putmqmd, pmo, cfh, buf := statusSetCommandHeaders()
// Can allow all the other fields to default
cfh.Command = ibmmq.MQCMD_INQUIRE_CHANNEL
cfh.ParameterCount = 0
// Add the parameters one at a time into a buffer
pcfparm := new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CHANNEL_NAME
pcfparm.String = []string{pattern}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER
pcfparm.Parameter = ibmmq.MQIACH_CHANNEL_TYPE
pcfparm.Int64Value = []int64{int64(ibmmq.MQCHT_AMQP)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER_LIST
pcfparm.Parameter = ibmmq.MQIACF_CHANNEL_ATTRS
pcfparm.Int64Value = []int64{int64(ibmmq.MQIACH_MAX_INSTANCES), int64(ibmmq.MQIACH_MAX_INSTS_PER_CLIENT), int64(ibmmq.MQCACH_DESC), int64(ibmmq.MQIACH_CHANNEL_TYPE)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Once we know the total number of parameters, put the
// CFH header on the front of the buffer.
buf = append(cfh.Bytes(), buf...)
// And now put the command to the queue
err = ci.si.cmdQObj.Put(putmqmd, pmo, buf)
if err != nil {
traceExitErr("inquireAMQPChannelAttributes", 2, err)
return err
}
for allReceived := false; !allReceived; {
cfh, buf, allReceived, err = statusGetReply(putmqmd.MsgId)
if buf != nil {
parseAMQPChannelAttrData(cfh, buf, infoMap)
}
}
}
traceExit("inquireAMQPChannelAttributes", 0)
return nil
}
func parseAMQPChannelAttrData(cfh *ibmmq.MQCFH, buf []byte, infoMap map[string]*ObjInfo) {
var elem *ibmmq.PCFParameter
var ci *ObjInfo
var ok bool
traceEntry("parseAMQPChannelAttrData")
chlName := ""
parmAvail := true
bytesRead := 0
offset := 0
datalen := len(buf)
if cfh.ParameterCount == 0 {
traceExit("parseAMQPChannelAttrData", 1)
return
}
// Parse it once to extract the fields that are needed for the map key
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
// Only one field needed for channels
switch elem.Parameter {
case ibmmq.MQCACH_CHANNEL_NAME:
chlName = strings.TrimSpace(elem.String[0])
}
}
// And then re-parse the message so we can store the metrics now knowing the map key
parmAvail = true
offset = 0
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
switch elem.Parameter {
case ibmmq.MQIACH_MAX_INSTANCES:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrMaxInst = v
ci.exists = true
}
case ibmmq.MQIACH_MAX_INSTS_PER_CLIENT:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrMaxInstC = v
ci.exists = true
}
case ibmmq.MQIACH_CHANNEL_TYPE:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrChlType = v
ci.exists = true
}
case ibmmq.MQCACH_DESC:
v := elem.String[0]
if v != "" {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.Description = printableStringUTF8(v)
ci.exists = true
}
}
}
traceExit("parseAMQPChannelAttrData", 0)
return
}
| {
st.Attributes[k].Values = make(map[string]*StatusValue)
} | conditional_block |
channelamqp.go | /*
Package mqmetric contains a set of routines common to several
commands used to export MQ metrics to different backend
storage mechanisms including Prometheus and InfluxDB.
*/
package mqmetric
/*
Copyright (c) IBM Corporation 2016, 2022
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
Mark Taylor - Initial Contribution
*/
/*
Functions in this file use the DISPLAY CHSTATUS CLIENTID(*) command to extract metrics
about running MQ AMQP channels
*/
import (
_ "fmt"
"strings"
"time"
"github.com/ibm-messaging/mq-golang/v5/ibmmq"
)
const (
// Most of the ATTR_ fields can be inherited from the channel.go module
ATTR_CHL_AMQP_CLIENT_ID = "clientid"
ATTR_CHL_AMQP_MESSAGES_RECEIVED = "messages_rcvd"
ATTR_CHL_AMQP_MESSAGES_SENT = "messages_sent"
ATTR_CHL_AMQP_CONNECTIONS = "connection_count"
)
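// These attributes are populated from the PCF equivalent of the MQSC
// "DISPLAY CHSTATUS ... CLIENTID(*)" command (MQCMD_INQUIRE_CHANNEL_STATUS restricted
// to MQCHT_AMQP channels). The message counts come back from the queue manager as
// cumulative totals, so they are flagged as delta values below and the collector
// reports the difference between successive collections.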
/*
Unlike the statistics produced via a topic, there is no discovery
of the attributes available in object STATUS queries. There is also
no discovery of descriptions for them. So this function hardcodes the
attributes we are going to look for and gives the associated descriptive
text.
*/
func ChannelAMQPInitAttributes() {
traceEntry("ChannelAMQPInitAttributes")
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
if os.init {
traceExit("ChannelAMQPInitAttributes", 1)
return
}
st.Attributes = make(map[string]*StatusAttribute)
// These fields are used to construct the key to the per-channel map values and
// as tags to uniquely identify a channel instance
attr := ATTR_CHL_NAME
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Channel Name")
attr = ATTR_CHL_AMQP_CLIENT_ID
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Client ID")
// Some other fields
attr = ATTR_CHL_CONNNAME
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Connection Name")
// These are the integer status fields that are of interest
attr = ATTR_CHL_AMQP_MESSAGES_RECEIVED
st.Attributes[attr] = newStatusAttribute(attr, "Messages Received", ibmmq.MQIACH_MSGS_RCVD)
st.Attributes[attr].delta = true // We have to manage the differences as MQ reports cumulative values
attr = ATTR_CHL_AMQP_MESSAGES_SENT
st.Attributes[attr] = newStatusAttribute(attr, "Messages Sent", ibmmq.MQIACH_MSGS_SENT)
st.Attributes[attr].delta = true // We have to manage the differences as MQ reports cumulative values
attr = ATTR_CHL_AMQP_CONNECTIONS
st.Attributes[attr] = newStatusAttribute(attr, "Connections", ibmmq.MQIACF_CONNECTION_COUNT)
// This is decoded by MQCHS_* values
attr = ATTR_CHL_STATUS
st.Attributes[attr] = newStatusAttribute(attr, "Channel Status", ibmmq.MQIACH_CHANNEL_STATUS)
attr = ATTR_CHL_SINCE_MSG
st.Attributes[attr] = newStatusAttribute(attr, "Time Since Msg", -1)
// These are not really monitoring metrics but it may enable calculations to be made such as %used for
// the channel instance availability. It's extracted at startup of the program via INQUIRE_CHL and not updated later
// until rediscovery is done based on a separate schedule.
attr = ATTR_CHL_MAX_INST
st.Attributes[attr] = newStatusAttribute(attr, "MaxInst", -1)
attr = ATTR_CHL_MAX_INSTC
st.Attributes[attr] = newStatusAttribute(attr, "MaxInstC", -1)
// Current Instances is treated a bit oddly. Although reported on each channel status,
// it actually refers to the total number of instances of the same name.
attr = ATTR_CHL_CUR_INST
st.Attributes[attr] = newStatusAttribute(attr, "Current Instances", -1)
os.init = true
traceExit("ChannelAMQPInitAttributes", 0)
}
// If we need to list the channels that match a pattern. Not needed for
// the status queries as they (unlike the pub/sub resource stats) accept
// patterns in the PCF command
func InquireAMQPChannels(patterns string) ([]string, error) {
traceEntry("InquireAMQPChannels")
ChannelAMQPInitAttributes()
rc, err := inquireObjectsWithFilter(patterns, ibmmq.MQOT_CHANNEL, OT_CHANNEL_AMQP)
traceExitErr("InquireAMQPChannels", 0, err)
return rc, err
}
func CollectAMQPChannelStatus(patterns string) error |
// Issue the INQUIRE_CHANNEL_STATUS command for a channel or wildcarded channel name
// Collect the responses and build up the statistics. Add CLIENTID(*) to get the actual
// instances instead of an aggregated response
func collectAMQPChannelStatus(pattern string, instanceType int32) error {
var err error
traceEntryF("collectAMQPChannelStatus", "Pattern: %s", pattern)
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
statusClearReplyQ()
putmqmd, pmo, cfh, buf := statusSetCommandHeaders()
// Can allow all the other fields to default
cfh.Command = ibmmq.MQCMD_INQUIRE_CHANNEL_STATUS
// Add the parameters one at a time into a buffer
pcfparm := new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CHANNEL_NAME
pcfparm.String = []string{pattern}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Add the parameters one at a time into a buffer
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER
pcfparm.Parameter = ibmmq.MQIACH_CHANNEL_TYPE
pcfparm.Int64Value = []int64{int64(ibmmq.MQCHT_AMQP)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CLIENT_ID
pcfparm.String = []string{"*"}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Once we know the total number of parameters, put the
// CFH header on the front of the buffer.
buf = append(cfh.Bytes(), buf...)
// And now put the command to the queue
err = ci.si.cmdQObj.Put(putmqmd, pmo, buf)
if err != nil {
traceExitErr("collectAMQPChannelStatus", 1, err)
return err
}
// Now get the responses - loop until all have been received (one
// per channel) or we run out of time
for allReceived := false; !allReceived; {
cfh, buf, allReceived, err = statusGetReply(putmqmd.MsgId)
if buf != nil {
key := parseAMQPChlData(instanceType, cfh, buf)
if key != "" {
os.objectSeen[key] = true
}
}
}
traceExitErr("collectAMQPChannelStatus", 0, err)
return err
}
// Given a PCF response message, parse it to extract the desired statistics
func parseAMQPChlData(instanceType int32, cfh *ibmmq.MQCFH, buf []byte) string {
var elem *ibmmq.PCFParameter
traceEntry("parseAMQPChlData")
ci := getConnection(GetConnectionKey())
//os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
chlName := ""
connName := ""
clientId := ""
key := ""
lastMsgDate := ""
lastMsgTime := ""
parmAvail := true
bytesRead := 0
offset := 0
datalen := len(buf)
if cfh == nil || cfh.ParameterCount == 0 {
traceExit("parseAMQPChlData", 1)
return ""
}
// Parse it once to extract the fields that are needed for the map key
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
switch elem.Parameter {
case ibmmq.MQCACH_CHANNEL_NAME:
chlName = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_CONNECTION_NAME:
connName = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_CLIENT_ID:
clientId = strings.TrimSpace(elem.String[0])
}
}
// Create a unique key for this channel instance
if connName == "" {
connName = DUMMY_STRING
}
if ci.hideAMQPClientId {
clientId = DUMMY_STRING
}
key = chlName + "/" + connName + "/" + clientId
logDebug("AMQP status - key: %s", key)
st.Attributes[ATTR_CHL_NAME].Values[key] = newStatusValueString(chlName)
st.Attributes[ATTR_CHL_CONNNAME].Values[key] = newStatusValueString(connName)
st.Attributes[ATTR_CHL_AMQP_CLIENT_ID].Values[key] = newStatusValueString(clientId)
// And then re-parse the message so we can store the metrics now knowing the map key
parmAvail = true
offset = 0
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
if !statusGetIntAttributes(GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP), elem, key) {
switch elem.Parameter {
case ibmmq.MQCACH_LAST_MSG_TIME:
lastMsgTime = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_LAST_MSG_DATE:
lastMsgDate = strings.TrimSpace(elem.String[0])
}
}
}
now := time.Now()
diff := statusTimeDiff(now, lastMsgDate, lastMsgTime)
st.Attributes[ATTR_CHL_SINCE_MSG].Values[key] = newStatusValueInt64(diff)
// Bump the number of active instances of the channel, treating it a bit like a
// regular config attribute.
if s, ok := amqpInfoMap[chlName]; ok {
s.AttrCurInst++
}
traceExitF("parseAMQPChlData", 0, "Key: %s", key)
return key
}
// Issue the INQUIRE_CHANNEL call for wildcarded channel names and
// extract the required attributes
func inquireAMQPChannelAttributes(objectPatternsList string, infoMap map[string]*ObjInfo) error {
var err error
traceEntry("inquireAMQPChannelAttributes")
ci := getConnection(GetConnectionKey())
statusClearReplyQ()
if objectPatternsList == "" {
traceExitErr("inquireAMQPChannelAttributes", 1, err)
return err
}
objectPatterns := strings.Split(strings.TrimSpace(objectPatternsList), ",")
for i := 0; i < len(objectPatterns) && err == nil; i++ {
var buf []byte
pattern := strings.TrimSpace(objectPatterns[i])
if len(pattern) == 0 {
continue
}
putmqmd, pmo, cfh, buf := statusSetCommandHeaders()
// Can allow all the other fields to default
cfh.Command = ibmmq.MQCMD_INQUIRE_CHANNEL
cfh.ParameterCount = 0
// Add the parameters one at a time into a buffer
pcfparm := new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CHANNEL_NAME
pcfparm.String = []string{pattern}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER
pcfparm.Parameter = ibmmq.MQIACH_CHANNEL_TYPE
pcfparm.Int64Value = []int64{int64(ibmmq.MQCHT_AMQP)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER_LIST
pcfparm.Parameter = ibmmq.MQIACF_CHANNEL_ATTRS
pcfparm.Int64Value = []int64{int64(ibmmq.MQIACH_MAX_INSTANCES), int64(ibmmq.MQIACH_MAX_INSTS_PER_CLIENT), int64(ibmmq.MQCACH_DESC), int64(ibmmq.MQIACH_CHANNEL_TYPE)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Once we know the total number of parameters, put the
// CFH header on the front of the buffer.
buf = append(cfh.Bytes(), buf...)
// And now put the command to the queue
err = ci.si.cmdQObj.Put(putmqmd, pmo, buf)
if err != nil {
traceExitErr("inquireAMQPChannelAttributes", 2, err)
return err
}
for allReceived := false; !allReceived; {
cfh, buf, allReceived, err = statusGetReply(putmqmd.MsgId)
if buf != nil {
parseAMQPChannelAttrData(cfh, buf, infoMap)
}
}
}
traceExit("inquireAMQPChannelAttributes", 0)
return nil
}
func parseAMQPChannelAttrData(cfh *ibmmq.MQCFH, buf []byte, infoMap map[string]*ObjInfo) {
var elem *ibmmq.PCFParameter
var ci *ObjInfo
var ok bool
traceEntry("parseAMQPChannelAttrData")
chlName := ""
parmAvail := true
bytesRead := 0
offset := 0
datalen := len(buf)
if cfh.ParameterCount == 0 {
traceExit("parseAMQPChannelAttrData", 1)
return
}
// Parse it once to extract the fields that are needed for the map key
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
// Only one field needed for channels
switch elem.Parameter {
case ibmmq.MQCACH_CHANNEL_NAME:
chlName = strings.TrimSpace(elem.String[0])
}
}
// And then re-parse the message so we can store the metrics now knowing the map key
parmAvail = true
offset = 0
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
switch elem.Parameter {
case ibmmq.MQIACH_MAX_INSTANCES:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrMaxInst = v
ci.exists = true
}
case ibmmq.MQIACH_MAX_INSTS_PER_CLIENT:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrMaxInstC = v
ci.exists = true
}
case ibmmq.MQIACH_CHANNEL_TYPE:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrChlType = v
ci.exists = true
}
case ibmmq.MQCACH_DESC:
v := elem.String[0]
if v != "" {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.Description = printableStringUTF8(v)
ci.exists = true
}
}
}
traceExit("parseAMQPChannelAttrData", 0)
return
}
| {
var err error
traceEntry("CollectAMQPChannelStatus")
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
os.objectSeen = make(map[string]bool) // Record which channels have been seen in this period
ChannelAMQPInitAttributes()
// Empty any collected values
for k := range st.Attributes {
st.Attributes[k].Values = make(map[string]*StatusValue)
}
for k := range amqpInfoMap {
amqpInfoMap[k].AttrCurInst = 0
}
channelPatterns := strings.Split(patterns, ",")
if len(channelPatterns) == 0 {
traceExit("CollectAMQPChannelStatus", 1)
return nil
}
for _, pattern := range channelPatterns {
pattern = strings.TrimSpace(pattern)
if len(pattern) == 0 {
continue
}
// This would allow us to extract SAVED information too
errCurrent := collectAMQPChannelStatus(pattern, ibmmq.MQOT_CURRENT_CHANNEL)
err = errCurrent
}
// Need to clean out the prevValues elements to stop short-lived channels
// building up in the map
for a := range st.Attributes {
if st.Attributes[a].delta {
m := st.Attributes[a].prevValues
for key := range m {
if _, ok := os.objectSeen[key]; ok {
// Leave it in the map
} else {
// need to delete it from the map
delete(m, key)
}
}
}
}
// Set the metrics corresponding to attributes for all the monitored channels
// The current instance count is not, strictly speaking, an attribute, but it is a way
// of providing a metric alongside each channel that shows how many instances of that name exist.
// All instances of the same channel name, regardless of other aspects (eg remote connName),
// are given the same instance count so any one of them can be used to read it.
for key := range st.Attributes[ATTR_CHL_NAME].Values {
chlName := st.Attributes[ATTR_CHL_NAME].Values[key].ValueString
if s, ok := amqpInfoMap[chlName]; ok {
maxInstC := s.AttrMaxInstC
st.Attributes[ATTR_CHL_MAX_INSTC].Values[key] = newStatusValueInt64(maxInstC)
maxInst := s.AttrMaxInst
st.Attributes[ATTR_CHL_MAX_INST].Values[key] = newStatusValueInt64(maxInst)
curInst := s.AttrCurInst
st.Attributes[ATTR_CHL_CUR_INST].Values[key] = newStatusValueInt64(curInst)
}
}
traceExitErr("CollectAMQPChannelStatus", 0, err)
return err
} | identifier_body |
channelamqp.go | /*
Package mqmetric contains a set of routines common to several
commands used to export MQ metrics to different backend
storage mechanisms including Prometheus and InfluxDB.
*/
package mqmetric
/*
Copyright (c) IBM Corporation 2016, 2022
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
Mark Taylor - Initial Contribution
*/
/*
Functions in this file use the DISPLAY CHSTATUS CLIENTID(*) command to extract metrics
about running MQ AMQP channels
*/
import (
_ "fmt"
"strings"
"time"
"github.com/ibm-messaging/mq-golang/v5/ibmmq"
)
const (
// Most of the ATTR_ fields can be inherited from the channel.go module
ATTR_CHL_AMQP_CLIENT_ID = "clientid"
ATTR_CHL_AMQP_MESSAGES_RECEIVED = "messages_rcvd"
ATTR_CHL_AMQP_MESSAGES_SENT = "messages_sent"
ATTR_CHL_AMQP_CONNECTIONS = "connection_count"
)
/*
Unlike the statistics produced via a topic, there is no discovery
of the attributes available in object STATUS queries. There is also
no discovery of descriptions for them. So this function hardcodes the
attributes we are going to look for and gives the associated descriptive
text.
*/
func ChannelAMQPInitAttributes() {
traceEntry("ChannelAMQPInitAttributes")
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
if os.init {
traceExit("ChannelAMQPInitAttributes", 1)
return
}
st.Attributes = make(map[string]*StatusAttribute)
// These fields are used to construct the key to the per-channel map values and
// as tags to uniquely identify a channel instance
attr := ATTR_CHL_NAME
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Channel Name")
attr = ATTR_CHL_AMQP_CLIENT_ID
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Client ID")
// Some other fields
attr = ATTR_CHL_CONNNAME
st.Attributes[attr] = newPseudoStatusAttribute(attr, "Connection Name")
// These are the integer status fields that are of interest
attr = ATTR_CHL_AMQP_MESSAGES_RECEIVED
st.Attributes[attr] = newStatusAttribute(attr, "Messages Received", ibmmq.MQIACH_MSGS_RCVD)
st.Attributes[attr].delta = true // We have to manage the differences as MQ reports cumulative values
attr = ATTR_CHL_AMQP_MESSAGES_SENT
st.Attributes[attr] = newStatusAttribute(attr, "Messages Sent", ibmmq.MQIACH_MSGS_SENT)
st.Attributes[attr].delta = true // We have to manage the differences as MQ reports cumulative values
attr = ATTR_CHL_AMQP_CONNECTIONS
st.Attributes[attr] = newStatusAttribute(attr, "Connections", ibmmq.MQIACF_CONNECTION_COUNT)
// This is decoded by MQCHS_* values
attr = ATTR_CHL_STATUS
st.Attributes[attr] = newStatusAttribute(attr, "Channel Status", ibmmq.MQIACH_CHANNEL_STATUS)
attr = ATTR_CHL_SINCE_MSG
st.Attributes[attr] = newStatusAttribute(attr, "Time Since Msg", -1)
// These are not really monitoring metrics, but they may enable calculations such as the %used of
// the available channel instances. They are extracted at startup of the program via INQUIRE_CHL and not updated again
// until rediscovery is done on a separate schedule.
attr = ATTR_CHL_MAX_INST
st.Attributes[attr] = newStatusAttribute(attr, "MaxInst", -1)
attr = ATTR_CHL_MAX_INSTC
st.Attributes[attr] = newStatusAttribute(attr, "MaxInstC", -1)
// Current Instances is treated a bit oddly. Although reported on each channel status,
// it actually refers to the total number of instances of the same name.
attr = ATTR_CHL_CUR_INST
st.Attributes[attr] = newStatusAttribute(attr, "Current Instances", -1)
os.init = true
traceExit("ChannelAMQPInitAttributes", 0)
}
// List the channels that match a pattern. This is not needed for
// the status queries as they (unlike the pub/sub resource stats) accept
// patterns in the PCF command.
func InquireAMQPChannels(patterns string) ([]string, error) {
traceEntry("InquireAMQPChannels")
ChannelAMQPInitAttributes()
rc, err := inquireObjectsWithFilter(patterns, ibmmq.MQOT_CHANNEL, OT_CHANNEL_AMQP)
traceExitErr("InquireAMQPChannels", 0, err)
return rc, err
}
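// Illustrative sketch only (not part of the original file): how a caller inside
// this package might drive the two public AMQP entry points in this file and then
// count the channel instances that were found. The function name and return shape
// are assumptions made for illustration.
func exampleCollectAMQP(patterns string) (int, error) {
	// Discover the matching channels and their configuration attributes
	if _, err := InquireAMQPChannels(patterns); err != nil {
		return 0, err
	}
	// Gather the current status for the same patterns
	if err := CollectAMQPChannelStatus(patterns); err != nil {
		return 0, err
	}
	// Each key in the channel name attribute map is one channel instance
	st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
	return len(st.Attributes[ATTR_CHL_NAME].Values), nil
}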
func CollectAMQPChannelStatus(patterns string) error {
var err error
traceEntry("CollectAMQPChannelStatus")
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
os.objectSeen = make(map[string]bool) // Record which channels have been seen in this period
ChannelAMQPInitAttributes()
// Empty any collected values
for k := range st.Attributes {
st.Attributes[k].Values = make(map[string]*StatusValue)
}
for k := range amqpInfoMap {
amqpInfoMap[k].AttrCurInst = 0
}
channelPatterns := strings.Split(patterns, ",")
if len(channelPatterns) == 0 {
traceExit("CollectAMQPChannelStatus", 1)
return nil
}
for _, pattern := range channelPatterns {
pattern = strings.TrimSpace(pattern)
if len(pattern) == 0 {
continue
}
// This would allow us to extract SAVED information too
errCurrent := collectAMQPChannelStatus(pattern, ibmmq.MQOT_CURRENT_CHANNEL)
err = errCurrent
}
// Need to clean out the prevValues elements to stop short-lived channels
// building up in the map
for a := range st.Attributes {
if st.Attributes[a].delta {
m := st.Attributes[a].prevValues
for key := range m {
if _, ok := os.objectSeen[key]; ok {
// Leave it in the map
} else {
// need to delete it from the map
delete(m, key)
}
}
}
}
// Set the metrics corresponding to attributes for all the monitored channels
// The current instance count is not, strictly speaking, an attribute, but it is a way
// of providing a metric alongside each channel that shows how many instances of that name exist.
// All instances of the same channel name, regardless of other aspects (eg remote connName),
// are given the same instance count so any one of them can be used to read it.
for key := range st.Attributes[ATTR_CHL_NAME].Values {
chlName := st.Attributes[ATTR_CHL_NAME].Values[key].ValueString
if s, ok := amqpInfoMap[chlName]; ok {
maxInstC := s.AttrMaxInstC
st.Attributes[ATTR_CHL_MAX_INSTC].Values[key] = newStatusValueInt64(maxInstC)
maxInst := s.AttrMaxInst
st.Attributes[ATTR_CHL_MAX_INST].Values[key] = newStatusValueInt64(maxInst)
curInst := s.AttrCurInst
st.Attributes[ATTR_CHL_CUR_INST].Values[key] = newStatusValueInt64(curInst)
}
}
traceExitErr("CollectAMQPChannelStatus", 0, err)
return err
}
// Issue the INQUIRE_CHANNEL_STATUS command for a channel or wildcarded channel name
// Collect the responses and build up the statistics. Add CLIENTID(*) to get the actual
// instances instead of an aggregated response
func collectAMQPChannelStatus(pattern string, instanceType int32) error {
var err error
traceEntryF("collectAMQPChannelStatus", "Pattern: %s", pattern)
ci := getConnection(GetConnectionKey())
os := &ci.objectStatus[OT_CHANNEL_AMQP]
statusClearReplyQ()
putmqmd, pmo, cfh, buf := statusSetCommandHeaders()
// Can allow all the other fields to default
cfh.Command = ibmmq.MQCMD_INQUIRE_CHANNEL_STATUS
// Add the parameters one at a time into a buffer
pcfparm := new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CHANNEL_NAME
pcfparm.String = []string{pattern}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Add the parameters one at a time into a buffer
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER
pcfparm.Parameter = ibmmq.MQIACH_CHANNEL_TYPE
pcfparm.Int64Value = []int64{int64(ibmmq.MQCHT_AMQP)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CLIENT_ID
pcfparm.String = []string{"*"}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Once we know the total number of parameters, put the
// CFH header on the front of the buffer.
buf = append(cfh.Bytes(), buf...)
// And now put the command to the queue
err = ci.si.cmdQObj.Put(putmqmd, pmo, buf)
if err != nil {
traceExitErr("collectAMQPChannelStatus", 1, err)
return err
}
// Now get the responses - loop until all have been received (one
// per channel) or we run out of time
for allReceived := false; !allReceived; {
cfh, buf, allReceived, err = statusGetReply(putmqmd.MsgId)
if buf != nil {
key := parseAMQPChlData(instanceType, cfh, buf)
if key != "" {
os.objectSeen[key] = true
}
}
}
traceExitErr("collectAMQPChannelStatus", 0, err)
return err
}
// Given a PCF response message, parse it to extract the desired statistics
func parseAMQPChlData(instanceType int32, cfh *ibmmq.MQCFH, buf []byte) string {
var elem *ibmmq.PCFParameter
traceEntry("parseAMQPChlData")
ci := getConnection(GetConnectionKey())
//os := &ci.objectStatus[OT_CHANNEL_AMQP]
st := GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP)
chlName := ""
connName := ""
clientId := ""
key := ""
lastMsgDate := ""
lastMsgTime := ""
parmAvail := true
bytesRead := 0
offset := 0
datalen := len(buf)
if cfh == nil || cfh.ParameterCount == 0 {
traceExit("parseAMQPChlData", 1)
return ""
}
// Parse it once to extract the fields that are needed for the map key
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
switch elem.Parameter {
case ibmmq.MQCACH_CHANNEL_NAME:
chlName = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_CONNECTION_NAME:
connName = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_CLIENT_ID:
clientId = strings.TrimSpace(elem.String[0])
}
}
// Create a unique key for this channel instance
if connName == "" {
connName = DUMMY_STRING
}
if ci.hideAMQPClientId {
clientId = DUMMY_STRING
}
key = chlName + "/" + connName + "/" + clientId
logDebug("AMQP status - key: %s", key)
st.Attributes[ATTR_CHL_NAME].Values[key] = newStatusValueString(chlName)
st.Attributes[ATTR_CHL_CONNNAME].Values[key] = newStatusValueString(connName)
st.Attributes[ATTR_CHL_AMQP_CLIENT_ID].Values[key] = newStatusValueString(clientId)
// And then re-parse the message so we can store the metrics now knowing the map key
parmAvail = true
offset = 0
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
if !statusGetIntAttributes(GetObjectStatus(GetConnectionKey(), OT_CHANNEL_AMQP), elem, key) {
switch elem.Parameter {
case ibmmq.MQCACH_LAST_MSG_TIME:
lastMsgTime = strings.TrimSpace(elem.String[0])
case ibmmq.MQCACH_LAST_MSG_DATE:
lastMsgDate = strings.TrimSpace(elem.String[0])
}
}
}
now := time.Now()
diff := statusTimeDiff(now, lastMsgDate, lastMsgTime)
st.Attributes[ATTR_CHL_SINCE_MSG].Values[key] = newStatusValueInt64(diff)
// Bump the number of active instances of the channel, treating it a bit like a
// regular config attribute.
if s, ok := amqpInfoMap[chlName]; ok {
s.AttrCurInst++
}
traceExitF("parseAMQPChlData", 0, "Key: %s", key)
return key
}
// Issue the INQUIRE_CHANNEL call for wildcarded channel names and
// extract the required attributes
func | (objectPatternsList string, infoMap map[string]*ObjInfo) error {
var err error
traceEntry("inquireAMQPChannelAttributes")
ci := getConnection(GetConnectionKey())
statusClearReplyQ()
if objectPatternsList == "" {
traceExitErr("inquireAMQPChannelAttributes", 1, err)
return err
}
objectPatterns := strings.Split(strings.TrimSpace(objectPatternsList), ",")
for i := 0; i < len(objectPatterns) && err == nil; i++ {
var buf []byte
pattern := strings.TrimSpace(objectPatterns[i])
if len(pattern) == 0 {
continue
}
putmqmd, pmo, cfh, buf := statusSetCommandHeaders()
// Can allow all the other fields to default
cfh.Command = ibmmq.MQCMD_INQUIRE_CHANNEL
cfh.ParameterCount = 0
// Add the parameters one at a time into a buffer
pcfparm := new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_STRING
pcfparm.Parameter = ibmmq.MQCACH_CHANNEL_NAME
pcfparm.String = []string{pattern}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER
pcfparm.Parameter = ibmmq.MQIACH_CHANNEL_TYPE
pcfparm.Int64Value = []int64{int64(ibmmq.MQCHT_AMQP)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
pcfparm = new(ibmmq.PCFParameter)
pcfparm.Type = ibmmq.MQCFT_INTEGER_LIST
pcfparm.Parameter = ibmmq.MQIACF_CHANNEL_ATTRS
pcfparm.Int64Value = []int64{int64(ibmmq.MQIACH_MAX_INSTANCES), int64(ibmmq.MQIACH_MAX_INSTS_PER_CLIENT), int64(ibmmq.MQCACH_DESC), int64(ibmmq.MQIACH_CHANNEL_TYPE)}
cfh.ParameterCount++
buf = append(buf, pcfparm.Bytes()...)
// Once we know the total number of parameters, put the
// CFH header on the front of the buffer.
buf = append(cfh.Bytes(), buf...)
// And now put the command to the queue
err = ci.si.cmdQObj.Put(putmqmd, pmo, buf)
if err != nil {
traceExitErr("inquireAMQPChannelAttributes", 2, err)
return err
}
for allReceived := false; !allReceived; {
cfh, buf, allReceived, err = statusGetReply(putmqmd.MsgId)
if buf != nil {
parseAMQPChannelAttrData(cfh, buf, infoMap)
}
}
}
traceExit("inquireAMQPChannelAttributes", 0)
return nil
}
func parseAMQPChannelAttrData(cfh *ibmmq.MQCFH, buf []byte, infoMap map[string]*ObjInfo) {
var elem *ibmmq.PCFParameter
var ci *ObjInfo
var ok bool
traceEntry("parseAMQPChannelAttrData")
chlName := ""
parmAvail := true
bytesRead := 0
offset := 0
datalen := len(buf)
if cfh.ParameterCount == 0 {
traceExit("parseAMQPChannelAttrData", 1)
return
}
// Parse it once to extract the fields that are needed for the map key
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
// Only one field needed for channels
switch elem.Parameter {
case ibmmq.MQCACH_CHANNEL_NAME:
chlName = strings.TrimSpace(elem.String[0])
}
}
// And then re-parse the message so we can store the metrics now knowing the map key
parmAvail = true
offset = 0
for parmAvail && cfh.CompCode != ibmmq.MQCC_FAILED {
elem, bytesRead = ibmmq.ReadPCFParameter(buf[offset:])
offset += bytesRead
// Have we now reached the end of the message
if offset >= datalen {
parmAvail = false
}
switch elem.Parameter {
case ibmmq.MQIACH_MAX_INSTANCES:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrMaxInst = v
ci.exists = true
}
case ibmmq.MQIACH_MAX_INSTS_PER_CLIENT:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrMaxInstC = v
ci.exists = true
}
case ibmmq.MQIACH_CHANNEL_TYPE:
v := elem.Int64Value[0]
if v > 0 {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.AttrChlType = v
ci.exists = true
}
case ibmmq.MQCACH_DESC:
v := elem.String[0]
if v != "" {
if ci, ok = infoMap[chlName]; !ok {
ci = new(ObjInfo)
infoMap[chlName] = ci
}
ci.Description = printableStringUTF8(v)
ci.exists = true
}
}
}
traceExit("parseAMQPChannelAttrData", 0)
return
}
| inquireAMQPChannelAttributes | identifier_name |
btckey.go | /* btckeygenie v1.0.0
* https://github.com/vsergeev/btckeygenie
* License: MIT
*/
package btckey
import (
"bytes"
"golang.org/x/crypto/ripemd160"
"crypto/sha256"
"fmt"
"io"
"math/big"
"strings"
)
/******************************************************************************/
/* ECDSA Keypair Generation */
/******************************************************************************/
var secp256k1 EllipticCurve
func init() {
/* See Certicom's SEC2 2.7.1, pg.15 */
/* secp256k1 elliptic curve parameters */
secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
secp256k1.A, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000000", 16)
secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
secp256k1.G.X, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
secp256k1.G.Y, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
secp256k1.H, _ = new(big.Int).SetString("01", 16)
}
// PublicKey represents a Bitcoin public key.
type PublicKey struct {
Point
}
// PrivateKey represents a Bitcoin private key.
type PrivateKey struct {
PublicKey
D *big.Int
}
func NewPrivateKey(d *big.Int) (*PrivateKey) {
key := &PrivateKey{D: d}
key.derive()
return key
}
// derive derives a Bitcoin public key from a Bitcoin private key.
func (priv *PrivateKey) derive() (pub *PublicKey) {
/* See Certicom's SEC1 3.2.1, pg.23 */
/* Derive public key from Q = d*G */
Q := secp256k1.ScalarBaseMult(priv.D)
/* Check that Q is on the curve */
if !secp256k1.IsOnCurve(Q) {
panic("Catastrophic math logic failure in public key derivation.")
}
priv.X = Q.X
priv.Y = Q.Y
return &priv.PublicKey
}
// GenerateKey generates a public and private key pair using random source rand.
func GenerateKey(rand io.Reader) (priv PrivateKey, err error) {
/* See Certicom's SEC1 3.2.1, pg.23 */
/* See NSA's Suite B Implementer’s Guide to FIPS 186-3 (ECDSA) A.1.1, pg.18 */
/* Select private key d randomly from [1, n) */
/* Read N bit length random bytes + 64 extra bits */
b := make([]byte, secp256k1.N.BitLen()/8+8)
_, err = io.ReadFull(rand, b)
if err != nil {
return priv, fmt.Errorf("Reading random reader: %v", err)
}
d := new(big.Int).SetBytes(b)
/* Mod n-1 to shift d into [0, n-1) range */
d.Mod(d, new(big.Int).Sub(secp256k1.N, big.NewInt(1)))
/* Add one to shift d to [1, n) range */
d.Add(d, big.NewInt(1))
priv.D = d
/* Derive public key from private key */
priv.derive()
return priv, nil
}
/******************************************************************************/
/* Base-58 Encode/Decode */
/******************************************************************************/
// b58encode encodes a byte slice b into a base-58 encoded string.
func b58encode(b []byte) (s string) {
/* See https://en.bitcoin.it/wiki/Base58Check_encoding */
const BITCOIN_BASE58_TABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
/* Convert big endian bytes to big int */
x := new(big.Int).SetBytes(b)
/* Initialize */
r := new(big.Int)
m := big.NewInt(58)
zero := big.NewInt(0)
s = ""
/* Convert big int to string */
for x.Cmp(zero) > 0 {
/* x, r = (x / 58, x % 58) */
x.QuoRem(x, m, r)
/* Prepend ASCII character */
s = string(BITCOIN_BASE58_TABLE[r.Int64()]) + s
}
return s
}
// b58decode decodes a base-58 encoded string into a byte slice b.
func b58decode(s string) (b []byte, err error) {
/* See https://en.bitcoin.it/wiki/Base58Check_encoding */
const BITCOIN_BASE58_TABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
/* Initialize */
x := big.NewInt(0)
m := big.NewInt(58)
/* Convert string to big int */
for i := 0; i < len(s); i++ {
b58index := strings.IndexByte(BITCOIN_BASE58_TABLE, s[i])
if b58index == -1 {
return nil, fmt.Errorf("Invalid base-58 character encountered: '%c', index %d.", s[i], i)
}
b58value := big.NewInt(int64(b58index))
x.Mul(x, m)
x.Add(x, b58value)
}
/* Convert big int to big endian bytes */
b = x.Bytes()
return b, nil
}
/******************************************************************************/
/* Base-58 Check Encode/Decode */
/******************************************************************************/
// b58checkencode encodes version ver and byte slice b into a base-58 check encoded string.
func b58checkencode(ver uint8, b []byte) (s string) {
/* Prepend version */
bcpy := append([]byte{ver}, b...)
/* Create a new SHA256 context */
sha256_h := sha256.New()
/* SHA256 Hash #1 */
sha256_h.Reset()
sha256_h.Write(bcpy)
hash1 := sha256_h.Sum(nil)
/* SHA256 Hash #2 */
sha256_h.Reset()
sha256_h.Write(hash1)
hash2 := sha256_h.Sum(nil)
/* Append first four bytes of hash */
bcpy = append(bcpy, hash2[0:4]...)
/* Encode base58 string */
s = b58encode(bcpy)
/* For number of leading 0's in bytes, prepend 1 */
for _, v := range bcpy {
if v != 0 {
break
}
s = "1" + s
}
return s
}
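// Illustrative sketch only (not part of the original file): a Base58Check round
// trip through the encode helper above and the decode helper below. The payload
// bytes are arbitrary, and 0x80 is the WIF version prefix used elsewhere in this file.
func exampleBase58CheckRoundTrip() error {
	payload := []byte{0x01, 0x02, 0x03, 0x04}
	encoded := b58checkencode(0x80, payload)
	ver, decoded, err := b58checkdecode(encoded)
	if err != nil {
		return err
	}
	if ver != 0x80 || !bytes.Equal(decoded, payload) {
		return fmt.Errorf("Base58Check round trip mismatch: ver=0x%02x payload=%x", ver, decoded)
	}
	return nil
}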
// b58checkdecode decodes base-58 check encoded string s into a version ver and byte slice b.
func b58checkdecode(s string) (ver uint8, b []byte, err error) {
/* Decode base58 string */
b, err = b58decode(s)
if err != nil {
return 0, nil, err
}
/* Add leading zero bytes */
for i := 0; i < len(s); i++ {
if s[i] != '1' {
| b = append([]byte{0x00}, b...)
}
/* Verify checksum */
if len(b) < 5 {
return 0, nil, fmt.Errorf("Invalid base-58 check string: missing checksum.")
}
/* Create a new SHA256 context */
sha256_h := sha256.New()
/* SHA256 Hash #1 */
sha256_h.Reset()
sha256_h.Write(b[:len(b)-4])
hash1 := sha256_h.Sum(nil)
/* SHA256 Hash #2 */
sha256_h.Reset()
sha256_h.Write(hash1)
hash2 := sha256_h.Sum(nil)
/* Compare checksum */
if bytes.Compare(hash2[0:4], b[len(b)-4:]) != 0 {
return 0, nil, fmt.Errorf("Invalid base-58 check string: invalid checksum.")
}
/* Strip checksum bytes */
b = b[:len(b)-4]
/* Extract and strip version */
ver = b[0]
b = b[1:]
return ver, b, nil
}
/******************************************************************************/
/* Bitcoin Private Key Import/Export */
/******************************************************************************/
// CheckWIF checks that string wif is a valid Wallet Import Format or Wallet Import Format Compressed string. If it is not, err is populated with the reason.
func CheckWIF(wif string) (valid bool, err error) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Base58 Check Decode the WIF string */
ver, priv_bytes, err := b58checkdecode(wif)
if err != nil {
return false, err
}
/* Check that the version byte is 0x80 */
if ver != 0x80 {
return false, fmt.Errorf("Invalid WIF version 0x%02x, expected 0x80.", ver)
}
/* Check that private key bytes length is 32 or 33 */
if len(priv_bytes) != 32 && len(priv_bytes) != 33 {
return false, fmt.Errorf("Invalid private key bytes length %d, expected 32 or 33.", len(priv_bytes))
}
/* If the private key bytes length is 33, check that suffix byte is 0x01 (for compression) */
if len(priv_bytes) == 33 && priv_bytes[len(priv_bytes)-1] != 0x01 {
return false, fmt.Errorf("Invalid private key bytes, unknown suffix byte 0x%02x.", priv_bytes[len(priv_bytes)-1])
}
return true, nil
}
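// Illustrative sketch only (not part of the original file): validating a
// user-supplied WIF string with CheckWIF before importing it via FromWIF,
// which is defined further below.
func exampleImportWIF(wif string) (*PrivateKey, error) {
	if valid, err := CheckWIF(wif); !valid {
		return nil, err
	}
	priv := new(PrivateKey)
	if err := priv.FromWIF(wif); err != nil {
		return nil, err
	}
	return priv, nil
}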
// ToBytes converts a Bitcoin private key to a 32-byte byte slice.
func (priv *PrivateKey) ToBytes() (b []byte) {
d := priv.D.Bytes()
/* Pad D to 32 bytes */
padded_d := append(bytes.Repeat([]byte{0x00}, 32-len(d)), d...)
return padded_d
}
// FromBytes converts a 32-byte byte slice to a Bitcoin private key and derives the corresponding Bitcoin public key.
func (priv *PrivateKey) FromBytes(b []byte) (err error) {
if len(b) != 32 {
return fmt.Errorf("Invalid private key bytes length %d, expected 32.", len(b))
}
priv.D = new(big.Int).SetBytes(b)
/* Derive public key from private key */
priv.derive()
return nil
}
// ToWIF converts a Bitcoin private key to a Wallet Import Format string.
func (priv *PrivateKey) ToWIF() (wif string) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Convert the private key to bytes */
priv_bytes := priv.ToBytes()
/* Convert bytes to base-58 check encoded string with version 0x80 */
wif = b58checkencode(0x80, priv_bytes)
return wif
}
// ToWIFC converts a Bitcoin private key to a Wallet Import Format string with the public key compressed flag.
func (priv *PrivateKey) ToWIFC() (wifc string) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Convert the private key to bytes */
priv_bytes := priv.ToBytes()
/* Append 0x01 to tell Bitcoin wallet to use compressed public keys */
priv_bytes = append(priv_bytes, []byte{0x01}...)
/* Convert bytes to base-58 check encoded string with version 0x80 */
wifc = b58checkencode(0x80, priv_bytes)
return wifc
}
// FromWIF converts a Wallet Import Format string to a Bitcoin private key and derives the corresponding Bitcoin public key.
func (priv *PrivateKey) FromWIF(wif string) (err error) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Base58 Check Decode the WIF string */
ver, priv_bytes, err := b58checkdecode(wif)
if err != nil {
return err
}
/* Check that the version byte is 0x80 */
if ver != 0x80 {
return fmt.Errorf("Invalid WIF version 0x%02x, expected 0x80.", ver)
}
/* If the private key bytes length is 33, check that suffix byte is 0x01 (for compression) and strip it off */
if len(priv_bytes) == 33 {
if priv_bytes[len(priv_bytes)-1] != 0x01 {
return fmt.Errorf("Invalid private key, unknown suffix byte 0x%02x.", priv_bytes[len(priv_bytes)-1])
}
priv_bytes = priv_bytes[0:32]
}
/* Convert from bytes to a private key */
err = priv.FromBytes(priv_bytes)
if err != nil {
return err
}
/* Derive public key from private key */
priv.derive()
return nil
}
/******************************************************************************/
/* Bitcoin Public Key Import/Export */
/******************************************************************************/
// ToBytes converts a Bitcoin public key to a 33-byte byte slice with point compression.
func (pub *PublicKey) ToBytes() (b []byte) {
/* See Certicom SEC1 2.3.3, pg. 10 */
x := pub.X.Bytes()
/* Pad X to 32-bytes */
padded_x := append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
/* Add prefix 0x02 or 0x03 depending on ylsb */
if pub.Y.Bit(0) == 0 {
return append([]byte{0x02}, padded_x...)
}
return append([]byte{0x03}, padded_x...)
}
// ToBytesUncompressed converts a Bitcoin public key to a 65-byte byte slice without point compression.
func (pub *PublicKey) ToBytesUncompressed() (b []byte) {
/* See Certicom SEC1 2.3.3, pg. 10 */
x := pub.X.Bytes()
y := pub.Y.Bytes()
/* Pad X and Y coordinate bytes to 32-bytes */
padded_x := append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
padded_y := append(bytes.Repeat([]byte{0x00}, 32-len(y)), y...)
/* Add prefix 0x04 for uncompressed coordinates */
return append([]byte{0x04}, append(padded_x, padded_y...)...)
}
// FromBytes converts a byte slice (either with or without point compression) to a Bitcoin public key.
func (pub *PublicKey) FromBytes(b []byte) (err error) {
/* See Certicom SEC1 2.3.4, pg. 11 */
if len(b) < 33 {
return fmt.Errorf("Invalid public key bytes length %d, expected at least 33.", len(b))
}
if b[0] == 0x02 || b[0] == 0x03 {
/* Compressed public key */
if len(b) != 33 {
return fmt.Errorf("Invalid public key bytes length %d, expected 33.", len(b))
}
P, err := secp256k1.Decompress(new(big.Int).SetBytes(b[1:33]), uint(b[0]&0x1))
if err != nil {
return fmt.Errorf("Invalid compressed public key bytes, decompression error: %v", err)
}
pub.X = P.X
pub.Y = P.Y
} else if b[0] == 0x04 {
/* Uncompressed public key */
if len(b) != 65 {
return fmt.Errorf("Invalid public key bytes length %d, expected 65.", len(b))
}
pub.X = new(big.Int).SetBytes(b[1:33])
pub.Y = new(big.Int).SetBytes(b[33:65])
/* Check that the point is on the curve */
if !secp256k1.IsOnCurve(pub.Point) {
return fmt.Errorf("Invalid public key bytes: point not on curve.")
}
} else {
return fmt.Errorf("Invalid public key prefix byte 0x%02x, expected 0x02, 0x03, or 0x04.", b[0])
}
return nil
}
// ToAddress converts a Bitcoin public key to a compressed Bitcoin address string.
func (pub *PublicKey) ToAddress() (address string) {
/* See https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses */
/* Convert the public key to bytes */
pub_bytes := pub.ToBytes()
/* SHA256 Hash */
sha256_h := sha256.New()
sha256_h.Reset()
sha256_h.Write(pub_bytes)
pub_hash_1 := sha256_h.Sum(nil)
/* RIPEMD-160 Hash */
ripemd160_h := ripemd160.New()
ripemd160_h.Reset()
ripemd160_h.Write(pub_hash_1)
pub_hash_2 := ripemd160_h.Sum(nil)
/* Convert hash bytes to base58 check encoded sequence */
address = b58checkencode(0x00, pub_hash_2)
return address
}
// ToAddressUncompressed converts a Bitcoin public key to an uncompressed Bitcoin address string.
func (pub *PublicKey) ToAddressUncompressed() (address string) {
/* See https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses */
/* Convert the public key to bytes */
pub_bytes := pub.ToBytesUncompressed()
/* SHA256 Hash */
sha256_h := sha256.New()
sha256_h.Reset()
sha256_h.Write(pub_bytes)
pub_hash_1 := sha256_h.Sum(nil)
/* RIPEMD-160 Hash */
ripemd160_h := ripemd160.New()
ripemd160_h.Reset()
ripemd160_h.Write(pub_hash_1)
pub_hash_2 := ripemd160_h.Sum(nil)
/* Convert hash bytes to base58 check encoded sequence */
address = b58checkencode(0x00, pub_hash_2)
return address
}
| break
}
| conditional_block |
btckey.go | /* btckeygenie v1.0.0
* https://github.com/vsergeev/btckeygenie
* License: MIT
*/
package btckey
import (
"bytes"
"golang.org/x/crypto/ripemd160"
"crypto/sha256"
"fmt"
"io"
"math/big"
"strings"
)
/******************************************************************************/
/* ECDSA Keypair Generation */
/******************************************************************************/
var secp256k1 EllipticCurve
func init() {
/* See Certicom's SEC2 2.7.1, pg.15 */
/* secp256k1 elliptic curve parameters */
secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
secp256k1.A, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000000", 16)
secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
secp256k1.G.X, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
secp256k1.G.Y, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
secp256k1.H, _ = new(big.Int).SetString("01", 16)
}
// PublicKey represents a Bitcoin public key.
type PublicKey struct {
Point
}
// PrivateKey represents a Bitcoin private key.
type PrivateKey struct {
PublicKey
D *big.Int
}
func NewPrivateKey(d *big.Int) (*PrivateKey) {
key := &PrivateKey{D: d}
key.derive()
return key
}
// derive derives a Bitcoin public key from a Bitcoin private key.
func (priv *PrivateKey) derive() (pub *PublicKey) {
/* See Certicom's SEC1 3.2.1, pg.23 */
/* Derive public key from Q = d*G */
Q := secp256k1.ScalarBaseMult(priv.D)
/* Check that Q is on the curve */
if !secp256k1.IsOnCurve(Q) {
panic("Catastrophic math logic failure in public key derivation.")
}
priv.X = Q.X
priv.Y = Q.Y
return &priv.PublicKey
}
// GenerateKey generates a public and private key pair using random source rand.
func GenerateKey(rand io.Reader) (priv PrivateKey, err error) {
/* See Certicom's SEC1 3.2.1, pg.23 */
/* See NSA's Suite B Implementer’s Guide to FIPS 186-3 (ECDSA) A.1.1, pg.18 */
/* Select private key d randomly from [1, n) */
/* Read N bit length random bytes + 64 extra bits */
b := make([]byte, secp256k1.N.BitLen()/8+8)
_, err = io.ReadFull(rand, b)
if err != nil {
return priv, fmt.Errorf("Reading random reader: %v", err)
}
d := new(big.Int).SetBytes(b)
/* Mod n-1 to shift d into [0, n-1) range */
d.Mod(d, new(big.Int).Sub(secp256k1.N, big.NewInt(1)))
/* Add one to shift d to [1, n) range */
d.Add(d, big.NewInt(1))
priv.D = d
/* Derive public key from private key */
priv.derive()
return priv, nil
}
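// Illustrative sketch only (not part of the original file): generating a fresh
// keypair and exporting it with the helpers defined later in this file. Callers
// would typically pass crypto/rand's Reader as rng; that package is not imported
// here, so the random source is left as a parameter.
func exampleGenerateAndExport(rng io.Reader) (string, string, error) {
	priv, err := GenerateKey(rng)
	if err != nil {
		return "", "", err
	}
	return priv.ToWIFC(), priv.ToAddress(), nil
}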
/******************************************************************************/
/* Base-58 Encode/Decode */
/******************************************************************************/
// b58encode encodes a byte slice b into a base-58 encoded string.
func b58encode(b []byte) (s string) {
/* See https://en.bitcoin.it/wiki/Base58Check_encoding */
const BITCOIN_BASE58_TABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
/* Convert big endian bytes to big int */
x := new(big.Int).SetBytes(b)
/* Initialize */
r := new(big.Int)
m := big.NewInt(58)
zero := big.NewInt(0)
s = ""
/* Convert big int to string */
for x.Cmp(zero) > 0 {
/* x, r = (x / 58, x % 58) */
x.QuoRem(x, m, r)
/* Prepend ASCII character */
s = string(BITCOIN_BASE58_TABLE[r.Int64()]) + s
}
return s
}
// b58decode decodes a base-58 encoded string into a byte slice b.
func b58decode(s string) (b []byte, err error) {
/* See https://en.bitcoin.it/wiki/Base58Check_encoding */
const BITCOIN_BASE58_TABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
/* Initialize */
x := big.NewInt(0)
m := big.NewInt(58)
/* Convert string to big int */
for i := 0; i < len(s); i++ {
b58index := strings.IndexByte(BITCOIN_BASE58_TABLE, s[i])
if b58index == -1 {
return nil, fmt.Errorf("Invalid base-58 character encountered: '%c', index %d.", s[i], i)
}
b58value := big.NewInt(int64(b58index))
x.Mul(x, m)
x.Add(x, b58value)
}
/* Convert big int to big endian bytes */
b = x.Bytes()
return b, nil
}
/******************************************************************************/
/* Base-58 Check Encode/Decode */
/******************************************************************************/
// b58checkencode encodes version ver and byte slice b into a base-58 check encoded string.
func b58checkencode(ver uint8, b []byte) (s string) {
/* Prepend version */
bcpy := append([]byte{ver}, b...)
/* Create a new SHA256 context */
sha256_h := sha256.New()
/* SHA256 Hash #1 */
sha256_h.Reset()
sha256_h.Write(bcpy)
hash1 := sha256_h.Sum(nil)
/* SHA256 Hash #2 */
sha256_h.Reset()
sha256_h.Write(hash1)
hash2 := sha256_h.Sum(nil)
/* Append first four bytes of hash */
bcpy = append(bcpy, hash2[0:4]...)
/* Encode base58 string */
s = b58encode(bcpy)
/* For number of leading 0's in bytes, prepend 1 */
for _, v := range bcpy {
if v != 0 {
break
}
s = "1" + s
}
return s
}
// b58checkdecode decodes base-58 check encoded string s into a version ver and byte slice b.
func b58checkdecode(s string) (ver uint8, b []byte, err error) {
/* Decode base58 string */
b, err = b58decode(s)
if err != nil {
return 0, nil, err
}
/* Add leading zero bytes */
for i := 0; i < len(s); i++ {
if s[i] != '1' {
break
}
b = append([]byte{0x00}, b...)
}
/* Verify checksum */
if len(b) < 5 {
return 0, nil, fmt.Errorf("Invalid base-58 check string: missing checksum.")
}
/* Create a new SHA256 context */
sha256_h := sha256.New()
/* SHA256 Hash #1 */
sha256_h.Reset()
sha256_h.Write(b[:len(b)-4])
hash1 := sha256_h.Sum(nil)
/* SHA256 Hash #2 */
sha256_h.Reset()
sha256_h.Write(hash1)
hash2 := sha256_h.Sum(nil)
/* Compare checksum */
if bytes.Compare(hash2[0:4], b[len(b)-4:]) != 0 {
return 0, nil, fmt.Errorf("Invalid base-58 check string: invalid checksum.")
}
/* Strip checksum bytes */
b = b[:len(b)-4]
/* Extract and strip version */
ver = b[0]
b = b[1:]
return ver, b, nil
}
/******************************************************************************/
/* Bitcoin Private Key Import/Export */
/******************************************************************************/
// CheckWIF checks that string wif is a valid Wallet Import Format or Wallet Import Format Compressed string. If it is not, err is populated with the reason.
func CheckWIF(wif string) (valid bool, err error) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Base58 Check Decode the WIF string */
ver, priv_bytes, err := b58checkdecode(wif)
if err != nil {
return false, err
}
/* Check that the version byte is 0x80 */
if ver != 0x80 {
return false, fmt.Errorf("Invalid WIF version 0x%02x, expected 0x80.", ver)
}
/* Check that private key bytes length is 32 or 33 */
if len(priv_bytes) != 32 && len(priv_bytes) != 33 {
return false, fmt.Errorf("Invalid private key bytes length %d, expected 32 or 33.", len(priv_bytes))
}
/* If the private key bytes length is 33, check that suffix byte is 0x01 (for compression) */
if len(priv_bytes) == 33 && priv_bytes[len(priv_bytes)-1] != 0x01 {
return false, fmt.Errorf("Invalid private key bytes, unknown suffix byte 0x%02x.", priv_bytes[len(priv_bytes)-1])
}
return true, nil
}
// ToBytes converts a Bitcoin private key to a 32-byte byte slice.
func (priv *PrivateKey) ToBytes() (b []byte) {
d := priv.D.Bytes()
/* Pad D to 32 bytes */
padded_d := append(bytes.Repeat([]byte{0x00}, 32-len(d)), d...)
return padded_d
}
// FromBytes converts a 32-byte byte slice to a Bitcoin private key and derives the corresponding Bitcoin public key.
func (priv *PrivateKey) FromBytes(b []byte) (err error) {
if len(b) != 32 {
return fmt.Errorf("Invalid private key bytes length %d, expected 32.", len(b))
}
priv.D = new(big.Int).SetBytes(b)
/* Derive public key from private key */
priv.derive()
return nil
}
// ToWIF converts a Bitcoin private key to a Wallet Import Format string.
func (priv *PrivateKey) ToWIF() (wif string) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Convert the private key to bytes */
priv_bytes := priv.ToBytes()
/* Convert bytes to base-58 check encoded string with version 0x80 */
wif = b58checkencode(0x80, priv_bytes)
return wif
}
// ToWIFC converts a Bitcoin private key to a Wallet Import Format string with the public key compressed flag.
func (priv *PrivateKey) ToWIFC() (wifc string) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Convert the private key to bytes */
priv_bytes := priv.ToBytes()
/* Append 0x01 to tell Bitcoin wallet to use compressed public keys */
priv_bytes = append(priv_bytes, []byte{0x01}...)
/* Convert bytes to base-58 check encoded string with version 0x80 */
wifc = b58checkencode(0x80, priv_bytes)
return wifc
}
// FromWIF converts a Wallet Import Format string to a Bitcoin private key and derives the corresponding Bitcoin public key.
func (priv *PrivateKey) FromWIF(wif string) (err error) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Base58 Check Decode the WIF string */
ver, priv_bytes, err := b58checkdecode(wif)
if err != nil {
return err
}
/* Check that the version byte is 0x80 */
if ver != 0x80 {
return fmt.Errorf("Invalid WIF version 0x%02x, expected 0x80.", ver)
}
/* If the private key bytes length is 33, check that suffix byte is 0x01 (for compression) and strip it off */
if len(priv_bytes) == 33 {
if priv_bytes[len(priv_bytes)-1] != 0x01 {
return fmt.Errorf("Invalid private key, unknown suffix byte 0x%02x.", priv_bytes[len(priv_bytes)-1])
}
priv_bytes = priv_bytes[0:32]
}
/* Convert from bytes to a private key */
err = priv.FromBytes(priv_bytes)
if err != nil {
return err
}
/* Derive public key from private key */
priv.derive()
return nil
}
/******************************************************************************/
/* Bitcoin Public Key Import/Export */
/******************************************************************************/
// ToBytes converts a Bitcoin public key to a 33-byte byte slice with point compression.
func (pub *PublicKey) ToBytes() (b []byte) {
/* See Certicom SEC1 2.3.3, pg. 10 */
x := pub.X.Bytes()
/* Pad X to 32-bytes */
padded_x := append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
/* Add prefix 0x02 or 0x03 depending on ylsb */
if pub.Y.Bit(0) == 0 {
return append([]byte{0x02}, padded_x...)
}
return append([]byte{0x03}, padded_x...)
}
// ToBytesUncompressed converts a Bitcoin public key to a 65-byte byte slice without point compression.
func (pub *PublicKey) ToBytesUncompressed() (b []byte) {
/* See Certicom SEC1 2.3.3, pg. 10 */
x := pub.X.Bytes()
y := pub.Y.Bytes()
/* Pad X and Y coordinate bytes to 32-bytes */
padded_x := append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
padded_y := append(bytes.Repeat([]byte{0x00}, 32-len(y)), y...)
/* Add prefix 0x04 for uncompressed coordinates */
return append([]byte{0x04}, append(padded_x, padded_y...)...)
}
// FromBytes converts a byte slice (either with or without point compression) to a Bitcoin public key.
func (pub *PublicKey) FromBytes(b []byte) (err error) {
/* See Certicom SEC1 2.3.4, pg. 11 */
if len(b) < 33 {
return fmt.Errorf("Invalid public key bytes length %d, expected at least 33.", len(b))
}
if b[0] == 0x02 || b[0] == 0x03 {
/* Compressed public key */
if len(b) != 33 {
return fmt.Errorf("Invalid public key bytes length %d, expected 33.", len(b))
} | return fmt.Errorf("Invalid compressed public key bytes, decompression error: %v", err)
}
pub.X = P.X
pub.Y = P.Y
} else if b[0] == 0x04 {
/* Uncompressed public key */
if len(b) != 65 {
return fmt.Errorf("Invalid public key bytes length %d, expected 65.", len(b))
}
pub.X = new(big.Int).SetBytes(b[1:33])
pub.Y = new(big.Int).SetBytes(b[33:65])
/* Check that the point is on the curve */
if !secp256k1.IsOnCurve(pub.Point) {
return fmt.Errorf("Invalid public key bytes: point not on curve.")
}
} else {
return fmt.Errorf("Invalid public key prefix byte 0x%02x, expected 0x02, 0x03, or 0x04.", b[0])
}
return nil
}
// ToAddress converts a Bitcoin public key to a compressed Bitcoin address string.
func (pub *PublicKey) ToAddress() (address string) {
/* See https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses */
/* Convert the public key to bytes */
pub_bytes := pub.ToBytes()
/* SHA256 Hash */
sha256_h := sha256.New()
sha256_h.Reset()
sha256_h.Write(pub_bytes)
pub_hash_1 := sha256_h.Sum(nil)
/* RIPEMD-160 Hash */
ripemd160_h := ripemd160.New()
ripemd160_h.Reset()
ripemd160_h.Write(pub_hash_1)
pub_hash_2 := ripemd160_h.Sum(nil)
/* Convert hash bytes to base58 check encoded sequence */
address = b58checkencode(0x00, pub_hash_2)
return address
}
// ToAddressUncompressed converts a Bitcoin public key to an uncompressed Bitcoin address string.
func (pub *PublicKey) ToAddressUncompressed() (address string) {
/* See https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses */
/* Convert the public key to bytes */
pub_bytes := pub.ToBytesUncompressed()
/* SHA256 Hash */
sha256_h := sha256.New()
sha256_h.Reset()
sha256_h.Write(pub_bytes)
pub_hash_1 := sha256_h.Sum(nil)
/* RIPEMD-160 Hash */
ripemd160_h := ripemd160.New()
ripemd160_h.Reset()
ripemd160_h.Write(pub_hash_1)
pub_hash_2 := ripemd160_h.Sum(nil)
/* Convert hash bytes to base58 check encoded sequence */
address = b58checkencode(0x3f, pub_hash_2)
return address
} |
P, err := secp256k1.Decompress(new(big.Int).SetBytes(b[1:33]), uint(b[0]&0x1))
if err != nil { | random_line_split |
btckey.go | /* btckeygenie v1.0.0
* https://github.com/vsergeev/btckeygenie
* License: MIT
*/
package btckey
import (
"bytes"
"golang.org/x/crypto/ripemd160"
"crypto/sha256"
"fmt"
"io"
"math/big"
"strings"
)
/******************************************************************************/
/* ECDSA Keypair Generation */
/******************************************************************************/
var secp256k1 EllipticCurve
func init() {
/* See Certicom's SEC2 2.7.1, pg.15 */
/* secp256k1 elliptic curve parameters */
secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
secp256k1.A, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000000", 16)
secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
secp256k1.G.X, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
secp256k1.G.Y, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
secp256k1.H, _ = new(big.Int).SetString("01", 16)
}
// PublicKey represents a Bitcoin public key.
type PublicKey struct {
Point
}
// PrivateKey represents a Bitcoin private key.
type PrivateKey struct {
PublicKey
D *big.Int
}
func NewPrivateKey(d *big.Int) (*PrivateKey) {
key := &PrivateKey{D: d}
key.derive()
return key
}
// derive derives a Bitcoin public key from a Bitcoin private key.
func (priv *PrivateKey) derive() (pub *PublicKey) {
/* See Certicom's SEC1 3.2.1, pg.23 */
/* Derive public key from Q = d*G */
Q := secp256k1.ScalarBaseMult(priv.D)
/* Check that Q is on the curve */
if !secp256k1.IsOnCurve(Q) {
panic("Catastrophic math logic failure in public key derivation.")
}
priv.X = Q.X
priv.Y = Q.Y
return &priv.PublicKey
}
// GenerateKey generates a public and private key pair using random source rand.
func GenerateKey(rand io.Reader) (priv PrivateKey, err error) {
/* See Certicom's SEC1 3.2.1, pg.23 */
/* See NSA's Suite B Implementer’s Guide to FIPS 186-3 (ECDSA) A.1.1, pg.18 */
/* Select private key d randomly from [1, n) */
/* Read N bit length random bytes + 64 extra bits */
b := make([]byte, secp256k1.N.BitLen()/8+8)
_, err = io.ReadFull(rand, b)
if err != nil {
return priv, fmt.Errorf("Reading random reader: %v", err)
}
d := new(big.Int).SetBytes(b)
/* Mod n-1 to shift d into [0, n-1) range */
d.Mod(d, new(big.Int).Sub(secp256k1.N, big.NewInt(1)))
/* Add one to shift d to [1, n) range */
d.Add(d, big.NewInt(1))
priv.D = d
/* Derive public key from private key */
priv.derive()
return priv, nil
}
/******************************************************************************/
/* Base-58 Encode/Decode */
/******************************************************************************/
// b58encode encodes a byte slice b into a base-58 encoded string.
func b58encode(b []byte) (s string) {
/* See https://en.bitcoin.it/wiki/Base58Check_encoding */
const BITCOIN_BASE58_TABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
/* Convert big endian bytes to big int */
x := new(big.Int).SetBytes(b)
/* Initialize */
r := new(big.Int)
m := big.NewInt(58)
zero := big.NewInt(0)
s = ""
/* Convert big int to string */
for x.Cmp(zero) > 0 {
/* x, r = (x / 58, x % 58) */
x.QuoRem(x, m, r)
/* Prepend ASCII character */
s = string(BITCOIN_BASE58_TABLE[r.Int64()]) + s
}
return s
}
// b58decode decodes a base-58 encoded string into a byte slice b.
func b58decode(s string) (b []byte, err error) {
/* See https://en.bitcoin.it/wiki/Base58Check_encoding */
const BITCOIN_BASE58_TABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
/* Initialize */
x := big.NewInt(0)
m := big.NewInt(58)
/* Convert string to big int */
for i := 0; i < len(s); i++ {
b58index := strings.IndexByte(BITCOIN_BASE58_TABLE, s[i])
if b58index == -1 {
return nil, fmt.Errorf("Invalid base-58 character encountered: '%c', index %d.", s[i], i)
}
b58value := big.NewInt(int64(b58index))
x.Mul(x, m)
x.Add(x, b58value)
}
/* Convert big int to big endian bytes */
b = x.Bytes()
return b, nil
}
/******************************************************************************/
/* Base-58 Check Encode/Decode */
/******************************************************************************/
// b58checkencode encodes version ver and byte slice b into a base-58 check encoded string.
func b58checkencode(ver uint8, b []byte) (s string) {
/* Prepend version */
bcpy := append([]byte{ver}, b...)
/* Create a new SHA256 context */
sha256_h := sha256.New()
/* SHA256 Hash #1 */
sha256_h.Reset()
sha256_h.Write(bcpy)
hash1 := sha256_h.Sum(nil)
/* SHA256 Hash #2 */
sha256_h.Reset()
sha256_h.Write(hash1)
hash2 := sha256_h.Sum(nil)
/* Append first four bytes of hash */
bcpy = append(bcpy, hash2[0:4]...)
/* Encode base58 string */
s = b58encode(bcpy)
/* For number of leading 0's in bytes, prepend 1 */
for _, v := range bcpy {
if v != 0 {
break
}
s = "1" + s
}
return s
}
// b58checkdecode decodes base-58 check encoded string s into a version ver and byte slice b.
func b58checkdecode(s string) (ver uint8, b []byte, err error) {
/* Decode base58 string */
b, err = b58decode(s)
if err != nil {
return 0, nil, err
}
/* Add leading zero bytes */
for i := 0; i < len(s); i++ {
if s[i] != '1' {
break
}
b = append([]byte{0x00}, b...)
}
/* Verify checksum */
if len(b) < 5 {
return 0, nil, fmt.Errorf("Invalid base-58 check string: missing checksum.")
}
/* Create a new SHA256 context */
sha256_h := sha256.New()
/* SHA256 Hash #1 */
sha256_h.Reset()
sha256_h.Write(b[:len(b)-4])
hash1 := sha256_h.Sum(nil)
/* SHA256 Hash #2 */
sha256_h.Reset()
sha256_h.Write(hash1)
hash2 := sha256_h.Sum(nil)
/* Compare checksum */
if bytes.Compare(hash2[0:4], b[len(b)-4:]) != 0 {
return 0, nil, fmt.Errorf("Invalid base-58 check string: invalid checksum.")
}
/* Strip checksum bytes */
b = b[:len(b)-4]
/* Extract and strip version */
ver = b[0]
b = b[1:]
return ver, b, nil
}
/******************************************************************************/
/* Bitcoin Private Key Import/Export */
/******************************************************************************/
// CheckWIF checks that string wif is a valid Wallet Import Format or Wallet Import Format Compressed string. If it is not, err is populated with the reason.
func CheckWIF(wif string) (valid bool, err error) {
| // ToBytes converts a Bitcoin private key to a 32-byte byte slice.
func (priv *PrivateKey) ToBytes() (b []byte) {
d := priv.D.Bytes()
/* Pad D to 32 bytes */
padded_d := append(bytes.Repeat([]byte{0x00}, 32-len(d)), d...)
return padded_d
}
// FromBytes converts a 32-byte byte slice to a Bitcoin private key and derives the corresponding Bitcoin public key.
func (priv *PrivateKey) FromBytes(b []byte) (err error) {
if len(b) != 32 {
return fmt.Errorf("Invalid private key bytes length %d, expected 32.", len(b))
}
priv.D = new(big.Int).SetBytes(b)
/* Derive public key from private key */
priv.derive()
return nil
}
// ToWIF converts a Bitcoin private key to a Wallet Import Format string.
func (priv *PrivateKey) ToWIF() (wif string) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Convert the private key to bytes */
priv_bytes := priv.ToBytes()
/* Convert bytes to base-58 check encoded string with version 0x80 */
wif = b58checkencode(0x80, priv_bytes)
return wif
}
// ToWIFC converts a Bitcoin private key to a Wallet Import Format string with the public key compressed flag.
func (priv *PrivateKey) ToWIFC() (wifc string) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Convert the private key to bytes */
priv_bytes := priv.ToBytes()
/* Append 0x01 to tell Bitcoin wallet to use compressed public keys */
priv_bytes = append(priv_bytes, []byte{0x01}...)
/* Convert bytes to base-58 check encoded string with version 0x80 */
wifc = b58checkencode(0x80, priv_bytes)
return wifc
}
// FromWIF converts a Wallet Import Format string to a Bitcoin private key and derives the corresponding Bitcoin public key.
func (priv *PrivateKey) FromWIF(wif string) (err error) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Base58 Check Decode the WIF string */
ver, priv_bytes, err := b58checkdecode(wif)
if err != nil {
return err
}
/* Check that the version byte is 0x80 */
if ver != 0x80 {
return fmt.Errorf("Invalid WIF version 0x%02x, expected 0x80.", ver)
}
/* If the private key bytes length is 33, check that suffix byte is 0x01 (for compression) and strip it off */
if len(priv_bytes) == 33 {
if priv_bytes[len(priv_bytes)-1] != 0x01 {
return fmt.Errorf("Invalid private key, unknown suffix byte 0x%02x.", priv_bytes[len(priv_bytes)-1])
}
priv_bytes = priv_bytes[0:32]
}
/* Convert from bytes to a private key */
err = priv.FromBytes(priv_bytes)
if err != nil {
return err
}
/* Derive public key from private key */
priv.derive()
return nil
}
/******************************************************************************/
/* Bitcoin Public Key Import/Export */
/******************************************************************************/
// ToBytes converts a Bitcoin public key to a 33-byte byte slice with point compression.
func (pub *PublicKey) ToBytes() (b []byte) {
/* See Certicom SEC1 2.3.3, pg. 10 */
x := pub.X.Bytes()
/* Pad X to 32-bytes */
padded_x := append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
/* Add prefix 0x02 or 0x03 depending on ylsb */
if pub.Y.Bit(0) == 0 {
return append([]byte{0x02}, padded_x...)
}
return append([]byte{0x03}, padded_x...)
}
// ToBytesUncompressed converts a Bitcoin public key to a 65-byte byte slice without point compression.
func (pub *PublicKey) ToBytesUncompressed() (b []byte) {
/* See Certicom SEC1 2.3.3, pg. 10 */
x := pub.X.Bytes()
y := pub.Y.Bytes()
/* Pad X and Y coordinate bytes to 32-bytes */
padded_x := append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
padded_y := append(bytes.Repeat([]byte{0x00}, 32-len(y)), y...)
/* Add prefix 0x04 for uncompressed coordinates */
return append([]byte{0x04}, append(padded_x, padded_y...)...)
}
// FromBytes converts a byte slice (either with or without point compression) to a Bitcoin public key.
func (pub *PublicKey) FromBytes(b []byte) (err error) {
/* See Certicom SEC1 2.3.4, pg. 11 */
if len(b) < 33 {
return fmt.Errorf("Invalid public key bytes length %d, expected at least 33.", len(b))
}
if b[0] == 0x02 || b[0] == 0x03 {
/* Compressed public key */
if len(b) != 33 {
return fmt.Errorf("Invalid public key bytes length %d, expected 33.", len(b))
}
P, err := secp256k1.Decompress(new(big.Int).SetBytes(b[1:33]), uint(b[0]&0x1))
if err != nil {
return fmt.Errorf("Invalid compressed public key bytes, decompression error: %v", err)
}
pub.X = P.X
pub.Y = P.Y
} else if b[0] == 0x04 {
/* Uncompressed public key */
if len(b) != 65 {
return fmt.Errorf("Invalid public key bytes length %d, expected 65.", len(b))
}
pub.X = new(big.Int).SetBytes(b[1:33])
pub.Y = new(big.Int).SetBytes(b[33:65])
/* Check that the point is on the curve */
if !secp256k1.IsOnCurve(pub.Point) {
return fmt.Errorf("Invalid public key bytes: point not on curve.")
}
} else {
return fmt.Errorf("Invalid public key prefix byte 0x%02x, expected 0x02, 0x03, or 0x04.", b[0])
}
return nil
}
// ToAddress converts a Bitcoin public key to a compressed Bitcoin address string.
func (pub *PublicKey) ToAddress() (address string) {
/* See https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses */
/* Convert the public key to bytes */
pub_bytes := pub.ToBytes()
/* SHA256 Hash */
sha256_h := sha256.New()
sha256_h.Reset()
sha256_h.Write(pub_bytes)
pub_hash_1 := sha256_h.Sum(nil)
/* RIPEMD-160 Hash */
ripemd160_h := ripemd160.New()
ripemd160_h.Reset()
ripemd160_h.Write(pub_hash_1)
pub_hash_2 := ripemd160_h.Sum(nil)
/* Convert hash bytes to base58 check encoded sequence */
address = b58checkencode(0x00, pub_hash_2)
return address
}
// ToAddressUncompressed converts a Bitcoin public key to an uncompressed Bitcoin address string.
func (pub *PublicKey) ToAddressUncompressed() (address string) {
/* See https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses */
/* Convert the public key to bytes */
pub_bytes := pub.ToBytesUncompressed()
/* SHA256 Hash */
sha256_h := sha256.New()
sha256_h.Reset()
sha256_h.Write(pub_bytes)
pub_hash_1 := sha256_h.Sum(nil)
/* RIPEMD-160 Hash */
ripemd160_h := ripemd160.New()
ripemd160_h.Reset()
ripemd160_h.Write(pub_hash_1)
pub_hash_2 := ripemd160_h.Sum(nil)
/* Convert hash bytes to base58 check encoded sequence */
address = b58checkencode(0x00, pub_hash_2)
return address
}
| /* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Base58 Check Decode the WIF string */
ver, priv_bytes, err := b58checkdecode(wif)
if err != nil {
return false, err
}
/* Check that the version byte is 0x80 */
if ver != 0x80 {
return false, fmt.Errorf("Invalid WIF version 0x%02x, expected 0x80.", ver)
}
/* Check that private key bytes length is 32 or 33 */
if len(priv_bytes) != 32 && len(priv_bytes) != 33 {
return false, fmt.Errorf("Invalid private key bytes length %d, expected 32 or 33.", len(priv_bytes))
}
/* If the private key bytes length is 33, check that suffix byte is 0x01 (for compression) */
if len(priv_bytes) == 33 && priv_bytes[len(priv_bytes)-1] != 0x01 {
return false, fmt.Errorf("Invalid private key bytes, unknown suffix byte 0x%02x.", priv_bytes[len(priv_bytes)-1])
}
return true, nil
}
| identifier_body |
btckey.go | /* btckeygenie v1.0.0
* https://github.com/vsergeev/btckeygenie
* License: MIT
*/
package btckey
import (
"bytes"
"golang.org/x/crypto/ripemd160"
"crypto/sha256"
"fmt"
"io"
"math/big"
"strings"
)
/******************************************************************************/
/* ECDSA Keypair Generation */
/******************************************************************************/
var secp256k1 EllipticCurve
func init() {
/* See Certicom's SEC2 2.7.1, pg.15 */
/* secp256k1 elliptic curve parameters */
secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
secp256k1.A, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000000", 16)
secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
secp256k1.G.X, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
secp256k1.G.Y, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
secp256k1.H, _ = new(big.Int).SetString("01", 16)
}
// PublicKey represents a Bitcoin public key.
type PublicKey struct {
Point
}
// PrivateKey represents a Bitcoin private key.
type PrivateKey struct {
PublicKey
D *big.Int
}
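// NewPrivateKey creates a private key from the scalar d and derives the
// corresponding public key.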
func NewPrivateKey(d *big.Int) (*PrivateKey) {
key := &PrivateKey{D: d}
key.derive()
return key
}
// derive derives a Bitcoin public key from a Bitcoin private key.
func (priv *PrivateKey) derive() (pub *PublicKey) {
/* See Certicom's SEC1 3.2.1, pg.23 */
/* Derive public key from Q = d*G */
Q := secp256k1.ScalarBaseMult(priv.D)
/* Check that Q is on the curve */
if !secp256k1.IsOnCurve(Q) {
panic("Catastrophic math logic failure in public key derivation.")
}
priv.X = Q.X
priv.Y = Q.Y
return &priv.PublicKey
}
// GenerateKey generates a public and private key pair using random source rand.
func GenerateKey(rand io.Reader) (priv PrivateKey, err error) {
/* See Certicom's SEC1 3.2.1, pg.23 */
/* See NSA's Suite B Implementer’s Guide to FIPS 186-3 (ECDSA) A.1.1, pg.18 */
/* Select private key d randomly from [1, n) */
/* Read N bit length random bytes + 64 extra bits */
b := make([]byte, secp256k1.N.BitLen()/8+8)
_, err = io.ReadFull(rand, b)
if err != nil {
return priv, fmt.Errorf("Reading random reader: %v", err)
}
d := new(big.Int).SetBytes(b)
/* Mod n-1 to shift d into [0, n-1) range */
d.Mod(d, new(big.Int).Sub(secp256k1.N, big.NewInt(1)))
/* Add one to shift d to [1, n) range */
d.Add(d, big.NewInt(1))
priv.D = d
/* Derive public key from private key */
priv.derive()
return priv, nil
}
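// Usage sketch (not part of the original source): generating a fresh keypair
// and exporting it with the helpers defined below. Assumes the caller imports
// crypto/rand and that this package is importable as "btckey".
//
//	priv, err := btckey.GenerateKey(rand.Reader)
//	if err != nil {
//	    log.Fatal(err)
//	}
//	fmt.Println(priv.ToWIFC())              // compressed WIF (version 0x80)
//	fmt.Println(priv.PublicKey.ToAddress()) // compressed P2PKH address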
/******************************************************************************/
/* Base-58 Encode/Decode */
/******************************************************************************/
// b58encode encodes a byte slice b into a base-58 encoded string.
func b58encode(b []byte) (s string) {
/* See https://en.bitcoin.it/wiki/Base58Check_encoding */
const BITCOIN_BASE58_TABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
/* Convert big endian bytes to big int */
x := new(big.Int).SetBytes(b)
/* Initialize */
r := new(big.Int)
m := big.NewInt(58)
zero := big.NewInt(0)
s = ""
/* Convert big int to string */
for x.Cmp(zero) > 0 {
/* x, r = (x / 58, x % 58) */
x.QuoRem(x, m, r)
/* Prepend ASCII character */
s = string(BITCOIN_BASE58_TABLE[r.Int64()]) + s
}
return s
}
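// Worked example (illustrative): the single byte 0x61 is 97 = 1*58 + 39, so
// b58encode([]byte{0x61}) returns "2g" with the alphabet above. Note that
// leading zero bytes are dropped by this conversion; b58checkencode below
// re-adds them as '1' characters.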
// b58decode decodes a base-58 encoded string into a byte slice b.
func b58decode(s string) (b []byte, err error) {
/* See https://en.bitcoin.it/wiki/Base58Check_encoding */
const BITCOIN_BASE58_TABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
/* Initialize */
x := big.NewInt(0)
m := big.NewInt(58)
/* Convert string to big int */
for i := 0; i < len(s); i++ {
b58index := strings.IndexByte(BITCOIN_BASE58_TABLE, s[i])
if b58index == -1 {
return nil, fmt.Errorf("Invalid base-58 character encountered: '%c', index %d.", s[i], i)
}
b58value := big.NewInt(int64(b58index))
x.Mul(x, m)
x.Add(x, b58value)
}
/* Convert big int to big endian bytes */
b = x.Bytes()
return b, nil
}
/******************************************************************************/
/* Base-58 Check Encode/Decode */
/******************************************************************************/
// b58checkencode encodes version ver and byte slice b into a base-58 check encoded string.
func b58checkencode(ver uint8, b []byte) (s string) {
/* Prepend version */
bcpy := append([]byte{ver}, b...)
/* Create a new SHA256 context */
sha256_h := sha256.New()
/* SHA256 Hash #1 */
sha256_h.Reset()
sha256_h.Write(bcpy)
hash1 := sha256_h.Sum(nil)
/* SHA256 Hash #2 */
sha256_h.Reset()
sha256_h.Write(hash1)
hash2 := sha256_h.Sum(nil)
/* Append first four bytes of hash */
bcpy = append(bcpy, hash2[0:4]...)
/* Encode base58 string */
s = b58encode(bcpy)
/* For number of leading 0's in bytes, prepend 1 */
for _, v := range bcpy {
if v != 0 {
break
}
s = "1" + s
}
return s
}
// b58checkdecode decodes base-58 check encoded string s into a version ver and byte slice b.
func b58checkdecode(s string) (ver uint8, b []byte, err error) {
/* Decode base58 string */
b, err = b58decode(s)
if err != nil {
return 0, nil, err
}
/* Add leading zero bytes */
for i := 0; i < len(s); i++ {
if s[i] != '1' {
break
}
b = append([]byte{0x00}, b...)
}
/* Verify checksum */
if len(b) < 5 {
return 0, nil, fmt.Errorf("Invalid base-58 check string: missing checksum.")
}
/* Create a new SHA256 context */
sha256_h := sha256.New()
/* SHA256 Hash #1 */
sha256_h.Reset()
sha256_h.Write(b[:len(b)-4])
hash1 := sha256_h.Sum(nil)
/* SHA256 Hash #2 */
sha256_h.Reset()
sha256_h.Write(hash1)
hash2 := sha256_h.Sum(nil)
/* Compare checksum */
if bytes.Compare(hash2[0:4], b[len(b)-4:]) != 0 {
return 0, nil, fmt.Errorf("Invalid base-58 check string: invalid checksum.")
}
/* Strip checksum bytes */
b = b[:len(b)-4]
/* Extract and strip version */
ver = b[0]
b = b[1:]
return ver, b, nil
}
/******************************************************************************/
/* Bitcoin Private Key Import/Export */
/******************************************************************************/
// CheckWIF checks that string wif is a valid Wallet Import Format or Wallet Import Format Compressed string. If it is not, err is populated with the reason.
func CheckWIF(wif string) (valid bool, err error) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Base58 Check Decode the WIF string */
ver, priv_bytes, err := b58checkdecode(wif)
if err != nil {
return false, err
}
/* Check that the version byte is 0x80 */
if ver != 0x80 {
return false, fmt.Errorf("Invalid WIF version 0x%02x, expected 0x80.", ver)
}
/* Check that private key bytes length is 32 or 33 */
if len(priv_bytes) != 32 && len(priv_bytes) != 33 {
return false, fmt.Errorf("Invalid private key bytes length %d, expected 32 or 33.", len(priv_bytes))
}
/* If the private key bytes length is 33, check that suffix byte is 0x01 (for compression) */
if len(priv_bytes) == 33 && priv_bytes[len(priv_bytes)-1] != 0x01 {
return false, fmt.Errorf("Invalid private key bytes, unknown suffix byte 0x%02x.", priv_bytes[len(priv_bytes)-1])
}
return true, nil
}
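// Usage sketch (the WIF literal is a truncated placeholder, not a real key):
//
//	if valid, err := btckey.CheckWIF("5HueCGU8..."); !valid {
//	    fmt.Println("rejecting WIF string:", err)
//	}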
// ToBytes converts a Bitcoin private key to a 32-byte byte slice.
func (priv *PrivateKey) ToBytes() (b []byte) {
d := priv.D.Bytes()
/* Pad D to 32 bytes */
padded_d := append(bytes.Repeat([]byte{0x00}, 32-len(d)), d...)
return padded_d
}
// FromBytes converts a 32-byte byte slice to a Bitcoin private key and derives the corresponding Bitcoin public key.
func (priv *PrivateKey) FromBytes(b []byte) (err error) {
if len(b) != 32 {
return fmt.Errorf("Invalid private key bytes length %d, expected 32.", len(b))
}
priv.D = new(big.Int).SetBytes(b)
/* Derive public key from private key */
priv.derive()
return nil
}
// ToWIF converts a Bitcoin private key to a Wallet Import Format string.
func (priv *PrivateKey) ToWIF() (wif string) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Convert the private key to bytes */
priv_bytes := priv.ToBytes()
/* Convert bytes to base-58 check encoded string with version 0x80 */
wif = b58checkencode(0x80, priv_bytes)
return wif
}
// ToWIFC converts a Bitcoin private key to a Wallet Import Format string with the public key compressed flag.
func (priv *PrivateKey) ToWIFC() (wifc string) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Convert the private key to bytes */
priv_bytes := priv.ToBytes()
/* Append 0x01 to tell Bitcoin wallet to use compressed public keys */
priv_bytes = append(priv_bytes, []byte{0x01}...)
/* Convert bytes to base-58 check encoded string with version 0x80 */
wifc = b58checkencode(0x80, priv_bytes)
return wifc
}
// FromWIF converts a Wallet Import Format string to a Bitcoin private key and derives the corresponding Bitcoin public key.
func (priv *PrivateKey) FromWIF(wif string) (err error) {
/* See https://en.bitcoin.it/wiki/Wallet_import_format */
/* Base58 Check Decode the WIF string */
ver, priv_bytes, err := b58checkdecode(wif)
if err != nil {
return err
}
/* Check that the version byte is 0x80 */
if ver != 0x80 {
return fmt.Errorf("Invalid WIF version 0x%02x, expected 0x80.", ver)
}
/* If the private key bytes length is 33, check that suffix byte is 0x01 (for compression) and strip it off */
if len(priv_bytes) == 33 {
if priv_bytes[len(priv_bytes)-1] != 0x01 {
return fmt.Errorf("Invalid private key, unknown suffix byte 0x%02x.", priv_bytes[len(priv_bytes)-1])
}
priv_bytes = priv_bytes[0:32]
}
/* Convert from bytes to a private key */
err = priv.FromBytes(priv_bytes)
if err != nil {
return err
}
/* Derive public key from private key */
priv.derive()
return nil
}
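// Round-trip sketch (illustrative only): import a WIF string, then read back
// the compressed address derived from it.
//
//	var priv btckey.PrivateKey
//	if err := priv.FromWIF(wif); err != nil {
//	    return err
//	}
//	addr := priv.PublicKey.ToAddress()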
/******************************************************************************/
/* Bitcoin Public Key Import/Export */
/******************************************************************************/
// ToBytes converts a Bitcoin public key to a 33-byte byte slice with point compression.
func (pub *PublicKey) ToBytes() (b []byte) {
/* See Certicom SEC1 2.3.3, pg. 10 */
x := pub.X.Bytes()
/* Pad X to 32-bytes */
padded_x := append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
/* Add prefix 0x02 or 0x03 depending on ylsb */
if pub.Y.Bit(0) == 0 {
return append([]byte{0x02}, padded_x...)
}
return append([]byte{0x03}, padded_x...)
}
// ToBytesUncompressed converts a Bitcoin public key to a 65-byte byte slice without point compression.
func (pub *PublicKey) ToBytesUncompressed() (b []byte) {
/* See Certicom SEC1 2.3.3, pg. 10 */
x := pub.X.Bytes()
y := pub.Y.Bytes()
/* Pad X and Y coordinate bytes to 32-bytes */
padded_x := append(bytes.Repeat([]byte{0x00}, 32-len(x)), x...)
padded_y := append(bytes.Repeat([]byte{0x00}, 32-len(y)), y...)
/* Add prefix 0x04 for uncompressed coordinates */
return append([]byte{0x04}, append(padded_x, padded_y...)...)
}
// FromBytes converts a byte slice (either with or without point compression) to a Bitcoin public key.
func (pub *PublicKey) FromBytes(b []byte) (err error) {
/* See Certicom SEC1 2.3.4, pg. 11 */
if len(b) < 33 {
return fmt.Errorf("Invalid public key bytes length %d, expected at least 33.", len(b))
}
if b[0] == 0x02 || b[0] == 0x03 {
/* Compressed public key */
if len(b) != 33 {
return fmt.Errorf("Invalid public key bytes length %d, expected 33.", len(b))
}
P, err := secp256k1.Decompress(new(big.Int).SetBytes(b[1:33]), uint(b[0]&0x1))
if err != nil {
return fmt.Errorf("Invalid compressed public key bytes, decompression error: %v", err)
}
pub.X = P.X
pub.Y = P.Y
} else if b[0] == 0x04 {
/* Uncompressed public key */
if len(b) != 65 {
return fmt.Errorf("Invalid public key bytes length %d, expected 65.", len(b))
}
pub.X = new(big.Int).SetBytes(b[1:33])
pub.Y = new(big.Int).SetBytes(b[33:65])
/* Check that the point is on the curve */
if !secp256k1.IsOnCurve(pub.Point) {
return fmt.Errorf("Invalid public key bytes: point not on curve.")
}
} else {
return fmt.Errorf("Invalid public key prefix byte 0x%02x, expected 0x02, 0x03, or 0x04.", b[0])
}
return nil
}
// ToAddress converts a Bitcoin public key to a compressed Bitcoin address string.
func (pub *PublicKey) ToAddress() (address string) {
/* See https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses */
/* Convert the public key to bytes */
pub_bytes := pub.ToBytes()
/* SHA256 Hash */
sha256_h := sha256.New()
sha256_h.Reset()
sha256_h.Write(pub_bytes)
pub_hash_1 := sha256_h.Sum(nil)
/* RIPEMD-160 Hash */
ripemd160_h := ripemd160.New()
ripemd160_h.Reset()
ripemd160_h.Write(pub_hash_1)
pub_hash_2 := ripemd160_h.Sum(nil)
/* Convert hash bytes to base58 check encoded sequence */
address = b58checkencode(0x00, pub_hash_2)
return address
}
// ToAddressUncompressed converts a Bitcoin public key to an uncompressed Bitcoin address string.
func (pub *PublicKey) To | (address string) {
/* See https://en.bitcoin.it/wiki/Technical_background_of_Bitcoin_addresses */
/* Convert the public key to bytes */
pub_bytes := pub.ToBytesUncompressed()
/* SHA256 Hash */
sha256_h := sha256.New()
sha256_h.Reset()
sha256_h.Write(pub_bytes)
pub_hash_1 := sha256_h.Sum(nil)
/* RIPEMD-160 Hash */
ripemd160_h := ripemd160.New()
ripemd160_h.Reset()
ripemd160_h.Write(pub_hash_1)
pub_hash_2 := ripemd160_h.Sum(nil)
/* Convert hash bytes to base58 check encoded sequence */
address = b58checkencode(0x00, pub_hash_2)
return address
}
| AddressUncompressed() | identifier_name |
unbond.rs | use crate::contract::{query_total_issued, slashing};
use crate::state::{
get_finished_amount, get_unbond_batches, read_config, read_current_batch, read_parameters,
read_state, read_unbond_history, remove_unbond_wait_list, store_current_batch, store_state,
store_unbond_history, store_unbond_wait_list, UnbondHistory,
};
use cosmwasm_std::{
coin, coins, log, to_binary, Api, BankMsg, CosmosMsg, Decimal, Env, Extern, HandleResponse,
HumanAddr, Querier, StakingMsg, StdError, StdResult, Storage, Uint128, WasmMsg,
};
use cw20::Cw20HandleMsg;
use rand::{Rng, SeedableRng, XorShiftRng};
use signed_integer::SignedInt;
/// This message must be called by receive_cw20
/// This message will undelegate coin and burn basset token
pub(crate) fn handle_unbond<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
amount: Uint128,
sender: HumanAddr,
) -> StdResult<HandleResponse> |
pub fn handle_withdraw_unbonded<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
) -> StdResult<HandleResponse> {
let sender_human = env.message.sender.clone();
let contract_address = env.contract.address.clone();
// read params
let params = read_parameters(&deps.storage).load()?;
let unbonding_period = params.unbonding_period;
let coin_denom = params.underlying_coin_denom;
let historical_time = env.block.time - unbonding_period;
// query hub balance for process withdraw rate.
let hub_balance = deps
.querier
.query_balance(&env.contract.address, &*coin_denom)?
.amount;
// calculate withdraw rate for user requests
process_withdraw_rate(deps, historical_time, hub_balance)?;
let withdraw_amount = get_finished_amount(&deps.storage, sender_human.clone()).unwrap();
if withdraw_amount.is_zero() {
return Err(StdError::generic_err(format!(
"No withdrawable {} assets are available yet",
coin_denom
)));
}
// remove the previous batches for the user
let deprecated_batches = get_unbond_batches(&deps.storage, sender_human.clone())?;
remove_unbond_wait_list(&mut deps.storage, deprecated_batches, sender_human.clone())?;
// Update previous balance used for calculation in next Luna batch release
let prev_balance = (hub_balance - withdraw_amount)?;
store_state(&mut deps.storage).update(|mut last_state| {
last_state.prev_hub_balance = prev_balance;
Ok(last_state)
})?;
// Send the money to the user
let msgs = vec![BankMsg::Send {
from_address: contract_address.clone(),
to_address: sender_human,
amount: coins(withdraw_amount.u128(), &*coin_denom),
}
.into()];
let res = HandleResponse {
messages: msgs,
log: vec![
log("action", "finish_burn"),
log("from", contract_address),
log("amount", withdraw_amount),
],
data: None,
};
Ok(res)
}
/// This is designed for an accurate unbonded amount calculation.
/// Executed while processing withdraw_unbonded.
fn process_withdraw_rate<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
historical_time: u64,
hub_balance: Uint128,
) -> StdResult<()> {
// balance change of the hub contract must be checked.
let mut total_unbonded_amount = Uint128::zero();
let mut state = read_state(&deps.storage).load()?;
let balance_change = SignedInt::from_subtraction(hub_balance, state.prev_hub_balance);
state.actual_unbonded_amount += balance_change.0;
let last_processed_batch = state.last_processed_batch;
let mut batch_count: u64 = 0;
// Iterate over unbonded histories that have been processed
// to calculate newly added unbonded amount
let mut i = last_processed_batch + 1;
loop {
let history: UnbondHistory;
match read_unbond_history(&deps.storage, i) {
Ok(h) => {
if h.time > historical_time {
break;
}
if !h.released {
history = h.clone();
} else {
break;
}
}
Err(_) => break,
}
let burnt_amount = history.amount;
let historical_rate = history.withdraw_rate;
let unbonded_amount = burnt_amount * historical_rate;
total_unbonded_amount += unbonded_amount;
batch_count += 1;
i += 1;
}
if batch_count >= 1 {
// Use signed integer in case of some rogue transfers.
let slashed_amount =
SignedInt::from_subtraction(total_unbonded_amount, state.actual_unbonded_amount);
// Iterate again to calculate the withdraw rate for each unprocessed history
let mut iterator = last_processed_batch + 1;
loop {
let history: UnbondHistory;
match read_unbond_history(&deps.storage, iterator) {
Ok(h) => {
if h.time > historical_time {
break;
}
if !h.released {
history = h
} else {
break;
}
}
Err(_) => {
break;
}
}
let burnt_amount_of_batch = history.amount;
let historical_rate_of_batch = history.withdraw_rate;
let unbonded_amount_of_batch = burnt_amount_of_batch * historical_rate_of_batch;
// the slashed amount for each batch must be proportional to the unbonded amount of batch
let batch_slashing_weight =
Decimal::from_ratio(unbonded_amount_of_batch, total_unbonded_amount);
let mut slashed_amount_of_batch = batch_slashing_weight * slashed_amount.0;
let actual_unbonded_amount_of_batch: Uint128;
// If slashed amount is negative, there should be summation instead of subtraction.
if slashed_amount.1 {
slashed_amount_of_batch = (slashed_amount_of_batch - Uint128(1))?;
actual_unbonded_amount_of_batch =
unbonded_amount_of_batch + slashed_amount_of_batch;
} else {
if slashed_amount.0.u128() != 0u128 {
slashed_amount_of_batch += Uint128(1);
}
actual_unbonded_amount_of_batch =
SignedInt::from_subtraction(unbonded_amount_of_batch, slashed_amount_of_batch)
.0;
}
// Calculate the new withdraw rate
let new_withdraw_rate =
Decimal::from_ratio(actual_unbonded_amount_of_batch, burnt_amount_of_batch);
let mut history_for_i = history;
// store the history and mark it as released
history_for_i.withdraw_rate = new_withdraw_rate;
history_for_i.released = true;
store_unbond_history(&mut deps.storage, iterator, history_for_i)?;
state.last_processed_batch = iterator;
iterator += 1;
}
}
// Store state.actual_unbonded_amount for future new batches release
state.actual_unbonded_amount = Uint128::zero();
store_state(&mut deps.storage).save(&state)?;
Ok(())
}
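// Numeric sketch (illustrative, using the same cosmwasm_std types as above):
// if two batches unbonded 300 and 100 ubluna and 40 ubluna went missing, the
// proportional slashing above charges 40 * 300/400 = 30 to the first batch
// and 40 * 100/400 = 10 to the second.
//
//     let weight = Decimal::from_ratio(Uint128(300), Uint128(400)); // 0.75
//     let slashed_for_batch = weight * Uint128(40);                 // Uint128(30)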
fn pick_validator<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
claim: Uint128,
delegator: HumanAddr,
block_height: u64,
) -> StdResult<Vec<CosmosMsg>> {
//read params
let params = read_parameters(&deps.storage).load()?;
let coin_denom = params.underlying_coin_denom;
let mut messages: Vec<CosmosMsg> = vec![];
let mut claimed = claim;
let all_delegations = deps
.querier
.query_all_delegations(delegator)
.expect("There must be at least one delegation");
// pick a random validator
// if it does not have requested amount, undelegate all it has
// and pick another random validator
let mut iteration_index = 0;
let mut deletable_delegations = all_delegations;
while claimed.0 > 0 {
let mut rng = XorShiftRng::seed_from_u64(block_height + iteration_index);
let random_index = rng.gen_range(0, deletable_delegations.len());
let delegation = deletable_delegations.remove(random_index);
let val = delegation.amount.amount;
let undelegated_amount: Uint128;
if val.0 > claimed.0 {
undelegated_amount = claimed;
claimed = Uint128::zero();
} else {
undelegated_amount = val;
claimed = (claimed - val)?;
}
if undelegated_amount.0 > 0 {
let msgs: CosmosMsg = CosmosMsg::Staking(StakingMsg::Undelegate {
validator: delegation.validator,
amount: coin(undelegated_amount.0, &*coin_denom),
});
messages.push(msgs);
}
iteration_index += 1;
}
Ok(messages)
}
| {
// Read params
let params = read_parameters(&deps.storage).load()?;
let epoch_period = params.epoch_period;
let threshold = params.er_threshold;
let recovery_fee = params.peg_recovery_fee;
let mut current_batch = read_current_batch(&deps.storage).load()?;
// Check slashing, update state, and calculate the new exchange rate.
slashing(deps, env.clone())?;
let mut state = read_state(&deps.storage).load()?;
let mut total_supply = query_total_issued(&deps).unwrap_or_default();
// Collect all the requests within an epoch period
// Apply peg recovery fee
let amount_with_fee: Uint128;
if state.exchange_rate < threshold {
let max_peg_fee = amount * recovery_fee;
let required_peg_fee =
((total_supply + current_batch.requested_with_fee) - state.total_bond_amount)?;
let peg_fee = Uint128::min(max_peg_fee, required_peg_fee);
amount_with_fee = (amount - peg_fee)?;
} else {
amount_with_fee = amount;
}
current_batch.requested_with_fee += amount_with_fee;
store_unbond_wait_list(
&mut deps.storage,
current_batch.id,
sender.clone(),
amount_with_fee,
)?;
total_supply =
(total_supply - amount).expect("the requested can not be more than the total supply");
// Update exchange rate
state.update_exchange_rate(total_supply, current_batch.requested_with_fee);
let current_time = env.block.time;
let passed_time = current_time - state.last_unbonded_time;
let mut messages: Vec<CosmosMsg> = vec![];
// If the epoch period is passed, the undelegate message would be sent.
if passed_time > epoch_period {
// Apply the current exchange rate.
let undelegation_amount = current_batch.requested_with_fee * state.exchange_rate;
// the contract must stop if the undelegation amount is only 1 ubluna (the burn amount must be greater than 1 ubluna)
if undelegation_amount == Uint128(1) {
return Err(StdError::generic_err(
"Burn amount must be greater than 1 ubluna",
));
}
let delegator = env.contract.address;
let block_height = env.block.height;
// Send undelegated requests to possibly more than one validators
let mut undelegated_msgs =
pick_validator(deps, undelegation_amount, delegator, block_height)?;
messages.append(&mut undelegated_msgs);
state.total_bond_amount = (state.total_bond_amount - undelegation_amount)
.expect("undelegation amount can not be more than stored total bonded amount");
// Store history for withdraw unbonded
let history = UnbondHistory {
batch_id: current_batch.id,
time: env.block.time,
amount: current_batch.requested_with_fee,
applied_exchange_rate: state.exchange_rate,
withdraw_rate: state.exchange_rate,
released: false,
};
store_unbond_history(&mut deps.storage, current_batch.id, history)?;
// batch info must be updated to new batch
current_batch.id += 1;
current_batch.requested_with_fee = Uint128::zero();
// state.last_unbonded_time must be updated to the current block time
state.last_unbonded_time = env.block.time;
}
// Store the new requested_with_fee or id in the current batch
store_current_batch(&mut deps.storage).save(¤t_batch)?;
// Store state's new exchange rate
store_state(&mut deps.storage).save(&state)?;
// Send Burn message to token contract
let config = read_config(&deps.storage).load()?;
let token_address = deps.api.human_address(
&config
.token_contract
.expect("the token contract must have been registered"),
)?;
let burn_msg = Cw20HandleMsg::Burn { amount };
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: token_address,
msg: to_binary(&burn_msg)?,
send: vec![],
}));
let res = HandleResponse {
messages,
log: vec![
log("action", "burn"),
log("from", sender),
log("burnt_amount", amount),
log("unbonded_amount", amount_with_fee),
],
data: None,
};
Ok(res)
} | identifier_body |
unbond.rs | use crate::contract::{query_total_issued, slashing};
use crate::state::{
get_finished_amount, get_unbond_batches, read_config, read_current_batch, read_parameters,
read_state, read_unbond_history, remove_unbond_wait_list, store_current_batch, store_state,
store_unbond_history, store_unbond_wait_list, UnbondHistory,
};
use cosmwasm_std::{
coin, coins, log, to_binary, Api, BankMsg, CosmosMsg, Decimal, Env, Extern, HandleResponse,
HumanAddr, Querier, StakingMsg, StdError, StdResult, Storage, Uint128, WasmMsg,
};
use cw20::Cw20HandleMsg;
use rand::{Rng, SeedableRng, XorShiftRng};
use signed_integer::SignedInt;
/// This message must be called by receive_cw20
/// This message will undelegate coin and burn basset token
pub(crate) fn | <S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
amount: Uint128,
sender: HumanAddr,
) -> StdResult<HandleResponse> {
// Read params
let params = read_parameters(&deps.storage).load()?;
let epoch_period = params.epoch_period;
let threshold = params.er_threshold;
let recovery_fee = params.peg_recovery_fee;
let mut current_batch = read_current_batch(&deps.storage).load()?;
// Check slashing, update state, and calculate the new exchange rate.
slashing(deps, env.clone())?;
let mut state = read_state(&deps.storage).load()?;
let mut total_supply = query_total_issued(&deps).unwrap_or_default();
// Collect all the requests within an epoch period
// Apply peg recovery fee
let amount_with_fee: Uint128;
if state.exchange_rate < threshold {
let max_peg_fee = amount * recovery_fee;
let required_peg_fee =
((total_supply + current_batch.requested_with_fee) - state.total_bond_amount)?;
let peg_fee = Uint128::min(max_peg_fee, required_peg_fee);
amount_with_fee = (amount - peg_fee)?;
} else {
amount_with_fee = amount;
}
current_batch.requested_with_fee += amount_with_fee;
store_unbond_wait_list(
&mut deps.storage,
current_batch.id,
sender.clone(),
amount_with_fee,
)?;
total_supply =
(total_supply - amount).expect("the requested can not be more than the total supply");
// Update exchange rate
state.update_exchange_rate(total_supply, current_batch.requested_with_fee);
let current_time = env.block.time;
let passed_time = current_time - state.last_unbonded_time;
let mut messages: Vec<CosmosMsg> = vec![];
// If the epoch period is passed, the undelegate message would be sent.
if passed_time > epoch_period {
// Apply the current exchange rate.
let undelegation_amount = current_batch.requested_with_fee * state.exchange_rate;
// the contract must stop if the undelegation amount is only 1 ubluna (the burn amount must be greater than 1 ubluna)
if undelegation_amount == Uint128(1) {
return Err(StdError::generic_err(
"Burn amount must be greater than 1 ubluna",
));
}
let delegator = env.contract.address;
let block_height = env.block.height;
// Send undelegated requests to possibly more than one validators
let mut undelegated_msgs =
pick_validator(deps, undelegation_amount, delegator, block_height)?;
messages.append(&mut undelegated_msgs);
state.total_bond_amount = (state.total_bond_amount - undelegation_amount)
.expect("undelegation amount can not be more than stored total bonded amount");
// Store history for withdraw unbonded
let history = UnbondHistory {
batch_id: current_batch.id,
time: env.block.time,
amount: current_batch.requested_with_fee,
applied_exchange_rate: state.exchange_rate,
withdraw_rate: state.exchange_rate,
released: false,
};
store_unbond_history(&mut deps.storage, current_batch.id, history)?;
// batch info must be updated to new batch
current_batch.id += 1;
current_batch.requested_with_fee = Uint128::zero();
// state.last_unbonded_time must be updated to the current block time
state.last_unbonded_time = env.block.time;
}
// Store the new requested_with_fee or id in the current batch
store_current_batch(&mut deps.storage).save(¤t_batch)?;
// Store state's new exchange rate
store_state(&mut deps.storage).save(&state)?;
// Send Burn message to token contract
let config = read_config(&deps.storage).load()?;
let token_address = deps.api.human_address(
&config
.token_contract
.expect("the token contract must have been registered"),
)?;
let burn_msg = Cw20HandleMsg::Burn { amount };
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: token_address,
msg: to_binary(&burn_msg)?,
send: vec![],
}));
let res = HandleResponse {
messages,
log: vec![
log("action", "burn"),
log("from", sender),
log("burnt_amount", amount),
log("unbonded_amount", amount_with_fee),
],
data: None,
};
Ok(res)
}
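// Numeric sketch of the peg recovery fee branch above (values are made up):
// with the exchange rate under er_threshold, peg_recovery_fee = 5 permille and
// a 1000 ubluna unbond request, the fee is the smaller of the percentage fee
// and the amount still needed to restore the peg.
//
//     let max_peg_fee = Uint128(1000) * Decimal::permille(5);        // 5
//     let required_peg_fee = Uint128(3);      // (supply + requested) - bonded
//     let peg_fee = Uint128::min(max_peg_fee, required_peg_fee);     // 3
//     let amount_with_fee = (Uint128(1000) - peg_fee)?;              // Ok(997)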
pub fn handle_withdraw_unbonded<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
) -> StdResult<HandleResponse> {
let sender_human = env.message.sender.clone();
let contract_address = env.contract.address.clone();
// read params
let params = read_parameters(&deps.storage).load()?;
let unbonding_period = params.unbonding_period;
let coin_denom = params.underlying_coin_denom;
let historical_time = env.block.time - unbonding_period;
// query hub balance for process withdraw rate.
let hub_balance = deps
.querier
.query_balance(&env.contract.address, &*coin_denom)?
.amount;
// calculate withdraw rate for user requests
process_withdraw_rate(deps, historical_time, hub_balance)?;
let withdraw_amount = get_finished_amount(&deps.storage, sender_human.clone()).unwrap();
if withdraw_amount.is_zero() {
return Err(StdError::generic_err(format!(
"No withdrawable {} assets are available yet",
coin_denom
)));
}
// remove the previous batches for the user
let deprecated_batches = get_unbond_batches(&deps.storage, sender_human.clone())?;
remove_unbond_wait_list(&mut deps.storage, deprecated_batches, sender_human.clone())?;
// Update previous balance used for calculation in next Luna batch release
let prev_balance = (hub_balance - withdraw_amount)?;
store_state(&mut deps.storage).update(|mut last_state| {
last_state.prev_hub_balance = prev_balance;
Ok(last_state)
})?;
// Send the money to the user
let msgs = vec![BankMsg::Send {
from_address: contract_address.clone(),
to_address: sender_human,
amount: coins(withdraw_amount.u128(), &*coin_denom),
}
.into()];
let res = HandleResponse {
messages: msgs,
log: vec![
log("action", "finish_burn"),
log("from", contract_address),
log("amount", withdraw_amount),
],
data: None,
};
Ok(res)
}
/// This is designed for an accurate unbonded amount calculation.
/// Executed while processing withdraw_unbonded.
fn process_withdraw_rate<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
historical_time: u64,
hub_balance: Uint128,
) -> StdResult<()> {
// balance change of the hub contract must be checked.
let mut total_unbonded_amount = Uint128::zero();
let mut state = read_state(&deps.storage).load()?;
let balance_change = SignedInt::from_subtraction(hub_balance, state.prev_hub_balance);
state.actual_unbonded_amount += balance_change.0;
let last_processed_batch = state.last_processed_batch;
let mut batch_count: u64 = 0;
// Iterate over unbonded histories that have been processed
// to calculate newly added unbonded amount
let mut i = last_processed_batch + 1;
loop {
let history: UnbondHistory;
match read_unbond_history(&deps.storage, i) {
Ok(h) => {
if h.time > historical_time {
break;
}
if !h.released {
history = h.clone();
} else {
break;
}
}
Err(_) => break,
}
let burnt_amount = history.amount;
let historical_rate = history.withdraw_rate;
let unbonded_amount = burnt_amount * historical_rate;
total_unbonded_amount += unbonded_amount;
batch_count += 1;
i += 1;
}
if batch_count >= 1 {
// Use signed integer in case of some rogue transfers.
let slashed_amount =
SignedInt::from_subtraction(total_unbonded_amount, state.actual_unbonded_amount);
// Iterate again to calculate the withdraw rate for each unprocessed history
let mut iterator = last_processed_batch + 1;
loop {
let history: UnbondHistory;
match read_unbond_history(&deps.storage, iterator) {
Ok(h) => {
if h.time > historical_time {
break;
}
if !h.released {
history = h
} else {
break;
}
}
Err(_) => {
break;
}
}
let burnt_amount_of_batch = history.amount;
let historical_rate_of_batch = history.withdraw_rate;
let unbonded_amount_of_batch = burnt_amount_of_batch * historical_rate_of_batch;
// the slashed amount for each batch must be proportional to the unbonded amount of batch
let batch_slashing_weight =
Decimal::from_ratio(unbonded_amount_of_batch, total_unbonded_amount);
let mut slashed_amount_of_batch = batch_slashing_weight * slashed_amount.0;
let actual_unbonded_amount_of_batch: Uint128;
// If slashed amount is negative, there should be summation instead of subtraction.
if slashed_amount.1 {
slashed_amount_of_batch = (slashed_amount_of_batch - Uint128(1))?;
actual_unbonded_amount_of_batch =
unbonded_amount_of_batch + slashed_amount_of_batch;
} else {
if slashed_amount.0.u128() != 0u128 {
slashed_amount_of_batch += Uint128(1);
}
actual_unbonded_amount_of_batch =
SignedInt::from_subtraction(unbonded_amount_of_batch, slashed_amount_of_batch)
.0;
}
// Calculate the new withdraw rate
let new_withdraw_rate =
Decimal::from_ratio(actual_unbonded_amount_of_batch, burnt_amount_of_batch);
let mut history_for_i = history;
// store the history and mark it as released
history_for_i.withdraw_rate = new_withdraw_rate;
history_for_i.released = true;
store_unbond_history(&mut deps.storage, iterator, history_for_i)?;
state.last_processed_batch = iterator;
iterator += 1;
}
}
// Store state.actual_unbonded_amount for future new batches release
state.actual_unbonded_amount = Uint128::zero();
store_state(&mut deps.storage).save(&state)?;
Ok(())
}
fn pick_validator<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
claim: Uint128,
delegator: HumanAddr,
block_height: u64,
) -> StdResult<Vec<CosmosMsg>> {
//read params
let params = read_parameters(&deps.storage).load()?;
let coin_denom = params.underlying_coin_denom;
let mut messages: Vec<CosmosMsg> = vec![];
let mut claimed = claim;
let all_delegations = deps
.querier
.query_all_delegations(delegator)
.expect("There must be at least one delegation");
// pick a random validator
// if it does not have requested amount, undelegate all it has
// and pick another random validator
let mut iteration_index = 0;
let mut deletable_delegations = all_delegations;
while claimed.0 > 0 {
let mut rng = XorShiftRng::seed_from_u64(block_height + iteration_index);
let random_index = rng.gen_range(0, deletable_delegations.len());
let delegation = deletable_delegations.remove(random_index);
let val = delegation.amount.amount;
let undelegated_amount: Uint128;
if val.0 > claimed.0 {
undelegated_amount = claimed;
claimed = Uint128::zero();
} else {
undelegated_amount = val;
claimed = (claimed - val)?;
}
if undelegated_amount.0 > 0 {
let msgs: CosmosMsg = CosmosMsg::Staking(StakingMsg::Undelegate {
validator: delegation.validator,
amount: coin(undelegated_amount.0, &*coin_denom),
});
messages.push(msgs);
}
iteration_index += 1;
}
Ok(messages)
}
| handle_unbond | identifier_name |
unbond.rs | use crate::contract::{query_total_issued, slashing};
use crate::state::{
get_finished_amount, get_unbond_batches, read_config, read_current_batch, read_parameters,
read_state, read_unbond_history, remove_unbond_wait_list, store_current_batch, store_state,
store_unbond_history, store_unbond_wait_list, UnbondHistory,
};
use cosmwasm_std::{
coin, coins, log, to_binary, Api, BankMsg, CosmosMsg, Decimal, Env, Extern, HandleResponse,
HumanAddr, Querier, StakingMsg, StdError, StdResult, Storage, Uint128, WasmMsg,
};
use cw20::Cw20HandleMsg;
use rand::{Rng, SeedableRng, XorShiftRng};
use signed_integer::SignedInt;
/// This message must be called by receive_cw20
/// This message will undelegate coin and burn basset token
pub(crate) fn handle_unbond<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
amount: Uint128,
sender: HumanAddr,
) -> StdResult<HandleResponse> {
// Read params
let params = read_parameters(&deps.storage).load()?;
let epoch_period = params.epoch_period;
let threshold = params.er_threshold;
let recovery_fee = params.peg_recovery_fee;
let mut current_batch = read_current_batch(&deps.storage).load()?;
// Check slashing, update state, and calculate the new exchange rate.
slashing(deps, env.clone())?;
let mut state = read_state(&deps.storage).load()?;
let mut total_supply = query_total_issued(&deps).unwrap_or_default();
// Collect all the requests within an epoch period
// Apply peg recovery fee
let amount_with_fee: Uint128;
if state.exchange_rate < threshold {
let max_peg_fee = amount * recovery_fee;
let required_peg_fee =
((total_supply + current_batch.requested_with_fee) - state.total_bond_amount)?;
let peg_fee = Uint128::min(max_peg_fee, required_peg_fee);
amount_with_fee = (amount - peg_fee)?;
} else {
amount_with_fee = amount;
}
current_batch.requested_with_fee += amount_with_fee;
store_unbond_wait_list(
&mut deps.storage,
current_batch.id,
sender.clone(),
amount_with_fee,
)?;
total_supply =
(total_supply - amount).expect("the requested can not be more than the total supply");
// Update exchange rate
state.update_exchange_rate(total_supply, current_batch.requested_with_fee);
let current_time = env.block.time;
let passed_time = current_time - state.last_unbonded_time;
let mut messages: Vec<CosmosMsg> = vec![];
// If the epoch period is passed, the undelegate message would be sent.
if passed_time > epoch_period {
// Apply the current exchange rate.
let undelegation_amount = current_batch.requested_with_fee * state.exchange_rate;
// the contract must stop if the undelegation amount is only 1 ubluna (the burn amount must be greater than 1 ubluna)
if undelegation_amount == Uint128(1) {
return Err(StdError::generic_err(
"Burn amount must be greater than 1 ubluna",
));
}
let delegator = env.contract.address;
let block_height = env.block.height;
// Send undelegated requests to possibly more than one validators
let mut undelegated_msgs =
pick_validator(deps, undelegation_amount, delegator, block_height)?;
messages.append(&mut undelegated_msgs);
state.total_bond_amount = (state.total_bond_amount - undelegation_amount)
.expect("undelegation amount can not be more than stored total bonded amount");
// Store history for withdraw unbonded
let history = UnbondHistory {
batch_id: current_batch.id,
time: env.block.time,
amount: current_batch.requested_with_fee,
applied_exchange_rate: state.exchange_rate,
withdraw_rate: state.exchange_rate,
released: false,
};
store_unbond_history(&mut deps.storage, current_batch.id, history)?;
// batch info must be updated to new batch
current_batch.id += 1;
current_batch.requested_with_fee = Uint128::zero();
// state.last_unbonded_time must be updated to the current block time
state.last_unbonded_time = env.block.time;
}
// Store the new requested_with_fee or id in the current batch
store_current_batch(&mut deps.storage).save(¤t_batch)?;
// Store state's new exchange rate
store_state(&mut deps.storage).save(&state)?;
// Send Burn message to token contract
let config = read_config(&deps.storage).load()?;
let token_address = deps.api.human_address(
&config
.token_contract
.expect("the token contract must have been registered"),
)?;
let burn_msg = Cw20HandleMsg::Burn { amount };
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: token_address,
msg: to_binary(&burn_msg)?,
send: vec![],
}));
let res = HandleResponse {
messages,
log: vec![
log("action", "burn"),
log("from", sender),
log("burnt_amount", amount),
log("unbonded_amount", amount_with_fee),
],
data: None,
};
Ok(res)
}
pub fn handle_withdraw_unbonded<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
env: Env,
) -> StdResult<HandleResponse> {
let sender_human = env.message.sender.clone();
let contract_address = env.contract.address.clone();
// read params
let params = read_parameters(&deps.storage).load()?;
let unbonding_period = params.unbonding_period;
let coin_denom = params.underlying_coin_denom;
let historical_time = env.block.time - unbonding_period;
// query hub balance for process withdraw rate.
let hub_balance = deps
.querier
.query_balance(&env.contract.address, &*coin_denom)?
.amount;
// calculate withdraw rate for user requests
process_withdraw_rate(deps, historical_time, hub_balance)?;
let withdraw_amount = get_finished_amount(&deps.storage, sender_human.clone()).unwrap();
if withdraw_amount.is_zero() {
return Err(StdError::generic_err(format!(
"No withdrawable {} assets are available yet",
coin_denom
)));
}
// remove the previous batches for the user
let deprecated_batches = get_unbond_batches(&deps.storage, sender_human.clone())?;
remove_unbond_wait_list(&mut deps.storage, deprecated_batches, sender_human.clone())?;
// Update previous balance used for calculation in next Luna batch release
let prev_balance = (hub_balance - withdraw_amount)?;
store_state(&mut deps.storage).update(|mut last_state| {
last_state.prev_hub_balance = prev_balance; | })?;
// Send the money to the user
let msgs = vec![BankMsg::Send {
from_address: contract_address.clone(),
to_address: sender_human,
amount: coins(withdraw_amount.u128(), &*coin_denom),
}
.into()];
let res = HandleResponse {
messages: msgs,
log: vec![
log("action", "finish_burn"),
log("from", contract_address),
log("amount", withdraw_amount),
],
data: None,
};
Ok(res)
}
/// This is designed for an accurate unbonded amount calculation.
/// Executed while processing withdraw_unbonded.
fn process_withdraw_rate<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
historical_time: u64,
hub_balance: Uint128,
) -> StdResult<()> {
// balance change of the hub contract must be checked.
let mut total_unbonded_amount = Uint128::zero();
let mut state = read_state(&deps.storage).load()?;
let balance_change = SignedInt::from_subtraction(hub_balance, state.prev_hub_balance);
state.actual_unbonded_amount += balance_change.0;
let last_processed_batch = state.last_processed_batch;
let mut batch_count: u64 = 0;
// Iterate over unbonded histories that have been processed
// to calculate newly added unbonded amount
let mut i = last_processed_batch + 1;
loop {
let history: UnbondHistory;
match read_unbond_history(&deps.storage, i) {
Ok(h) => {
if h.time > historical_time {
break;
}
if !h.released {
history = h.clone();
} else {
break;
}
}
Err(_) => break,
}
let burnt_amount = history.amount;
let historical_rate = history.withdraw_rate;
let unbonded_amount = burnt_amount * historical_rate;
total_unbonded_amount += unbonded_amount;
batch_count += 1;
i += 1;
}
if batch_count >= 1 {
// Use signed integer in case of some rogue transfers.
let slashed_amount =
SignedInt::from_subtraction(total_unbonded_amount, state.actual_unbonded_amount);
// Iterate again to calculate the withdraw rate for each unprocessed history
let mut iterator = last_processed_batch + 1;
loop {
let history: UnbondHistory;
match read_unbond_history(&deps.storage, iterator) {
Ok(h) => {
if h.time > historical_time {
break;
}
if !h.released {
history = h
} else {
break;
}
}
Err(_) => {
break;
}
}
let burnt_amount_of_batch = history.amount;
let historical_rate_of_batch = history.withdraw_rate;
let unbonded_amount_of_batch = burnt_amount_of_batch * historical_rate_of_batch;
// the slashed amount for each batch must be proportional to the unbonded amount of batch
let batch_slashing_weight =
Decimal::from_ratio(unbonded_amount_of_batch, total_unbonded_amount);
let mut slashed_amount_of_batch = batch_slashing_weight * slashed_amount.0;
let actual_unbonded_amount_of_batch: Uint128;
// If slashed amount is negative, there should be summation instead of subtraction.
if slashed_amount.1 {
slashed_amount_of_batch = (slashed_amount_of_batch - Uint128(1))?;
actual_unbonded_amount_of_batch =
unbonded_amount_of_batch + slashed_amount_of_batch;
} else {
if slashed_amount.0.u128() != 0u128 {
slashed_amount_of_batch += Uint128(1);
}
actual_unbonded_amount_of_batch =
SignedInt::from_subtraction(unbonded_amount_of_batch, slashed_amount_of_batch)
.0;
}
// Calculate the new withdraw rate
let new_withdraw_rate =
Decimal::from_ratio(actual_unbonded_amount_of_batch, burnt_amount_of_batch);
let mut history_for_i = history;
// store the history and mark it as released
history_for_i.withdraw_rate = new_withdraw_rate;
history_for_i.released = true;
store_unbond_history(&mut deps.storage, iterator, history_for_i)?;
state.last_processed_batch = iterator;
iterator += 1;
}
}
// Store state.actual_unbonded_amount for future new batches release
state.actual_unbonded_amount = Uint128::zero();
store_state(&mut deps.storage).save(&state)?;
Ok(())
}
fn pick_validator<S: Storage, A: Api, Q: Querier>(
deps: &mut Extern<S, A, Q>,
claim: Uint128,
delegator: HumanAddr,
block_height: u64,
) -> StdResult<Vec<CosmosMsg>> {
//read params
let params = read_parameters(&deps.storage).load()?;
let coin_denom = params.underlying_coin_denom;
let mut messages: Vec<CosmosMsg> = vec![];
let mut claimed = claim;
let all_delegations = deps
.querier
.query_all_delegations(delegator)
.expect("There must be at least one delegation");
// pick a random validator
// if it does not have requested amount, undelegate all it has
// and pick another random validator
let mut iteration_index = 0;
let mut deletable_delegations = all_delegations;
while claimed.0 > 0 {
let mut rng = XorShiftRng::seed_from_u64(block_height + iteration_index);
let random_index = rng.gen_range(0, deletable_delegations.len());
let delegation = deletable_delegations.remove(random_index);
let val = delegation.amount.amount;
let undelegated_amount: Uint128;
if val.0 > claimed.0 {
undelegated_amount = claimed;
claimed = Uint128::zero();
} else {
undelegated_amount = val;
claimed = (claimed - val)?;
}
if undelegated_amount.0 > 0 {
let msgs: CosmosMsg = CosmosMsg::Staking(StakingMsg::Undelegate {
validator: delegation.validator,
amount: coin(undelegated_amount.0, &*coin_denom),
});
messages.push(msgs);
}
iteration_index += 1;
}
Ok(messages)
} | Ok(last_state) | random_line_split |
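// Note on pick_validator's randomness (sketch, not part of the original file):
// seeding XorShiftRng from the block height keeps the validator choice
// deterministic, so every node replaying the same block undelegates from the
// same validators.
//
//     let mut rng = XorShiftRng::seed_from_u64(block_height + iteration_index);
//     let idx = rng.gen_range(0, delegations.len()); // identical on every node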
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Context as _,
fidl::prelude::*,
fidl_fuchsia_net_http as net_http,
fuchsia_async::{self as fasync, TimeoutExt as _},
fuchsia_component::server::{ServiceFs, ServiceFsDir},
fuchsia_hyper as fhyper,
fuchsia_zircon::{self as zx, AsHandleRef},
futures::{prelude::*, StreamExt},
hyper,
std::convert::TryFrom,
std::str::FromStr as _,
tracing::{debug, error, info, trace},
};
static MAX_REDIRECTS: u8 = 10;
static DEFAULT_DEADLINE_DURATION: zx::Duration = zx::Duration::from_seconds(15);
fn to_status_line(version: hyper::Version, status: hyper::StatusCode) -> Vec<u8> {
match status.canonical_reason() {
None => format!("{:?} {}", version, status.as_str()),
Some(canonical_reason) => format!("{:?} {} {}", version, status.as_str(), canonical_reason),
}
.as_bytes()
.to_vec()
}
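// For example, to_status_line(hyper::Version::HTTP_11, hyper::StatusCode::OK)
// returns the bytes of "HTTP/1.1 200 OK"; when hyper has no canonical reason
// phrase for a status, only the version and numeric code are emitted.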
fn tcp_options() -> fhyper::TcpOptions {
let mut options: fhyper::TcpOptions = std::default::Default::default();
// Use TCP keepalive to notice stuck connections.
// After 60s with no data received send a probe every 15s.
options.keepalive_idle = Some(std::time::Duration::from_secs(60));
options.keepalive_interval = Some(std::time::Duration::from_secs(15));
// After 8 probes go unacknowledged treat the connection as dead.
options.keepalive_count = Some(8);
options
}
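/// Redirect target extracted from a 3xx response: the resolved Location URL,
/// the referrer, and the HTTP method to use when following the redirect.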
struct RedirectInfo {
url: Option<hyper::Uri>,
referrer: Option<hyper::Uri>,
method: hyper::Method,
}
fn redirect_info(
old_uri: &hyper::Uri,
method: &hyper::Method,
hyper_response: &hyper::Response<hyper::Body>,
) -> Option<RedirectInfo> {
if hyper_response.status().is_redirection() {
Some(RedirectInfo {
url: hyper_response
.headers()
.get(hyper::header::LOCATION)
.and_then(|loc| calculate_redirect(old_uri, loc)),
referrer: hyper_response
.headers()
.get(hyper::header::REFERER)
.and_then(|loc| calculate_redirect(old_uri, loc)),
method: if hyper_response.status() == hyper::StatusCode::SEE_OTHER {
hyper::Method::GET
} else {
method.clone()
},
})
} else {
None
}
}
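// For instance, a 303 See Other response to a POST yields a RedirectInfo whose
// method is GET, while every other redirection status keeps the original
// method; the URL and referrer come from the Location and Referer headers
// resolved against the current URI.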
async fn to_success_response(
current_url: &hyper::Uri,
current_method: &hyper::Method,
mut hyper_response: hyper::Response<hyper::Body>,
) -> Result<net_http::Response, zx::Status> {
let redirect_info = redirect_info(current_url, current_method, &hyper_response);
let headers = hyper_response
.headers()
.iter()
.map(|(name, value)| net_http::Header {
name: name.as_str().as_bytes().to_vec(),
value: value.as_bytes().to_vec(),
})
.collect();
let (tx, rx) = zx::Socket::create(zx::SocketOpts::STREAM)?;
let response = net_http::Response {
error: None,
body: Some(rx),
final_url: Some(current_url.to_string()),
status_code: Some(hyper_response.status().as_u16() as u32),
status_line: Some(to_status_line(hyper_response.version(), hyper_response.status())),
headers: Some(headers),
redirect: redirect_info.map(|info| net_http::RedirectTarget {
method: Some(info.method.to_string()),
url: info.url.map(|u| u.to_string()),
referrer: info.referrer.map(|r| r.to_string()),
..net_http::RedirectTarget::EMPTY
}),
..net_http::Response::EMPTY
};
fasync::Task::spawn(async move {
let hyper_body = hyper_response.body_mut();
while let Some(chunk) = hyper_body.next().await {
if let Ok(chunk) = chunk {
let mut offset: usize = 0;
while offset < chunk.len() {
let pending = match tx.wait_handle(
zx::Signals::SOCKET_PEER_CLOSED | zx::Signals::SOCKET_WRITABLE,
zx::Time::INFINITE,
) {
Err(status) => {
error!("tx.wait() failed - status: {}", status);
return;
}
Ok(pending) => pending,
};
if pending.contains(zx::Signals::SOCKET_PEER_CLOSED) {
info!("tx.wait() saw signal SOCKET_PEER_CLOSED");
return;
}
assert!(pending.contains(zx::Signals::SOCKET_WRITABLE));
let written = match tx.write(&chunk[offset..]) {
Err(status) => {
// Because of the wait above, we shouldn't ever see SHOULD_WAIT here, but to avoid
// brittleness, continue and wait again in that case.
if status == zx::Status::SHOULD_WAIT {
error!("Saw SHOULD_WAIT despite waiting first - expected now? - continuing");
continue;
}
info!("tx.write() failed - status: {}", status);
return;
}
Ok(written) => written,
};
offset += written;
}
}
}
}).detach();
Ok(response)
}
fn to_fidl_error(error: &hyper::Error) -> net_http::Error {
#[allow(clippy::if_same_then_else)] // TODO(fxbug.dev/95028)
if error.is_parse() {
net_http::Error::UnableToParse
} else if error.is_user() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_canceled() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_closed() {
net_http::Error::ChannelClosed
} else if error.is_connect() {
net_http::Error::Connect
} else if error.is_incomplete_message() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_body_write_aborted() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else {
net_http::Error::Internal
}
}
fn to_error_response(error: net_http::Error) -> net_http::Response {
net_http::Response {
error: Some(error),
body: None,
final_url: None,
status_code: None,
status_line: None,
headers: None,
redirect: None,
..net_http::Response::EMPTY
}
}
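/// A single outgoing request parsed from a fuchsia.net.http `Request`, ready
/// to be issued against hyper (possibly several times while following
/// redirects).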
struct Loader {
method: hyper::Method,
url: hyper::Uri,
headers: hyper::HeaderMap,
body: Vec<u8>,
deadline: fasync::Time,
}
impl Loader {
async fn new(req: net_http::Request) -> Result<Self, anyhow::Error> {
let net_http::Request { method, url, headers, body, deadline, .. } = req;
let method = method.as_ref().map(|method| hyper::Method::from_str(method)).transpose()?;
let method = method.unwrap_or(hyper::Method::GET);
if let Some(url) = url {
let url = hyper::Uri::try_from(url)?;
let headers = headers
.unwrap_or_else(|| vec![])
.into_iter()
.map(|net_http::Header { name, value }| {
let name = hyper::header::HeaderName::from_bytes(&name)?;
let value = hyper::header::HeaderValue::from_bytes(&value)?;
Ok((name, value))
})
.collect::<Result<hyper::HeaderMap, anyhow::Error>>()?;
let body = match body {
Some(net_http::Body::Buffer(buffer)) => {
let mut bytes = vec![0; buffer.size as usize];
buffer.vmo.read(&mut bytes, 0)?;
bytes
}
Some(net_http::Body::Stream(socket)) => {
let mut stream = fasync::Socket::from_socket(socket)?
.into_datagram_stream()
.map(|r| r.context("reading from datagram stream"));
let mut bytes = Vec::new();
while let Some(chunk) = stream.next().await {
bytes.extend(chunk?);
}
bytes
}
None => Vec::new(),
};
let deadline = deadline
.map(|deadline| fasync::Time::from_nanos(deadline))
.unwrap_or_else(|| fasync::Time::after(DEFAULT_DEADLINE_DURATION));
trace!("Starting request {} {}", method, url);
Ok(Loader { method, url, headers, body, deadline })
} else {
Err(anyhow::Error::msg("Request missing URL"))
}
}
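// Build a fresh hyper request from the stored state; each attempt (including redirects) needs its
// own request value, so the method, URL, headers, and body are cloned here.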
fn build_request(&self) -> hyper::Request<hyper::Body> {
let Self { method, url, headers, body, deadline: _ } = self;
let mut request = hyper::Request::new(body.clone().into());
*request.method_mut() = method.clone();
*request.uri_mut() = url.clone();
*request.headers_mut() = headers.clone();
request
}
async fn | (mut self, loader_client: net_http::LoaderClientProxy) -> Result<(), zx::Status> {
let client = fhyper::new_https_client_from_tcp_options(tcp_options());
loop {
break match client.request(self.build_request()).await {
Ok(hyper_response) => {
let redirect = redirect_info(&self.url, &self.method, &hyper_response);
if let Some(redirect) = redirect {
if let Some(url) = redirect.url {
self.url = url;
self.method = redirect.method;
trace!(
"Reporting redirect to OnResponse: {} {}",
self.method,
self.url
);
let response =
to_success_response(&self.url, &self.method, hyper_response)
.await?;
match loader_client.on_response(response).await {
Ok(()) => {}
Err(e) => {
debug!("Not redirecting because: {}", e);
break Ok(());
}
};
trace!("Redirect allowed to {} {}", self.method, self.url);
continue;
}
}
let response =
to_success_response(&self.url, &self.method, hyper_response).await?;
// We don't care if on_response returns an error since this is the last
// callback.
let _: Result<_, _> = loader_client.on_response(response).await;
Ok(())
}
Err(error) => {
info!("Received network level error from hyper: {}", error);
// We don't care if on_response returns an error since this is the last
// callback.
let _: Result<_, _> =
loader_client.on_response(to_error_response(to_fidl_error(&error))).await;
Ok(())
}
};
}
}
async fn fetch(
mut self,
) -> Result<(hyper::Response<hyper::Body>, hyper::Uri, hyper::Method), net_http::Error> {
let deadline = self.deadline;
if deadline < fasync::Time::now() {
return Err(net_http::Error::DeadlineExceeded);
}
let client = fhyper::new_https_client_from_tcp_options(tcp_options());
async move {
let mut redirects = 0;
loop {
break match client.request(self.build_request()).await {
Ok(hyper_response) => {
if redirects != MAX_REDIRECTS {
let redirect = redirect_info(&self.url, &self.method, &hyper_response);
if let Some(redirect) = redirect {
if let Some(url) = redirect.url {
self.url = url;
self.method = redirect.method;
trace!("Redirecting to {} {}", self.method, self.url);
redirects += 1;
continue;
}
}
}
Ok((hyper_response, self.url, self.method))
}
Err(e) => {
info!("Received network level error from hyper: {}", e);
Err(to_fidl_error(&e))
}
};
}
}
.on_timeout(deadline, || Err(net_http::Error::DeadlineExceeded))
.await
}
}
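// Resolve the Location header against the previous URL: relative redirects inherit the old
// scheme and authority.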
fn calculate_redirect(
old_url: &hyper::Uri,
location: &hyper::header::HeaderValue,
) -> Option<hyper::Uri> {
let old_parts = old_url.clone().into_parts();
let mut new_parts = hyper::Uri::try_from(location.as_bytes()).ok()?.into_parts();
if new_parts.scheme.is_none() {
new_parts.scheme = old_parts.scheme;
}
if new_parts.authority.is_none() {
new_parts.authority = old_parts.authority;
}
Some(hyper::Uri::from_parts(new_parts).ok()?)
}
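// Handle one Loader connection, serving Fetch and Start requests concurrently until the stream
// closes; errors are logged rather than propagated.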
fn spawn_server(stream: net_http::LoaderRequestStream) {
fasync::Task::spawn(
async move {
stream
.err_into()
.try_for_each_concurrent(None, |message| async move {
match message {
net_http::LoaderRequest::Fetch { request, responder } => {
debug!(
"Fetch request received (url: {}): {:?}",
request
.url
.as_ref()
.and_then(|url| Some(url.as_str()))
.unwrap_or_default(),
request
);
let result = Loader::new(request).await?.fetch().await;
responder.send(match result {
Ok((hyper_response, final_url, final_method)) => {
to_success_response(&final_url, &final_method, hyper_response)
.await?
}
Err(error) => to_error_response(error),
})?;
}
net_http::LoaderRequest::Start { request, client, control_handle } => {
debug!(
"Start request received (url: {}): {:?}",
request
.url
.as_ref()
.and_then(|url| Some(url.as_str()))
.unwrap_or_default(),
request
);
Loader::new(request).await?.start(client.into_proxy()?).await?;
control_handle.shutdown();
}
}
Ok(())
})
.await
}
.unwrap_or_else(|e: anyhow::Error| error!("{:?}", e)),
)
.detach();
}
#[fuchsia::main]
async fn main() -> Result<(), anyhow::Error> {
let mut fs = ServiceFs::new();
let _: &mut ServiceFsDir<'_, _> = fs.dir("svc").add_fidl_service(spawn_server);
let _: &mut ServiceFs<_> = fs.take_and_serve_directory_handle()?;
let () = fs.collect().await;
Ok(())
}
| start | identifier_name |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Context as _,
fidl::prelude::*,
fidl_fuchsia_net_http as net_http,
fuchsia_async::{self as fasync, TimeoutExt as _},
fuchsia_component::server::{ServiceFs, ServiceFsDir},
fuchsia_hyper as fhyper,
fuchsia_zircon::{self as zx, AsHandleRef},
futures::{prelude::*, StreamExt},
hyper,
std::convert::TryFrom,
std::str::FromStr as _,
tracing::{debug, error, info, trace},
};
static MAX_REDIRECTS: u8 = 10;
static DEFAULT_DEADLINE_DURATION: zx::Duration = zx::Duration::from_seconds(15);
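// Render the status line (e.g. "HTTP/1.1 200 OK") as bytes; the {:?} on Version produces the
// protocol text.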
fn to_status_line(version: hyper::Version, status: hyper::StatusCode) -> Vec<u8> {
match status.canonical_reason() {
None => format!("{:?} {}", version, status.as_str()),
Some(canonical_reason) => format!("{:?} {} {}", version, status.as_str(), canonical_reason),
}
.as_bytes()
.to_vec()
}
fn tcp_options() -> fhyper::TcpOptions {
let mut options: fhyper::TcpOptions = std::default::Default::default();
// Use TCP keepalive to notice stuck connections.
// After 60s with no data received send a probe every 15s.
options.keepalive_idle = Some(std::time::Duration::from_secs(60));
options.keepalive_interval = Some(std::time::Duration::from_secs(15));
// After 8 probes go unacknowledged treat the connection as dead.
options.keepalive_count = Some(8);
options
}
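// A redirect extracted from a 3xx response: the resolved target, the referrer, and the method to
// use when following it.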
struct RedirectInfo {
url: Option<hyper::Uri>,
referrer: Option<hyper::Uri>,
method: hyper::Method,
}
fn redirect_info(
old_uri: &hyper::Uri,
method: &hyper::Method,
hyper_response: &hyper::Response<hyper::Body>,
) -> Option<RedirectInfo> {
if hyper_response.status().is_redirection() {
Some(RedirectInfo {
url: hyper_response
.headers()
.get(hyper::header::LOCATION)
.and_then(|loc| calculate_redirect(old_uri, loc)),
referrer: hyper_response
.headers()
.get(hyper::header::REFERER)
.and_then(|loc| calculate_redirect(old_uri, loc)),
method: if hyper_response.status() == hyper::StatusCode::SEE_OTHER {
hyper::Method::GET
} else {
method.clone()
},
})
} else {
None
}
}
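// Convert a hyper response into a FIDL Response table; the body is streamed to the client through
// a freshly created zircon socket pair.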
async fn to_success_response(
current_url: &hyper::Uri,
current_method: &hyper::Method,
mut hyper_response: hyper::Response<hyper::Body>,
) -> Result<net_http::Response, zx::Status> {
let redirect_info = redirect_info(current_url, current_method, &hyper_response);
let headers = hyper_response
.headers()
.iter()
.map(|(name, value)| net_http::Header {
name: name.as_str().as_bytes().to_vec(),
value: value.as_bytes().to_vec(),
})
.collect();
let (tx, rx) = zx::Socket::create(zx::SocketOpts::STREAM)?;
let response = net_http::Response {
error: None,
body: Some(rx),
final_url: Some(current_url.to_string()),
status_code: Some(hyper_response.status().as_u16() as u32),
status_line: Some(to_status_line(hyper_response.version(), hyper_response.status())),
headers: Some(headers),
redirect: redirect_info.map(|info| net_http::RedirectTarget {
method: Some(info.method.to_string()),
url: info.url.map(|u| u.to_string()),
referrer: info.referrer.map(|r| r.to_string()),
..net_http::RedirectTarget::EMPTY
}),
..net_http::Response::EMPTY
};
fasync::Task::spawn(async move {
let hyper_body = hyper_response.body_mut();
while let Some(chunk) = hyper_body.next().await {
if let Ok(chunk) = chunk {
let mut offset: usize = 0;
while offset < chunk.len() {
let pending = match tx.wait_handle(
zx::Signals::SOCKET_PEER_CLOSED | zx::Signals::SOCKET_WRITABLE,
zx::Time::INFINITE,
) {
Err(status) => {
error!("tx.wait() failed - status: {}", status);
return;
}
Ok(pending) => pending,
};
if pending.contains(zx::Signals::SOCKET_PEER_CLOSED) {
info!("tx.wait() saw signal SOCKET_PEER_CLOSED");
return;
}
assert!(pending.contains(zx::Signals::SOCKET_WRITABLE));
let written = match tx.write(&chunk[offset..]) {
Err(status) => {
// Because of the wait above, we shouldn't ever see SHOULD_WAIT here, but to avoid
// brittleness, continue and wait again in that case.
if status == zx::Status::SHOULD_WAIT {
error!("Saw SHOULD_WAIT despite waiting first - expected now? - continuing");
continue;
}
info!("tx.write() failed - status: {}", status);
return;
}
Ok(written) => written,
};
offset += written;
}
}
}
}).detach();
Ok(response)
}
fn to_fidl_error(error: &hyper::Error) -> net_http::Error {
#[allow(clippy::if_same_then_else)] // TODO(fxbug.dev/95028)
if error.is_parse() {
net_http::Error::UnableToParse
} else if error.is_user() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_canceled() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_closed() {
net_http::Error::ChannelClosed
} else if error.is_connect() {
net_http::Error::Connect
} else if error.is_incomplete_message() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_body_write_aborted() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else {
net_http::Error::Internal
}
}
fn to_error_response(error: net_http::Error) -> net_http::Response {
net_http::Response {
error: Some(error),
body: None,
final_url: None,
status_code: None,
status_line: None,
headers: None,
redirect: None,
..net_http::Response::EMPTY
}
}
struct Loader {
method: hyper::Method,
url: hyper::Uri,
headers: hyper::HeaderMap,
body: Vec<u8>,
deadline: fasync::Time,
}
impl Loader {
async fn new(req: net_http::Request) -> Result<Self, anyhow::Error> {
let net_http::Request { method, url, headers, body, deadline, .. } = req;
let method = method.as_ref().map(|method| hyper::Method::from_str(method)).transpose()?;
let method = method.unwrap_or(hyper::Method::GET);
if let Some(url) = url {
let url = hyper::Uri::try_from(url)?;
let headers = headers
.unwrap_or_else(|| vec![])
.into_iter()
.map(|net_http::Header { name, value }| {
let name = hyper::header::HeaderName::from_bytes(&name)?;
let value = hyper::header::HeaderValue::from_bytes(&value)?;
Ok((name, value))
})
.collect::<Result<hyper::HeaderMap, anyhow::Error>>()?;
let body = match body {
Some(net_http::Body::Buffer(buffer)) => {
let mut bytes = vec![0; buffer.size as usize];
buffer.vmo.read(&mut bytes, 0)?;
bytes
}
Some(net_http::Body::Stream(socket)) => {
let mut stream = fasync::Socket::from_socket(socket)?
.into_datagram_stream()
.map(|r| r.context("reading from datagram stream"));
let mut bytes = Vec::new();
while let Some(chunk) = stream.next().await {
bytes.extend(chunk?);
}
bytes
}
None => Vec::new(),
};
let deadline = deadline
.map(|deadline| fasync::Time::from_nanos(deadline))
.unwrap_or_else(|| fasync::Time::after(DEFAULT_DEADLINE_DURATION));
trace!("Starting request {} {}", method, url);
Ok(Loader { method, url, headers, body, deadline })
} else {
Err(anyhow::Error::msg("Request missing URL"))
}
}
fn build_request(&self) -> hyper::Request<hyper::Body> {
let Self { method, url, headers, body, deadline: _ } = self;
let mut request = hyper::Request::new(body.clone().into());
*request.method_mut() = method.clone();
*request.uri_mut() = url.clone();
*request.headers_mut() = headers.clone();
request
}
async fn start(mut self, loader_client: net_http::LoaderClientProxy) -> Result<(), zx::Status> |
async fn fetch(
mut self,
) -> Result<(hyper::Response<hyper::Body>, hyper::Uri, hyper::Method), net_http::Error> {
let deadline = self.deadline;
if deadline < fasync::Time::now() {
return Err(net_http::Error::DeadlineExceeded);
}
let client = fhyper::new_https_client_from_tcp_options(tcp_options());
async move {
let mut redirects = 0;
loop {
break match client.request(self.build_request()).await {
Ok(hyper_response) => {
if redirects != MAX_REDIRECTS {
let redirect = redirect_info(&self.url, &self.method, &hyper_response);
if let Some(redirect) = redirect {
if let Some(url) = redirect.url {
self.url = url;
self.method = redirect.method;
trace!("Redirecting to {} {}", self.method, self.url);
redirects += 1;
continue;
}
}
}
Ok((hyper_response, self.url, self.method))
}
Err(e) => {
info!("Received network level error from hyper: {}", e);
Err(to_fidl_error(&e))
}
};
}
}
.on_timeout(deadline, || Err(net_http::Error::DeadlineExceeded))
.await
}
}
fn calculate_redirect(
old_url: &hyper::Uri,
location: &hyper::header::HeaderValue,
) -> Option<hyper::Uri> {
let old_parts = old_url.clone().into_parts();
let mut new_parts = hyper::Uri::try_from(location.as_bytes()).ok()?.into_parts();
if new_parts.scheme.is_none() {
new_parts.scheme = old_parts.scheme;
}
if new_parts.authority.is_none() {
new_parts.authority = old_parts.authority;
}
Some(hyper::Uri::from_parts(new_parts).ok()?)
}
fn spawn_server(stream: net_http::LoaderRequestStream) {
fasync::Task::spawn(
async move {
stream
.err_into()
.try_for_each_concurrent(None, |message| async move {
match message {
net_http::LoaderRequest::Fetch { request, responder } => {
debug!(
"Fetch request received (url: {}): {:?}",
request
.url
.as_ref()
.and_then(|url| Some(url.as_str()))
.unwrap_or_default(),
request
);
let result = Loader::new(request).await?.fetch().await;
responder.send(match result {
Ok((hyper_response, final_url, final_method)) => {
to_success_response(&final_url, &final_method, hyper_response)
.await?
}
Err(error) => to_error_response(error),
})?;
}
net_http::LoaderRequest::Start { request, client, control_handle } => {
debug!(
"Start request received (url: {}): {:?}",
request
.url
.as_ref()
.and_then(|url| Some(url.as_str()))
.unwrap_or_default(),
request
);
Loader::new(request).await?.start(client.into_proxy()?).await?;
control_handle.shutdown();
}
}
Ok(())
})
.await
}
.unwrap_or_else(|e: anyhow::Error| error!("{:?}", e)),
)
.detach();
}
#[fuchsia::main]
async fn main() -> Result<(), anyhow::Error> {
let mut fs = ServiceFs::new();
let _: &mut ServiceFsDir<'_, _> = fs.dir("svc").add_fidl_service(spawn_server);
let _: &mut ServiceFs<_> = fs.take_and_serve_directory_handle()?;
let () = fs.collect().await;
Ok(())
}
| {
let client = fhyper::new_https_client_from_tcp_options(tcp_options());
loop {
break match client.request(self.build_request()).await {
Ok(hyper_response) => {
let redirect = redirect_info(&self.url, &self.method, &hyper_response);
if let Some(redirect) = redirect {
if let Some(url) = redirect.url {
self.url = url;
self.method = redirect.method;
trace!(
"Reporting redirect to OnResponse: {} {}",
self.method,
self.url
);
let response =
to_success_response(&self.url, &self.method, hyper_response)
.await?;
match loader_client.on_response(response).await {
Ok(()) => {}
Err(e) => {
debug!("Not redirecting because: {}", e);
break Ok(());
}
};
trace!("Redirect allowed to {} {}", self.method, self.url);
continue;
}
}
let response =
to_success_response(&self.url, &self.method, hyper_response).await?;
// We don't care if on_response returns an error since this is the last
// callback.
let _: Result<_, _> = loader_client.on_response(response).await;
Ok(())
}
Err(error) => {
info!("Received network level error from hyper: {}", error);
// We don't care if on_response returns an error since this is the last
// callback.
let _: Result<_, _> =
loader_client.on_response(to_error_response(to_fidl_error(&error))).await;
Ok(())
}
};
}
} | identifier_body |
main.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Context as _,
fidl::prelude::*,
fidl_fuchsia_net_http as net_http,
fuchsia_async::{self as fasync, TimeoutExt as _},
fuchsia_component::server::{ServiceFs, ServiceFsDir},
fuchsia_hyper as fhyper,
fuchsia_zircon::{self as zx, AsHandleRef},
futures::{prelude::*, StreamExt},
hyper,
std::convert::TryFrom,
std::str::FromStr as _,
tracing::{debug, error, info, trace},
};
static MAX_REDIRECTS: u8 = 10;
static DEFAULT_DEADLINE_DURATION: zx::Duration = zx::Duration::from_seconds(15);
fn to_status_line(version: hyper::Version, status: hyper::StatusCode) -> Vec<u8> {
match status.canonical_reason() {
None => format!("{:?} {}", version, status.as_str()),
Some(canonical_reason) => format!("{:?} {} {}", version, status.as_str(), canonical_reason),
}
.as_bytes()
.to_vec()
}
fn tcp_options() -> fhyper::TcpOptions {
let mut options: fhyper::TcpOptions = std::default::Default::default();
// Use TCP keepalive to notice stuck connections.
// After 60s with no data received send a probe every 15s.
options.keepalive_idle = Some(std::time::Duration::from_secs(60));
options.keepalive_interval = Some(std::time::Duration::from_secs(15));
// After 8 probes go unacknowledged treat the connection as dead.
options.keepalive_count = Some(8);
options
}
struct RedirectInfo {
url: Option<hyper::Uri>,
referrer: Option<hyper::Uri>,
method: hyper::Method,
}
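// Inspect a response for a redirect: on a 3xx status, capture Location/Referer resolved against
// the old URL, and switch to GET for 303 See Other.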
fn redirect_info(
old_uri: &hyper::Uri,
method: &hyper::Method,
hyper_response: &hyper::Response<hyper::Body>,
) -> Option<RedirectInfo> {
if hyper_response.status().is_redirection() {
Some(RedirectInfo {
url: hyper_response
.headers()
.get(hyper::header::LOCATION)
.and_then(|loc| calculate_redirect(old_uri, loc)),
referrer: hyper_response
.headers()
.get(hyper::header::REFERER)
.and_then(|loc| calculate_redirect(old_uri, loc)),
method: if hyper_response.status() == hyper::StatusCode::SEE_OTHER {
hyper::Method::GET
} else {
method.clone()
},
})
} else {
None
}
}
async fn to_success_response(
current_url: &hyper::Uri,
current_method: &hyper::Method,
mut hyper_response: hyper::Response<hyper::Body>,
) -> Result<net_http::Response, zx::Status> {
let redirect_info = redirect_info(current_url, current_method, &hyper_response);
let headers = hyper_response
.headers()
.iter()
.map(|(name, value)| net_http::Header {
name: name.as_str().as_bytes().to_vec(),
value: value.as_bytes().to_vec(),
})
.collect();
let (tx, rx) = zx::Socket::create(zx::SocketOpts::STREAM)?;
let response = net_http::Response {
error: None,
body: Some(rx),
final_url: Some(current_url.to_string()),
status_code: Some(hyper_response.status().as_u16() as u32),
status_line: Some(to_status_line(hyper_response.version(), hyper_response.status())),
headers: Some(headers),
redirect: redirect_info.map(|info| net_http::RedirectTarget {
method: Some(info.method.to_string()),
url: info.url.map(|u| u.to_string()),
referrer: info.referrer.map(|r| r.to_string()),
..net_http::RedirectTarget::EMPTY
}),
..net_http::Response::EMPTY
};
fasync::Task::spawn(async move {
let hyper_body = hyper_response.body_mut();
while let Some(chunk) = hyper_body.next().await {
if let Ok(chunk) = chunk {
let mut offset: usize = 0;
while offset < chunk.len() {
let pending = match tx.wait_handle(
zx::Signals::SOCKET_PEER_CLOSED | zx::Signals::SOCKET_WRITABLE,
zx::Time::INFINITE,
) {
Err(status) => {
error!("tx.wait() failed - status: {}", status);
return;
}
Ok(pending) => pending,
};
if pending.contains(zx::Signals::SOCKET_PEER_CLOSED) {
info!("tx.wait() saw signal SOCKET_PEER_CLOSED");
return;
}
assert!(pending.contains(zx::Signals::SOCKET_WRITABLE));
let written = match tx.write(&chunk[offset..]) {
Err(status) => {
// Because of the wait above, we shouldn't ever see SHOULD_WAIT here, but to avoid
// brittleness, continue and wait again in that case.
if status == zx::Status::SHOULD_WAIT {
error!("Saw SHOULD_WAIT despite waiting first - expected now? - continuing");
continue;
}
info!("tx.write() failed - status: {}", status);
return;
}
Ok(written) => written,
};
offset += written;
}
}
}
}).detach();
Ok(response)
}
fn to_fidl_error(error: &hyper::Error) -> net_http::Error {
#[allow(clippy::if_same_then_else)] // TODO(fxbug.dev/95028)
if error.is_parse() {
net_http::Error::UnableToParse
} else if error.is_user() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_canceled() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_closed() {
net_http::Error::ChannelClosed
} else if error.is_connect() {
net_http::Error::Connect
} else if error.is_incomplete_message() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else if error.is_body_write_aborted() {
//TODO(zmbush): handle this case.
net_http::Error::Internal
} else {
net_http::Error::Internal
} | }
fn to_error_response(error: net_http::Error) -> net_http::Response {
net_http::Response {
error: Some(error),
body: None,
final_url: None,
status_code: None,
status_line: None,
headers: None,
redirect: None,
..net_http::Response::EMPTY
}
}
struct Loader {
method: hyper::Method,
url: hyper::Uri,
headers: hyper::HeaderMap,
body: Vec<u8>,
deadline: fasync::Time,
}
impl Loader {
async fn new(req: net_http::Request) -> Result<Self, anyhow::Error> {
let net_http::Request { method, url, headers, body, deadline, .. } = req;
let method = method.as_ref().map(|method| hyper::Method::from_str(method)).transpose()?;
let method = method.unwrap_or(hyper::Method::GET);
if let Some(url) = url {
let url = hyper::Uri::try_from(url)?;
let headers = headers
.unwrap_or_else(|| vec![])
.into_iter()
.map(|net_http::Header { name, value }| {
let name = hyper::header::HeaderName::from_bytes(&name)?;
let value = hyper::header::HeaderValue::from_bytes(&value)?;
Ok((name, value))
})
.collect::<Result<hyper::HeaderMap, anyhow::Error>>()?;
let body = match body {
Some(net_http::Body::Buffer(buffer)) => {
let mut bytes = vec![0; buffer.size as usize];
buffer.vmo.read(&mut bytes, 0)?;
bytes
}
Some(net_http::Body::Stream(socket)) => {
let mut stream = fasync::Socket::from_socket(socket)?
.into_datagram_stream()
.map(|r| r.context("reading from datagram stream"));
let mut bytes = Vec::new();
while let Some(chunk) = stream.next().await {
bytes.extend(chunk?);
}
bytes
}
None => Vec::new(),
};
let deadline = deadline
.map(|deadline| fasync::Time::from_nanos(deadline))
.unwrap_or_else(|| fasync::Time::after(DEFAULT_DEADLINE_DURATION));
trace!("Starting request {} {}", method, url);
Ok(Loader { method, url, headers, body, deadline })
} else {
Err(anyhow::Error::msg("Request missing URL"))
}
}
fn build_request(&self) -> hyper::Request<hyper::Body> {
let Self { method, url, headers, body, deadline: _ } = self;
let mut request = hyper::Request::new(body.clone().into());
*request.method_mut() = method.clone();
*request.uri_mut() = url.clone();
*request.headers_mut() = headers.clone();
request
}
async fn start(mut self, loader_client: net_http::LoaderClientProxy) -> Result<(), zx::Status> {
let client = fhyper::new_https_client_from_tcp_options(tcp_options());
loop {
break match client.request(self.build_request()).await {
Ok(hyper_response) => {
let redirect = redirect_info(&self.url, &self.method, &hyper_response);
if let Some(redirect) = redirect {
if let Some(url) = redirect.url {
self.url = url;
self.method = redirect.method;
trace!(
"Reporting redirect to OnResponse: {} {}",
self.method,
self.url
);
let response =
to_success_response(&self.url, &self.method, hyper_response)
.await?;
match loader_client.on_response(response).await {
Ok(()) => {}
Err(e) => {
debug!("Not redirecting because: {}", e);
break Ok(());
}
};
trace!("Redirect allowed to {} {}", self.method, self.url);
continue;
}
}
let response =
to_success_response(&self.url, &self.method, hyper_response).await?;
// We don't care if on_response returns an error since this is the last
// callback.
let _: Result<_, _> = loader_client.on_response(response).await;
Ok(())
}
Err(error) => {
info!("Received network level error from hyper: {}", error);
// We don't care if on_response returns an error since this is the last
// callback.
let _: Result<_, _> =
loader_client.on_response(to_error_response(to_fidl_error(&error))).await;
Ok(())
}
};
}
}
async fn fetch(
mut self,
) -> Result<(hyper::Response<hyper::Body>, hyper::Uri, hyper::Method), net_http::Error> {
let deadline = self.deadline;
if deadline < fasync::Time::now() {
return Err(net_http::Error::DeadlineExceeded);
}
let client = fhyper::new_https_client_from_tcp_options(tcp_options());
async move {
let mut redirects = 0;
loop {
break match client.request(self.build_request()).await {
Ok(hyper_response) => {
if redirects != MAX_REDIRECTS {
let redirect = redirect_info(&self.url, &self.method, &hyper_response);
if let Some(redirect) = redirect {
if let Some(url) = redirect.url {
self.url = url;
self.method = redirect.method;
trace!("Redirecting to {} {}", self.method, self.url);
redirects += 1;
continue;
}
}
}
Ok((hyper_response, self.url, self.method))
}
Err(e) => {
info!("Received network level error from hyper: {}", e);
Err(to_fidl_error(&e))
}
};
}
}
.on_timeout(deadline, || Err(net_http::Error::DeadlineExceeded))
.await
}
}
fn calculate_redirect(
old_url: &hyper::Uri,
location: &hyper::header::HeaderValue,
) -> Option<hyper::Uri> {
let old_parts = old_url.clone().into_parts();
let mut new_parts = hyper::Uri::try_from(location.as_bytes()).ok()?.into_parts();
if new_parts.scheme.is_none() {
new_parts.scheme = old_parts.scheme;
}
if new_parts.authority.is_none() {
new_parts.authority = old_parts.authority;
}
Some(hyper::Uri::from_parts(new_parts).ok()?)
}
fn spawn_server(stream: net_http::LoaderRequestStream) {
fasync::Task::spawn(
async move {
stream
.err_into()
.try_for_each_concurrent(None, |message| async move {
match message {
net_http::LoaderRequest::Fetch { request, responder } => {
debug!(
"Fetch request received (url: {}): {:?}",
request
.url
.as_ref()
.and_then(|url| Some(url.as_str()))
.unwrap_or_default(),
request
);
let result = Loader::new(request).await?.fetch().await;
responder.send(match result {
Ok((hyper_response, final_url, final_method)) => {
to_success_response(&final_url, &final_method, hyper_response)
.await?
}
Err(error) => to_error_response(error),
})?;
}
net_http::LoaderRequest::Start { request, client, control_handle } => {
debug!(
"Start request received (url: {}): {:?}",
request
.url
.as_ref()
.and_then(|url| Some(url.as_str()))
.unwrap_or_default(),
request
);
Loader::new(request).await?.start(client.into_proxy()?).await?;
control_handle.shutdown();
}
}
Ok(())
})
.await
}
.unwrap_or_else(|e: anyhow::Error| error!("{:?}", e)),
)
.detach();
}
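// Component entry point: publish the Loader service in the outgoing /svc directory and process
// incoming connections.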
#[fuchsia::main]
async fn main() -> Result<(), anyhow::Error> {
let mut fs = ServiceFs::new();
let _: &mut ServiceFsDir<'_, _> = fs.dir("svc").add_fidl_service(spawn_server);
let _: &mut ServiceFs<_> = fs.take_and_serve_directory_handle()?;
let () = fs.collect().await;
Ok(())
} | random_line_split |
|
spritecfg.rs | #![allow(dead_code)]
extern crate asar;
use std::path::{PathBuf, Path};
use std::io::prelude::*;
use std::fs::{File, OpenOptions};
use nom::*;
use asar::rom::RomBuf;
use parse_aux::dys_prefix;
use genus::Genus;
use dys_tables::DysTables;
use insert_err::{InsertResult, format_result, warnless_result, single_error};
#[derive(Debug)]
pub struct CfgErr {
explain: String,
}
#[derive(Debug)]
pub struct SpriteCfg {
pub genus: Genus,
pub id: u16,
pub tweak_bytes: [u8; 6],
pub prop_bytes: [u8; 2],
pub clipping: [u8; 4],
dys_option_bytes: [u8; 2],
acts_like: u8,
extra_bytes: u8,
name: String,
desc: String,
name_set: Option<String>,
desc_set: Option<String>,
source_path: PathBuf,
}
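// Addresses of the assembled MAIN, INIT, and DROP routines as reported by the sprite's print
// output; zero means the routine is absent.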
#[derive(Debug, Copy, Clone)]
pub struct InsertPoint {
pub main: usize,
pub init: usize,
pub drop: usize,
}
impl SpriteCfg {
pub fn parse(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
if let IResult::Done(rest, vsn) = dys_prefix(buf) {
if vsn != 1 {
return Err(CfgErr { explain: String::from("You have a cfg from the future") });
} else {
parse_newstyle(path, gen, id, rest)
}
} else {
parse_oldstyle(path, gen, id, buf)
}
}
pub fn new() -> SpriteCfg {
SpriteCfg {
genus: Genus::Std,
id: 0,
tweak_bytes: [0, 0, 0, 0, 0, 0],
prop_bytes: [0, 0],
clipping: [0, 0, 0, 0],
dys_option_bytes: [0, 0],
acts_like: 0,
extra_bytes: 0,
name: "".to_string(),
desc: "".to_string(),
name_set: None,
desc_set: None,
source_path: PathBuf::from(""),
}
}
pub fn needs_init(&self) -> bool {
match self.genus {
Genus::Std => true,
_ => false,
}
}
pub fn needs_drop(&self) -> bool {
match self.genus {
Genus::Std => self.dys_option_bytes[1] & 0x80 != 0,
_ => false,
}
}
pub fn placeable(&self) -> bool { self.genus.placeable() }
pub fn assemble(
&self,
rom: &mut RomBuf,
prelude: &str,
source: &Path,
temp: &Path,
iopts: ::insert_opts::InsertOpts
) -> InsertResult<InsertPoint> {
let (mut main, mut init, mut drop) = (0usize, 0usize, 0usize);
{
let mut tempasm = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(temp)
.unwrap();
tempasm.write_all(prelude.as_bytes()).unwrap();
let mut source_buf = Vec::<u8>::with_capacity(8 * 1024); // A wild guess.
let mut srcf = warnless_result(
File::open(source),
|e| format!("error opening \"{}\": {}", source.to_string_lossy(), e)
)?;
srcf.read_to_end(&mut source_buf).unwrap();
tempasm.write_all(&source_buf).unwrap();
}
let warns = match asar::patch(temp, rom) {
Ok((_, mut ws)) => ws.drain(..).map(|w| w.into()).collect(),
Err((mut es, mut ws)) => {
return Err(
(es.drain(..).map(|e| e.into()).collect(),
ws.drain(..).map(|w| w.into()).collect())
)
},
};
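// The sprite's asm is expected to `print` its entry points; scan asar's print output for
// MAIN/INIT/DROP followed by a hex offset.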
for print in asar::prints() {
let mut chunks = print.split_whitespace();
let fst = chunks.next();
let snd = chunks.next();
match fst {
Some("MAIN") => match snd {
Some(ofs) => main = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"MAIN\" declaration"),
},
Some("INIT") => match snd {
Some(ofs) => init = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"INIT\" declaration"),
},
Some("DROP") => match snd {
Some(ofs) => drop = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"DROP\" declaration"),
},
None => (),
_ => return single_error("The sprite printed something other than MAIN, INIT, or DROP"),
}
};
if main == 0 {
return single_error("No main routine");
}
if init == 0 && self.needs_init() {
return single_error("No init routine");
}
if drop == 0 && self.needs_drop() {
return single_error("Drop routine required by dys_opts, but not provided");
}
if drop != 0 && !self.needs_drop() {
return single_error("Sprite has a drop routine, but dys_opts doesn't require one");
}
if self.needs_drop() && !iopts.use_drops {
return single_error("Sprite needs a drop routine, but drop routines aren't enabled");
}
Ok((InsertPoint { main, init, drop }, warns))
}
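// Write the per-sprite tables: insertion size (extra bytes + 3), genus, acts-like, tweak bytes,
// dys options, extended properties, and clipping. Class sprites have no config tables, and ids at
// or above 0x200 are skipped.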
pub fn apply_cfg(&self, rom: &mut RomBuf, tables: &DysTables) {
match self.genus {
Genus::Std
| Genus::Gen
| Genus::Sht
| Genus::R1s => {
if self.id < 0x200 {
let size_ofs = if self.id < 0x100 {
self.id as usize
} else {
self.id as usize + 0x100
};
let size = self.extra_bytes + 3;
rom.set_byte(tables.sprite_sizes + size_ofs, size).unwrap();
rom.set_byte(tables.sprite_sizes + size_ofs + 0x100, size).unwrap();
let optbase = tables.option_bytes + (self.id as usize * 0x10);
rom.set_byte(optbase, self.genus.to_byte()).unwrap();
rom.set_byte(optbase + 1, self.acts_like).unwrap();
rom.set_bytes(optbase + 2, &self.tweak_bytes).unwrap();
rom.set_bytes(optbase + 8, &self.dys_option_bytes).unwrap();
rom.set_bytes(optbase + 14, &self.prop_bytes).unwrap();
rom.set_bytes(optbase + 10, &self.clipping).unwrap();
};
},
Genus::Cls => {},
_ => unimplemented!(),
};
}
pub fn apply_offsets(&self, rom: &mut RomBuf, tables: &DysTables, ip: InsertPoint) {
let ofs = self.id as usize * 3;
match self.genus {
g if g.placeable() => {
rom.set_long(tables.main_ptrs + ofs, ip.main as u32).unwrap();
rom.set_long(tables.init_ptrs + ofs, ip.init as u32).unwrap();
if tables.drop_ptrs != 0 {
rom.set_long(tables.drop_ptrs + ofs, ip.drop as u32).unwrap();
}
},
Genus::Cls => rom.set_long(tables.cls_ptrs + ofs, ip.main as u32).unwrap(),
_ => unimplemented!(),
};
}
pub fn name(&self, ebit: bool) -> &String {
if ebit && self.name_set.is_some() {
self.name_set.as_ref().unwrap()
} else {
&self.name
}
}
pub fn desc(&self, ebit: bool) -> &String {
if ebit && self.desc_set.is_some() {
self.desc_set.as_ref().unwrap()
} else {
&self.desc
}
}
pub fn uses_ebit(&self) -> bool { self.name_set.is_some() }
pub fn place_mw2(&self, target: &mut Vec<u8>, ebit: bool) {
if !self.placeable() { panic!("Attempted to place unplaceable sprite") };
let b0 = 0x89;
let b1 = 0x80;
let num_extra_bit: u8 = if self.id & 0x100 == 0 { 0 } else { 8 };
let ebit_val: u8 = if !ebit { 0 } else { 4 };
let b0 = b0 | num_extra_bit | ebit_val;
target.push(b0);
target.push(b1);
if self.id >= 0x200 {
target.push(0xf8 + self.extra_bytes);
}
target.push((self.id & 0xff) as u8);
for _ in 0 .. self.extra_bytes { target.push(0); };
}
pub fn dys_option_bytes(&self) -> &[u8] { &self.dys_option_bytes }
pub fn source_path(&self) -> &PathBuf { &self.source_path }
}
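// Derive the display name from the cfg file stem (falling back to a generic name), plus an
// "(extra bit set)" variant.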
fn default_name(path: &Path, gen: Genus, id: u16) -> (String, String) {
let root = match path.file_stem() {
Some(s) => s.to_string_lossy().into_owned(),
None => format!("Custom {} #{:03x}", gen.shortname(), id),
};
(root.clone(), root + " (extra bit set)")
}
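// Parse the new-style `name: value` format, filling in defaults for a missing name/description
// and requiring a source file.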
fn parse_newstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
let (mut got_name, mut got_desc): (Option<String>, Option<String>) = (None, None);
let mut cfg = SpriteCfg { genus: gen, id: id, .. SpriteCfg::new() };
let mut buf = buf;
while let IResult::Done(rest, (name, value)) = cfg_line(buf) {
buf = rest;
match name {
"acts-like" => cfg.acts_like = try!(read_byte(value)),
"source" => cfg.source_path = path.with_file_name(value),
"props" => try!(read_bytes(value, &mut cfg.tweak_bytes)),
"xbytes" => cfg.extra_bytes = try!(read_byte(value)),
"ext-props" => try!(read_bytes(value, &mut cfg.prop_bytes)),
"dys-opts" => try!(read_bytes(value, &mut cfg.dys_option_bytes)),
"ext-clip" => try!(read_bytes(value, &mut cfg.clipping)),
"name" => got_name = Some(String::from(value)),
"description" => got_desc = Some(String::from(value)),
"desc-set" => cfg.desc_set = Some(String::from(value)),
"name-set" => cfg.name_set = Some(String::from(value)),
"ext-prop-def" | "m16d" | "tilemap" => (),
_ => return Err(CfgErr { explain: format!("bad field name: \"{}\"", name) }),
};
};
if let Some(s) = got_name {
cfg.name = s;
} else {
let t = default_name(path, gen, id);
cfg.name = t.0;
cfg.name_set = Some(t.1);
};
if let Some(s) = got_desc {
cfg.desc = s;
} else {
cfg.desc = cfg.name.clone();
cfg.desc_set = cfg.name_set.clone();
};
if cfg.source_path.file_name() == None {
Err(CfgErr { explain: String::from("Sprite needs a source file") })
} else {
Ok(cfg)
}
}
fn parse_oldstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> |
fn read_byte(s: &str) -> Result<u8, CfgErr> {
let iter = s.trim().chars();
let mut n = 0u32;
let mut read = false;
for ch in iter {
if let Some(v) = ch.to_digit(0x10) {
n *= 0x10;
n += v;
read = true;
} else {
return Err(CfgErr { explain: String::from("Non-byte data in byte field") })
}
}
if !read { Err(CfgErr { explain: String::from("Expected a byte, found nothing") }) } else { Ok(n as u8) }
}
fn read_bytes(s: &str, buf: &mut [u8]) -> Result<(), CfgErr> {
let mut bytes = Vec::<u8>::with_capacity(buf.len());
for b in s.split_whitespace() {
bytes.push(try!(read_byte(b)));
};
if bytes.len() != buf.len() {
Err(CfgErr { explain: format!("Wrong length byte sequence: expected {} bytes, got {}",
buf.len(), bytes.len()) })
} else {
for (i, b) in bytes.iter().enumerate() {
buf[i] = *b;
}
Ok(())
}
}
fn tag_ending_s(ch: char) -> bool { ch == ' ' || ch == ':' }
fn line_ending_s(ch: char) -> bool { ch == '\r' || ch == '\n' }
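// nom parser for a single `name: value` cfg line, tolerating surrounding whitespace.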
named!(cfg_line(&str) -> (&str, &str),
chain!(
multispace? ~
name: take_till_s!(tag_ending_s) ~
space? ~
tag_s!(":") ~
space? ~
valu: take_till_s!(line_ending_s) ~
multispace? ,
|| (name, valu)
)
);
| {
let mut it = buf.split_whitespace().skip(1);
let mut d = [0u8; 9];
for output_byte in &mut d {
if let Some(s) = it.next() {
*output_byte = try!(read_byte(s));
} else {
return Err(CfgErr{ explain: String::from("Old-style CFG too short") });
}
};
let (name, name_set) = default_name(path, gen, id);
let (desc, desc_set) = (name.clone(), name_set.clone());
if let Some(s) = it.next() {
Ok(SpriteCfg {
genus: gen,
id: id,
acts_like: d[0],
tweak_bytes: [d[1], d[2], d[3], d[4], d[5], d[6]],
prop_bytes: [d[7], d[8]],
source_path: path.with_file_name(s),
name: name,
name_set: Some(name_set),
desc: desc,
desc_set: Some(desc_set),
.. SpriteCfg::new()
})
} else {
Err(CfgErr { explain: String::from("Old-style CFG too short") })
}
} | identifier_body |
spritecfg.rs | #![allow(dead_code)]
extern crate asar;
use std::path::{PathBuf, Path};
use std::io::prelude::*;
use std::fs::{File, OpenOptions};
use nom::*;
use asar::rom::RomBuf;
use parse_aux::dys_prefix;
use genus::Genus;
use dys_tables::DysTables;
use insert_err::{InsertResult, format_result, warnless_result, single_error};
#[derive(Debug)]
pub struct CfgErr {
explain: String,
}
#[derive(Debug)]
pub struct SpriteCfg {
pub genus: Genus,
pub id: u16,
pub tweak_bytes: [u8; 6],
pub prop_bytes: [u8; 2],
pub clipping: [u8; 4],
dys_option_bytes: [u8; 2],
acts_like: u8,
extra_bytes: u8,
name: String,
desc: String,
name_set: Option<String>,
desc_set: Option<String>,
source_path: PathBuf,
}
#[derive(Debug, Copy, Clone)]
pub struct InsertPoint {
pub main: usize,
pub init: usize,
pub drop: usize,
}
impl SpriteCfg {
pub fn parse(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
if let IResult::Done(rest, vsn) = dys_prefix(buf) {
if vsn != 1 {
return Err(CfgErr { explain: String::from("You have a cfg from the future") });
} else {
parse_newstyle(path, gen, id, rest)
}
} else {
parse_oldstyle(path, gen, id, buf)
}
}
pub fn new() -> SpriteCfg {
SpriteCfg {
genus: Genus::Std,
id: 0,
tweak_bytes: [0, 0, 0, 0, 0, 0],
prop_bytes: [0, 0],
clipping: [0, 0, 0, 0],
dys_option_bytes: [0, 0],
acts_like: 0,
extra_bytes: 0,
name: "".to_string(),
desc: "".to_string(),
name_set: None,
desc_set: None,
source_path: PathBuf::from(""),
}
}
pub fn needs_init(&self) -> bool {
match self.genus {
Genus::Std => true,
_ => false,
}
}
pub fn needs_drop(&self) -> bool {
match self.genus {
Genus::Std => self.dys_option_bytes[1] & 0x80 != 0,
_ => false,
}
}
pub fn placeable(&self) -> bool { self.genus.placeable() }
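// Assemble the sprite: concatenate the prelude and the sprite source into a temp file, run asar
// against the ROM, then read back the printed entry points and validate them against what the
// genus and options require.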
pub fn assemble(
&self,
rom: &mut RomBuf,
prelude: &str,
source: &Path,
temp: &Path,
iopts: ::insert_opts::InsertOpts
) -> InsertResult<InsertPoint> {
let (mut main, mut init, mut drop) = (0usize, 0usize, 0usize);
{
let mut tempasm = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(temp)
.unwrap();
tempasm.write_all(prelude.as_bytes()).unwrap();
let mut source_buf = Vec::<u8>::with_capacity(8 * 1024); // A wild guess.
let mut srcf = warnless_result(
File::open(source),
|e| format!("error opening \"{}\": {}", source.to_string_lossy(), e)
)?;
srcf.read_to_end(&mut source_buf).unwrap();
tempasm.write_all(&source_buf).unwrap();
}
let warns = match asar::patch(temp, rom) {
Ok((_, mut ws)) => ws.drain(..).map(|w| w.into()).collect(),
Err((mut es, mut ws)) => {
return Err(
(es.drain(..).map(|e| e.into()).collect(),
ws.drain(..).map(|w| w.into()).collect())
)
},
};
for print in asar::prints() {
let mut chunks = print.split_whitespace();
let fst = chunks.next();
let snd = chunks.next();
match fst {
Some("MAIN") => match snd {
Some(ofs) => main = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"MAIN\" declaration"),
},
Some("INIT") => match snd {
Some(ofs) => init = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"INIT\" declaration"),
},
Some("DROP") => match snd {
Some(ofs) => drop = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"DROP\" declaration"),
},
None => (),
_ => return single_error("The sprite printed something other than MAIN, INIT, or DROP"),
}
};
if main == 0 |
if init == 0 && self.needs_init() {
return single_error("No init routine");
}
if drop == 0 && self.needs_drop() {
return single_error("Drop routine required by dys_opts, but not provided");
}
if drop != 0 && !self.needs_drop() {
return single_error("Sprite has a drop routine, but dys_opts doesn't require one");
}
if self.needs_drop() && !iopts.use_drops {
return single_error("Sprite needs a drop routine, but drop routines aren't enabled");
}
Ok((InsertPoint { main, init, drop }, warns))
}
pub fn apply_cfg(&self, rom: &mut RomBuf, tables: &DysTables) {
match self.genus {
Genus::Std
| Genus::Gen
| Genus::Sht
| Genus::R1s => {
if self.id < 0x200 {
let size_ofs = if self.id < 0x100 {
self.id as usize
} else {
self.id as usize + 0x100
};
let size = self.extra_bytes + 3;
rom.set_byte(tables.sprite_sizes + size_ofs, size).unwrap();
rom.set_byte(tables.sprite_sizes + size_ofs + 0x100, size).unwrap();
let optbase = tables.option_bytes + (self.id as usize * 0x10);
rom.set_byte(optbase, self.genus.to_byte()).unwrap();
rom.set_byte(optbase + 1, self.acts_like).unwrap();
rom.set_bytes(optbase + 2, &self.tweak_bytes).unwrap();
rom.set_bytes(optbase + 8, &self.dys_option_bytes).unwrap();
rom.set_bytes(optbase + 14, &self.prop_bytes).unwrap();
rom.set_bytes(optbase + 10, &self.clipping).unwrap();
};
},
Genus::Cls => {},
_ => unimplemented!(),
};
}
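// Write the routine pointers into the main/init (and, if present, drop) tables, or into the
// class-routine table for class sprites; each entry is a 3-byte pointer indexed by id.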
pub fn apply_offsets(&self, rom: &mut RomBuf, tables: &DysTables, ip: InsertPoint) {
let ofs = self.id as usize * 3;
match self.genus {
g if g.placeable() => {
rom.set_long(tables.main_ptrs + ofs, ip.main as u32).unwrap();
rom.set_long(tables.init_ptrs + ofs, ip.init as u32).unwrap();
if tables.drop_ptrs != 0 {
rom.set_long(tables.drop_ptrs + ofs, ip.drop as u32).unwrap();
}
},
Genus::Cls => rom.set_long(tables.cls_ptrs + ofs, ip.main as u32).unwrap(),
_ => unimplemented!(),
};
}
pub fn name(&self, ebit: bool) -> &String {
if ebit && self.name_set.is_some() {
self.name_set.as_ref().unwrap()
} else {
&self.name
}
}
pub fn desc(&self, ebit: bool) -> &String {
if ebit && self.desc_set.is_some() {
self.desc_set.as_ref().unwrap()
} else {
&self.desc
}
}
pub fn uses_ebit(&self) -> bool { self.name_set.is_some() }
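// Append this sprite's mw2 placement record: two header bytes carry the extra bit and the high
// bit of the sprite number, ids >= 0x200 add an extended-size byte, then the low id byte and
// zero-filled extra bytes.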
pub fn place_mw2(&self, target: &mut Vec<u8>, ebit: bool) {
if !self.placeable() { panic!("Attempted to place unplaceable sprite") };
let b0 = 0x89;
let b1 = 0x80;
let num_extra_bit: u8 = if self.id & 0x100 == 0 { 0 } else { 8 };
let ebit_val: u8 = if !ebit { 0 } else { 4 };
let b0 = b0 | num_extra_bit | ebit_val;
target.push(b0);
target.push(b1);
if self.id >= 0x200 {
target.push(0xf8 + self.extra_bytes);
}
target.push((self.id & 0xff) as u8);
for _ in 0 .. self.extra_bytes { target.push(0); };
}
pub fn dys_option_bytes(&self) -> &[u8] { &self.dys_option_bytes }
pub fn source_path(&self) -> &PathBuf { &self.source_path }
}
fn default_name(path: &Path, gen: Genus, id: u16) -> (String, String) {
let root = match path.file_stem() {
Some(s) => s.to_string_lossy().into_owned(),
None => format!("Custom {} #{:03x}", gen.shortname(), id),
};
(root.clone(), root + " (extra bit set)")
}
fn parse_newstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
let (mut got_name, mut got_desc): (Option<String>, Option<String>) = (None, None);
let mut cfg = SpriteCfg { genus: gen, id: id, .. SpriteCfg::new() };
let mut buf = buf;
while let IResult::Done(rest, (name, value)) = cfg_line(buf) {
buf = rest;
match name {
"acts-like" => cfg.acts_like = try!(read_byte(value)),
"source" => cfg.source_path = path.with_file_name(value),
"props" => try!(read_bytes(value, &mut cfg.tweak_bytes)),
"xbytes" => cfg.extra_bytes = try!(read_byte(value)),
"ext-props" => try!(read_bytes(value, &mut cfg.prop_bytes)),
"dys-opts" => try!(read_bytes(value, &mut cfg.dys_option_bytes)),
"ext-clip" => try!(read_bytes(value, &mut cfg.clipping)),
"name" => got_name = Some(String::from(value)),
"description" => got_desc = Some(String::from(value)),
"desc-set" => cfg.desc_set = Some(String::from(value)),
"name-set" => cfg.name_set = Some(String::from(value)),
"ext-prop-def" | "m16d" | "tilemap" => (),
_ => return Err(CfgErr { explain: format!("bad field name: \"{}\"", name) }),
};
};
if let Some(s) = got_name {
cfg.name = s;
} else {
let t = default_name(path, gen, id);
cfg.name = t.0;
cfg.name_set = Some(t.1);
};
if let Some(s) = got_desc {
cfg.desc = s;
} else {
cfg.desc = cfg.name.clone();
cfg.desc_set = cfg.name_set.clone();
};
if cfg.source_path.file_name() == None {
Err(CfgErr { explain: String::from("Sprite needs a source file") })
} else {
Ok(cfg)
}
}
fn parse_oldstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
let mut it = buf.split_whitespace().skip(1);
let mut d = [0u8; 9];
for output_byte in &mut d {
if let Some(s) = it.next() {
*output_byte = try!(read_byte(s));
} else {
return Err(CfgErr{ explain: String::from("Old-style CFG too short") });
}
};
let (name, name_set) = default_name(path, gen, id);
let (desc, desc_set) = (name.clone(), name_set.clone());
if let Some(s) = it.next() {
Ok(SpriteCfg {
genus: gen,
id: id,
acts_like: d[0],
tweak_bytes: [d[1], d[2], d[3], d[4], d[5], d[6]],
prop_bytes: [d[7], d[8]],
source_path: path.with_file_name(s),
name: name,
name_set: Some(name_set),
desc: desc,
desc_set: Some(desc_set),
.. SpriteCfg::new()
})
} else {
Err(CfgErr { explain: String::from("Old-style CFG too short") })
}
}
fn read_byte(s: &str) -> Result<u8, CfgErr> {
let iter = s.trim().chars();
let mut n = 0u32;
let mut read = false;
for ch in iter {
if let Some(v) = ch.to_digit(0x10) {
n *= 0x10;
n += v;
read = true;
} else {
return Err(CfgErr { explain: String::from("Non-byte data in byte field") })
}
}
if !read { Err(CfgErr { explain: String::from("Expected a byte, found nothing") }) } else { Ok(n as u8) }
}
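// Parse a whitespace-separated list of hex bytes and require it to exactly fill `buf`.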
fn read_bytes(s: &str, buf: &mut [u8]) -> Result<(), CfgErr> {
let mut bytes = Vec::<u8>::with_capacity(buf.len());
for b in s.split_whitespace() {
bytes.push(try!(read_byte(b)));
};
if bytes.len() != buf.len() {
Err(CfgErr { explain: format!("Wrong length byte sequence: expected {} bytes, got {}",
buf.len(), bytes.len()) })
} else {
for (i, b) in bytes.iter().enumerate() {
buf[i] = *b;
}
Ok(())
}
}
fn tag_ending_s(ch: char) -> bool { ch == ' ' || ch == ':' }
fn line_ending_s(ch: char) -> bool { ch == '\r' || ch == '\n' }
named!(cfg_line(&str) -> (&str, &str),
chain!(
multispace? ~
name: take_till_s!(tag_ending_s) ~
space? ~
tag_s!(":") ~
space? ~
valu: take_till_s!(line_ending_s) ~
multispace? ,
|| (name, valu)
)
);
| {
return single_error("No main routine");
} | conditional_block |
spritecfg.rs | #![allow(dead_code)]
extern crate asar;
use std::path::{PathBuf, Path};
use std::io::prelude::*;
use std::fs::{File, OpenOptions};
use nom::*;
use asar::rom::RomBuf;
use parse_aux::dys_prefix;
use genus::Genus;
use dys_tables::DysTables;
use insert_err::{InsertResult, format_result, warnless_result, single_error};
#[derive(Debug)]
pub struct CfgErr {
explain: String,
}
#[derive(Debug)]
pub struct SpriteCfg {
pub genus: Genus,
pub id: u16,
pub tweak_bytes: [u8; 6],
pub prop_bytes: [u8; 2],
pub clipping: [u8; 4],
dys_option_bytes: [u8; 2],
acts_like: u8,
extra_bytes: u8,
name: String,
desc: String,
name_set: Option<String>,
desc_set: Option<String>,
source_path: PathBuf,
}
#[derive(Debug, Copy, Clone)]
pub struct InsertPoint {
pub main: usize,
pub init: usize,
pub drop: usize,
}
impl SpriteCfg {
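// Dispatch on cfg flavour: files with the dys version prefix use the new keyed format (versions
// above 1 are rejected); anything else is treated as an old-style whitespace-separated CFG.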
pub fn parse(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
if let IResult::Done(rest, vsn) = dys_prefix(buf) {
if vsn != 1 {
return Err(CfgErr { explain: String::from("You have a cfg from the future") });
} else {
parse_newstyle(path, gen, id, rest)
}
} else {
parse_oldstyle(path, gen, id, buf)
}
}
pub fn new() -> SpriteCfg {
SpriteCfg {
genus: Genus::Std,
id: 0,
tweak_bytes: [0, 0, 0, 0, 0, 0],
prop_bytes: [0, 0],
clipping: [0, 0, 0, 0],
dys_option_bytes: [0, 0],
acts_like: 0,
extra_bytes: 0,
name: "".to_string(),
desc: "".to_string(),
name_set: None,
desc_set: None,
source_path: PathBuf::from(""),
}
}
pub fn needs_init(&self) -> bool {
match self.genus {
Genus::Std => true,
_ => false,
}
}
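// Only standard sprites can have a drop routine, opted into via bit 7 of the second dys option byte.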
pub fn needs_drop(&self) -> bool {
match self.genus {
Genus::Std => self.dys_option_bytes[1] & 0x80 != 0,
_ => false,
}
}
pub fn placeable(&self) -> bool { self.genus.placeable() }
pub fn assemble(
&self,
rom: &mut RomBuf,
prelude: &str,
source: &Path,
temp: &Path,
iopts: ::insert_opts::InsertOpts
) -> InsertResult<InsertPoint> {
let (mut main, mut init, mut drop) = (0usize, 0usize, 0usize);
{
let mut tempasm = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(temp)
.unwrap();
tempasm.write_all(prelude.as_bytes()).unwrap();
let mut source_buf = Vec::<u8>::with_capacity(8 * 1024); // A wild guess.
let mut srcf = warnless_result(
File::open(source),
|e| format!("error opening \"{}\": {}", source.to_string_lossy(), e)
)?;
srcf.read_to_end(&mut source_buf).unwrap();
tempasm.write_all(&source_buf).unwrap();
}
let warns = match asar::patch(temp, rom) {
Ok((_, mut ws)) => ws.drain(..).map(|w| w.into()).collect(),
Err((mut es, mut ws)) => {
return Err(
(es.drain(..).map(|e| e.into()).collect(),
ws.drain(..).map(|w| w.into()).collect())
)
},
};
for print in asar::prints() {
let mut chunks = print.split_whitespace();
let fst = chunks.next();
let snd = chunks.next();
match fst {
Some("MAIN") => match snd {
Some(ofs) => main = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"MAIN\" declaration"),
},
Some("INIT") => match snd {
Some(ofs) => init = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"INIT\" declaration"),
},
Some("DROP") => match snd {
Some(ofs) => drop = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"DROP\" declaration"),
},
None => (),
_ => return single_error("The sprite printed something other than MAIN, INIT, or DROP"),
}
};
if main == 0 {
return single_error("No main routine");
}
if init == 0 && self.needs_init() {
return single_error("No init routine");
}
if drop == 0 && self.needs_drop() {
return single_error("Drop routine required by dys_opts, but not provided");
}
if drop != 0 && !self.needs_drop() {
return single_error("Sprite has a drop routine, but dys_opts doesn't require one");
}
if self.needs_drop() && !iopts.use_drops {
return single_error("Sprite needs a drop routine, but drop routines aren't enabled");
}
Ok((InsertPoint { main, init, drop }, warns))
}
pub fn apply_cfg(&self, rom: &mut RomBuf, tables: &DysTables) {
match self.genus {
Genus::Std
| Genus::Gen
| Genus::Sht
| Genus::R1s => {
if self.id < 0x200 {
let size_ofs = if self.id < 0x100 {
self.id as usize
} else {
self.id as usize + 0x100
};
let size = self.extra_bytes + 3;
rom.set_byte(tables.sprite_sizes + size_ofs, size).unwrap();
rom.set_byte(tables.sprite_sizes + size_ofs + 0x100, size).unwrap();
let optbase = tables.option_bytes + (self.id as usize * 0x10);
rom.set_byte(optbase, self.genus.to_byte()).unwrap();
rom.set_byte(optbase + 1, self.acts_like).unwrap();
rom.set_bytes(optbase + 2, &self.tweak_bytes).unwrap();
rom.set_bytes(optbase + 8, &self.dys_option_bytes).unwrap();
rom.set_bytes(optbase + 14, &self.prop_bytes).unwrap();
rom.set_bytes(optbase + 10, &self.clipping).unwrap();
};
},
Genus::Cls => {},
_ => unimplemented!(),
};
}
pub fn apply_offsets(&self, rom: &mut RomBuf, tables: &DysTables, ip: InsertPoint) {
let ofs = self.id as usize * 3;
match self.genus {
g if g.placeable() => {
rom.set_long(tables.main_ptrs + ofs, ip.main as u32).unwrap();
rom.set_long(tables.init_ptrs + ofs, ip.init as u32).unwrap();
if tables.drop_ptrs != 0 {
rom.set_long(tables.drop_ptrs + ofs, ip.drop as u32).unwrap();
}
},
Genus::Cls => rom.set_long(tables.cls_ptrs + ofs, ip.main as u32).unwrap(),
_ => unimplemented!(),
};
}
pub fn | (&self, ebit: bool) -> &String {
if ebit && self.name_set.is_some() {
self.name_set.as_ref().unwrap()
} else {
&self.name
}
}
pub fn desc(&self, ebit: bool) -> &String {
if ebit && self.desc_set.is_some() {
self.desc_set.as_ref().unwrap()
} else {
&self.desc
}
}
pub fn uses_ebit(&self) -> bool { self.name_set.is_some() }
pub fn place_mw2(&self, target: &mut Vec<u8>, ebit: bool) {
if !self.placeable() { panic!("Attempted to place unplaceable sprite") };
let b0 = 0x89;
let b1 = 0x80;
let num_extra_bit: u8 = if self.id & 0x100 == 0 { 0 } else { 8 };
let ebit_val: u8 = if !ebit { 0 } else { 4 };
let b0 = b0 | num_extra_bit | ebit_val;
target.push(b0);
target.push(b1);
if self.id >= 0x200 {
target.push(0xf8 + self.extra_bytes);
}
target.push((self.id & 0xff) as u8);
for _ in 0 .. self.extra_bytes { target.push(0); };
}
pub fn dys_option_bytes(&self) -> &[u8] { &self.dys_option_bytes }
pub fn source_path(&self) -> &PathBuf { &self.source_path }
}
fn default_name(path: &Path, gen: Genus, id: u16) -> (String, String) {
let root = match path.file_stem() {
Some(s) => s.to_string_lossy().into_owned(),
None => format!("Custom {} #{:03x}", gen.shortname(), id),
};
(root.clone(), root + " (extra bit set)")
}
fn parse_newstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
let (mut got_name, mut got_desc): (Option<String>, Option<String>) = (None, None);
let mut cfg = SpriteCfg { genus: gen, id: id, .. SpriteCfg::new() };
let mut buf = buf;
while let IResult::Done(rest, (name, value)) = cfg_line(buf) {
buf = rest;
match name {
"acts-like" => cfg.acts_like = try!(read_byte(value)),
"source" => cfg.source_path = path.with_file_name(value),
"props" => try!(read_bytes(value, &mut cfg.tweak_bytes)),
"xbytes" => cfg.extra_bytes = try!(read_byte(value)),
"ext-props" => try!(read_bytes(value, &mut cfg.prop_bytes)),
"dys-opts" => try!(read_bytes(value, &mut cfg.dys_option_bytes)),
"ext-clip" => try!(read_bytes(value, &mut cfg.clipping)),
"name" => got_name = Some(String::from(value)),
"description" => got_desc = Some(String::from(value)),
"desc-set" => cfg.desc_set = Some(String::from(value)),
"name-set" => cfg.name_set = Some(String::from(value)),
"ext-prop-def" | "m16d" | "tilemap" => (),
_ => return Err(CfgErr { explain: format!("bad field name: \"{}\"", name) }),
};
};
if let Some(s) = got_name {
cfg.name = s;
} else {
let t = default_name(path, gen, id);
cfg.name = t.0;
cfg.name_set = Some(t.1);
};
if let Some(s) = got_desc {
cfg.desc = s;
} else {
cfg.desc = cfg.name.clone();
cfg.desc_set = cfg.name_set.clone();
};
if cfg.source_path.file_name() == None {
Err(CfgErr { explain: String::from("Sprite needs a source file") })
} else {
Ok(cfg)
}
}
fn parse_oldstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
let mut it = buf.split_whitespace().skip(1);
let mut d = [0u8; 9];
for output_byte in &mut d {
if let Some(s) = it.next() {
*output_byte = try!(read_byte(s));
} else {
return Err(CfgErr{ explain: String::from("Old-style CFG too short") });
}
};
let (name, name_set) = default_name(path, gen, id);
let (desc, desc_set) = (name.clone(), name_set.clone());
if let Some(s) = it.next() {
Ok(SpriteCfg {
genus: gen,
id: id,
acts_like: d[0],
tweak_bytes: [d[1], d[2], d[3], d[4], d[5], d[6]],
prop_bytes: [d[7], d[8]],
source_path: path.with_file_name(s),
name: name,
name_set: Some(name_set),
desc: desc,
desc_set: Some(desc_set),
.. SpriteCfg::new()
})
} else {
Err(CfgErr { explain: String::from("Old-style CFG too short") })
}
}
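// Parse a run of hex digits into a byte (accumulated in a u32 and truncated to the low 8 bits).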
fn read_byte(s: &str) -> Result<u8, CfgErr> {
let iter = s.trim().chars();
let mut n = 0u32;
let mut read = false;
for ch in iter {
if let Some(v) = ch.to_digit(0x10) {
n *= 0x10;
n += v;
read = true;
} else {
return Err(CfgErr { explain: String::from("Non-byte data in byte field") })
}
}
if !read { Err(CfgErr { explain: String::from("Expected a byte, found nothing") }) } else { Ok(n as u8) }
}
fn read_bytes(s: &str, buf: &mut [u8]) -> Result<(), CfgErr> {
let mut bytes = Vec::<u8>::with_capacity(buf.len());
for b in s.split_whitespace() {
bytes.push(try!(read_byte(b)));
};
if bytes.len() != buf.len() {
Err(CfgErr { explain: format!("Wrong length byte sequence: expected {} bytes, got {}",
buf.len(), bytes.len()) })
} else {
for (i, b) in bytes.iter().enumerate() {
buf[i] = *b;
}
Ok(())
}
}
fn tag_ending_s(ch: char) -> bool { ch == ' ' || ch == ':' }
fn line_ending_s(ch: char) -> bool { ch == '\r' || ch == '\n' }
named!(cfg_line(&str) -> (&str, &str),
chain!(
multispace? ~
name: take_till_s!(tag_ending_s) ~
space? ~
tag_s!(":") ~
space? ~
valu: take_till_s!(line_ending_s) ~
multispace? ,
|| (name, valu)
)
);
| name | identifier_name |
spritecfg.rs | #![allow(dead_code)]
extern crate asar;
use std::path::{PathBuf, Path};
use std::io::prelude::*;
use std::fs::{File, OpenOptions};
use nom::*;
use asar::rom::RomBuf;
use parse_aux::dys_prefix;
use genus::Genus;
use dys_tables::DysTables;
use insert_err::{InsertResult, format_result, warnless_result, single_error};
#[derive(Debug)]
pub struct CfgErr {
explain: String,
}
#[derive(Debug)]
pub struct SpriteCfg {
pub genus: Genus,
pub id: u16,
pub tweak_bytes: [u8; 6],
pub prop_bytes: [u8; 2],
pub clipping: [u8; 4],
dys_option_bytes: [u8; 2],
acts_like: u8,
extra_bytes: u8,
name: String,
desc: String,
name_set: Option<String>,
desc_set: Option<String>,
source_path: PathBuf,
}
#[derive(Debug, Copy, Clone)]
pub struct InsertPoint {
pub main: usize,
pub init: usize,
pub drop: usize,
}
impl SpriteCfg {
pub fn parse(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
if let IResult::Done(rest, vsn) = dys_prefix(buf) {
if vsn != 1 {
return Err(CfgErr { explain: String::from("You have a cfg from the future") });
} else {
parse_newstyle(path, gen, id, rest)
}
} else {
parse_oldstyle(path, gen, id, buf)
}
}
pub fn new() -> SpriteCfg {
SpriteCfg {
genus: Genus::Std,
id: 0,
tweak_bytes: [0, 0, 0, 0, 0, 0],
prop_bytes: [0, 0],
clipping: [0, 0, 0, 0],
dys_option_bytes: [0, 0],
acts_like: 0,
extra_bytes: 0,
name: "".to_string(),
desc: "".to_string(),
name_set: None,
desc_set: None,
source_path: PathBuf::from(""),
}
}
pub fn needs_init(&self) -> bool {
match self.genus {
Genus::Std => true,
_ => false,
}
}
pub fn needs_drop(&self) -> bool {
match self.genus {
Genus::Std => self.dys_option_bytes[1] & 0x80 != 0,
_ => false,
}
}
pub fn placeable(&self) -> bool { self.genus.placeable() }
pub fn assemble(
&self,
rom: &mut RomBuf,
prelude: &str,
source: &Path,
temp: &Path,
iopts: ::insert_opts::InsertOpts
) -> InsertResult<InsertPoint> {
let (mut main, mut init, mut drop) = (0usize, 0usize, 0usize);
| {
let mut tempasm = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(temp)
.unwrap();
tempasm.write_all(prelude.as_bytes()).unwrap();
let mut source_buf = Vec::<u8>::with_capacity(8 * 1024); // A wild guess.
let mut srcf = warnless_result(
File::open(source),
|e| format!("error opening \"{}\": {}", source.to_string_lossy(), e)
)?;
srcf.read_to_end(&mut source_buf).unwrap();
tempasm.write_all(&source_buf).unwrap();
}
let warns = match asar::patch(temp, rom) {
Ok((_, mut ws)) => ws.drain(..).map(|w| w.into()).collect(),
Err((mut es, mut ws)) => {
return Err(
(es.drain(..).map(|e| e.into()).collect(),
ws.drain(..).map(|w| w.into()).collect())
)
},
};
for print in asar::prints() {
let mut chunks = print.split_whitespace();
let fst = chunks.next();
let snd = chunks.next();
match fst {
Some("MAIN") => match snd {
Some(ofs) => main = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"MAIN\" declaration"),
},
Some("INIT") => match snd {
Some(ofs) => init = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"INIT\" declaration"),
},
Some("DROP") => match snd {
Some(ofs) => drop = usize::from_str_radix(ofs, 16).unwrap(),
_ => return single_error("No offset after \"DROP\" declaration"),
},
None => (),
_ => return single_error("The sprite printed something other than MAIN, INIT, or DROP"),
}
};
if main == 0 {
return single_error("No main routine");
}
if init == 0 && self.needs_init() {
return single_error("No init routine");
}
if drop == 0 && self.needs_drop() {
return single_error("Drop routine required by dys_opts, but not provided");
}
if drop != 0 && !self.needs_drop() {
return single_error("Sprite has a drop routine, but dys_opts doesn't require one");
}
if self.needs_drop() && !iopts.use_drops {
return single_error("Sprite needs a drop routine, but drop routines aren't enabled");
}
Ok((InsertPoint { main, init, drop }, warns))
}
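// Sketch of the assumed print protocol (not stated in the original source): the
// sprite's asm is expected to report its entry points via asar prints such as
// print "MAIN ", pc
// print "INIT ", pc
// which the loop above reads back as hex offsets; a missing MAIN (or a missing
// INIT/DROP when one is required) is turned into an insertion error.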
pub fn apply_cfg(&self, rom: &mut RomBuf, tables: &DysTables) {
match self.genus {
Genus::Std
| Genus::Gen
| Genus::Sht
| Genus::R1s => {
if self.id < 0x200 {
let size_ofs = if self.id < 0x100 {
self.id as usize
} else {
self.id as usize + 0x100
};
let size = self.extra_bytes + 3;
rom.set_byte(tables.sprite_sizes + size_ofs, size).unwrap();
rom.set_byte(tables.sprite_sizes + size_ofs + 0x100, size).unwrap();
let optbase = tables.option_bytes + (self.id as usize * 0x10);
rom.set_byte(optbase, self.genus.to_byte()).unwrap();
rom.set_byte(optbase + 1, self.acts_like).unwrap();
rom.set_bytes(optbase + 2, &self.tweak_bytes).unwrap();
rom.set_bytes(optbase + 8, &self.dys_option_bytes).unwrap();
rom.set_bytes(optbase + 14, &self.prop_bytes).unwrap();
rom.set_bytes(optbase + 10, &self.clipping).unwrap();
};
},
Genus::Cls => {},
_ => unimplemented!(),
};
}
pub fn apply_offsets(&self, rom: &mut RomBuf, tables: &DysTables, ip: InsertPoint) {
let ofs = self.id as usize * 3;
match self.genus {
g if g.placeable() => {
rom.set_long(tables.main_ptrs + ofs, ip.main as u32).unwrap();
rom.set_long(tables.init_ptrs + ofs, ip.init as u32).unwrap();
if tables.drop_ptrs != 0 {
rom.set_long(tables.drop_ptrs + ofs, ip.drop as u32).unwrap();
}
},
Genus::Cls => rom.set_long(tables.cls_ptrs + ofs, ip.main as u32).unwrap(),
_ => unimplemented!(),
};
}
pub fn name(&self, ebit: bool) -> &String {
if ebit && self.name_set.is_some() {
self.name_set.as_ref().unwrap()
} else {
&self.name
}
}
pub fn desc(&self, ebit: bool) -> &String {
if ebit && self.desc_set.is_some() {
self.desc_set.as_ref().unwrap()
} else {
&self.desc
}
}
pub fn uses_ebit(&self) -> bool { self.name_set.is_some() }
pub fn place_mw2(&self, target: &mut Vec<u8>, ebit: bool) {
if !self.placeable() { panic!("Attempted to place unplaceable sprite") };
let b0 = 0x89;
let b1 = 0x80;
let num_extra_bit: u8 = if self.id & 0x100 == 0 { 0 } else { 8 };
let ebit_val: u8 = if !ebit { 0 } else { 4 };
let b0 = b0 | num_extra_bit | ebit_val;
target.push(b0);
target.push(b1);
if self.id >= 0x200 {
target.push(0xf8 + self.extra_bytes);
}
target.push((self.id & 0xff) as u8);
for _ in 0 .. self.extra_bytes { target.push(0); };
}
pub fn dys_option_bytes(&self) -> &[u8] { &self.dys_option_bytes }
pub fn source_path(&self) -> &PathBuf { &self.source_path }
}
fn default_name(path: &Path, gen: Genus, id: u16) -> (String, String) {
let root = match path.file_stem() {
Some(s) => s.to_string_lossy().into_owned(),
None => format!("Custom {} #{:03x}", gen.shortname(), id),
};
(root.clone(), root + " (extra bit set)")
}
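// Example (sketch): default_name(Path::new("thwomp.cfg"), gen, 0x05) yields
// ("thwomp", "thwomp (extra bit set)"); without a file stem it falls back to
// "Custom <genus shortname> #005".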
fn parse_newstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
let (mut got_name, mut got_desc): (Option<String>, Option<String>) = (None, None);
let mut cfg = SpriteCfg { genus: gen, id: id, .. SpriteCfg::new() };
let mut buf = buf;
while let IResult::Done(rest, (name, value)) = cfg_line(buf) {
buf = rest;
match name {
"acts-like" => cfg.acts_like = try!(read_byte(value)),
"source" => cfg.source_path = path.with_file_name(value),
"props" => try!(read_bytes(value, &mut cfg.tweak_bytes)),
"xbytes" => cfg.extra_bytes = try!(read_byte(value)),
"ext-props" => try!(read_bytes(value, &mut cfg.prop_bytes)),
"dys-opts" => try!(read_bytes(value, &mut cfg.dys_option_bytes)),
"ext-clip" => try!(read_bytes(value, &mut cfg.clipping)),
"name" => got_name = Some(String::from(value)),
"description" => got_desc = Some(String::from(value)),
"desc-set" => cfg.desc_set = Some(String::from(value)),
"name-set" => cfg.name_set = Some(String::from(value)),
"ext-prop-def" | "m16d" | "tilemap" => (),
_ => return Err(CfgErr { explain: format!("bad field name: \"{}\"", name) }),
};
};
if let Some(s) = got_name {
cfg.name = s;
} else {
let t = default_name(path, gen, id);
cfg.name = t.0;
cfg.name_set = Some(t.1);
};
if let Some(s) = got_desc {
cfg.desc = s;
} else {
cfg.desc = cfg.name.clone();
cfg.desc_set = cfg.name_set.clone();
};
if cfg.source_path.file_name() == None {
Err(CfgErr { explain: String::from("Sprite needs a source file") })
} else {
Ok(cfg)
}
}
fn parse_oldstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> {
let mut it = buf.split_whitespace().skip(1);
let mut d = [0u8; 9];
for output_byte in &mut d {
if let Some(s) = it.next() {
*output_byte = try!(read_byte(s));
} else {
return Err(CfgErr{ explain: String::from("Old-style CFG too short") });
}
};
let (name, name_set) = default_name(path, gen, id);
let (desc, desc_set) = (name.clone(), name_set.clone());
if let Some(s) = it.next() {
Ok(SpriteCfg {
genus: gen,
id: id,
acts_like: d[0],
tweak_bytes: [d[1], d[2], d[3], d[4], d[5], d[6]],
prop_bytes: [d[7], d[8]],
source_path: path.with_file_name(s),
name: name,
name_set: Some(name_set),
desc: desc,
desc_set: Some(desc_set),
.. SpriteCfg::new()
})
} else {
Err(CfgErr { explain: String::from("Old-style CFG too short") })
}
}
fn read_byte(s: &str) -> Result<u8, CfgErr> {
let iter = s.trim().chars();
let mut n = 0u32;
let mut read = false;
for ch in iter {
if let Some(v) = ch.to_digit(0x10) {
n *= 0x10;
n += v;
read = true;
} else {
return Err(CfgErr { explain: String::from("Non-byte data in byte field") })
}
}
if !read { Err(CfgErr { explain: String::from("Expected a byte, found nothing") }) } else { Ok(n as u8) }
}
fn read_bytes(s: &str, buf: &mut [u8]) -> Result<(), CfgErr> {
let mut bytes = Vec::<u8>::with_capacity(buf.len());
for b in s.split_whitespace() {
bytes.push(try!(read_byte(b)));
};
if bytes.len() != buf.len() {
Err(CfgErr { explain: format!("Wrong length byte sequence: expected {} bytes, got {}",
buf.len(), bytes.len()) })
} else {
for (i, b) in bytes.iter().enumerate() {
buf[i] = *b;
}
Ok(())
}
}
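// Example (sketch): read_bytes("01 23 45 67 89 ab", &mut cfg.tweak_bytes) fills
// all six tweak bytes; supplying more or fewer values than the buffer holds is
// reported as a length error.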
fn tag_ending_s(ch: char) -> bool { ch == ' ' || ch == ':' }
fn line_ending_s(ch: char) -> bool { ch == '\r' || ch == '\n' }
named!(cfg_line(&str) -> (&str, &str),
chain!(
multispace? ~
name: take_till_s!(tag_ending_s) ~
space? ~
tag_s!(":") ~
space? ~
valu: take_till_s!(line_ending_s) ~
multispace? ,
|| (name, valu)
)
); | random_line_split |
|
models.py | # encoding: utf-8
import datetime
from django.db import models
from django.contrib.auth.models import User
from django.db.models import signals
from django.core import validators
from django.core.exceptions import ValidationError
import time
from django.db.models import Q
from datetime import date
import model_audit
import helpers
from collections import namedtuple
from tinymce import models as tinymce_models
from ortoloco import settings
class Depot(models.Model):
"""
Location where stuff is picked up.
"""
code = models.CharField("Code", max_length=100, validators=[validators.validate_slug], unique=True)
name = models.CharField("Depot Name", max_length=100, unique=True)
contact = models.ForeignKey("Loco", on_delete=models.PROTECT)
weekday = models.PositiveIntegerField("Wochentag", choices=helpers.weekday_choices)
latitude = models.CharField("Latitude", max_length=100, default="")
longitude = models.CharField("Longitude", max_length=100, default="")
addr_street = models.CharField("Strasse", max_length=100)
addr_zipcode = models.CharField("PLZ", max_length=10)
addr_location = models.CharField("Ort", max_length=50)
def __unicode__(self):
return u"%s %s" % (self.id, self.name)
def active_abos(self):
return self.abo_set.filter(active=True)
def wochentag(self):
day = "Unbekannt"
if self.weekday < 8 and self.weekday > 0:
day = helpers.weekdays[self.weekday]
return day
def get_abo_by_size(self, abo_size):
return len(self.active_abos().filter(groesse=abo_size))
def get_abos_by_sizes(self):
result = {}
for abo_size in Abo.abo_types:
if abo_size is not Abo.SIZE_NONE:
result[abo_size] = len(self.active_abos().filter(groesse=abo_size))
print 'get_abos_by_size', self, result
return result
"""
def small_abos(self):
return len(self.active_abos().filter(Q(groesse=1) | Q(groesse=3)))
def big_abos(self):
return len(self.active_abos().filter(Q(groesse=2) | Q(groesse=3) | Q(groesse=4))) + len(self.active_abos().filter(groesse=4))
"""
class Meta:
verbose_name = "Depot"
verbose_name_plural = "Depots"
class ExtraAboType(models.Model):
"""
Types of extra abos, e.g. eggs, cheese, fruit
"""
name = models.CharField("Name", max_length=100, unique=True)
description = models.TextField("Beschreibung", max_length=1000)
def __unicode__(self):
return u"%s %s" % (self.id, self.name)
class Meta:
verbose_name = "Zusatz-Abo"
verbose_name_plural = "Zusatz-Abos"
class Abo(models.Model):
"""
One Abo that may be shared among several people.
"""
SIZE_NONE = 0
SIZE_HALF = 1
SIZE_SMALL = 2
SIZE_BIG = 4
SIZE_HOUSE = 10
# required_bohnen are per abo, not per person
AboTyp = namedtuple('AboTyp', ['size', 'name_short', 'name_long', 'description', 'min_anteilsscheine', 'visible', 'required_bohnen', 'cost']);
abo_types = {
SIZE_NONE: AboTyp( size=SIZE_NONE, name_short='Keins', name_long='Kein Abo',
min_anteilsscheine=1, visible=True, required_bohnen = 0,
cost = 0,
description=u"Du kannst auch ohne Gemüseabo "+settings.SITE_NAME+"-GenossenschafterIn sein. Bleibe auf dem Laufenden und mach mit, wenn du Lust hast"),
SIZE_HALF: AboTyp( size=SIZE_HALF, name_short='Halb', name_long='Halbes Abo',
min_anteilsscheine=1, visible=False, required_bohnen = 10,
cost = 550,
description=u"Halbe Abos können in Ausnahmefällen vergeben werden"),
SIZE_SMALL: AboTyp( size=SIZE_SMALL, name_short='Klein', name_long='Kleines Abo',
min_anteilsscheine=2, visible=True, required_bohnen = 20,
cost = 1100,
description=u"Das kleine Abo ist für 2-3 Personen geeignet und benötigt mindestens zwei Anteilscheine"),
SIZE_BIG: AboTyp( size=SIZE_BIG, name_short='Gross', name_long='Grosses Abo',
min_anteilsscheine=4, visible=True, required_bohnen = 40,
cost = 2200,
description=u"Das grosse Abo empfiehlt sich für WG's oder Familien (ca. 4-6 Personen) und benötigt vier Anteilscheine")
}
SIZE_CHOICES = [(k, v.name_short) for k, v in abo_types.iteritems()] # a list, not a one-shot generator
number = models.CharField("Abo-Nummer", blank=True, max_length=6)
number.help_text = "Interne Abo-Nummer"
depot = models.ForeignKey(Depot, on_delete=models.PROTECT)
groesse = models.PositiveIntegerField(choices=SIZE_CHOICES,default=SIZE_SMALL)
extra_abos = models.ManyToManyField(ExtraAboType, null=True, blank=True)
extra_abos.help_text = "Zusatz-Abos existieren vorderhand nicht, dieses Feld bleibt leer."
primary_loco = models.ForeignKey("Loco", related_name="abo_primary", null=True, blank=True,
on_delete=models.PROTECT)
primary_loco.help_text = "Primärer Ansprechpartner dieses Abos"
paid = models.BooleanField(default=False, verbose_name="Bezahlt")
paid.help_text = "Dieses Abo wurde vollständig bezahlt."
active = models.BooleanField(default=False, verbose_name="Aktiv")
active.help_text = "An dieses Abo wird Gemüse geliefert"
def __unicode__(self):
if self.SIZE_HALF == self.groesse:
namelist = ["1/2 Einheit"]
elif self.SIZE_SMALL == self.groesse:
namelist = ["1 Einheit"]
else:
namelist = ["%i Einheiten" % int(self.groesse / float(self.SIZE_SMALL))]
namelist.extend(extra.name for extra in self.extra_abos.all())
return u"Abo (%s) #%s" % (" + ".join(namelist), self.number)
def bezieher(self):
locos = self.locos.all()
return ", ".join(unicode(loco) for loco in locos)
def andere_bezieher(self):
locos = self.bezieher_locos().exclude(email=self.primary_loco.email)
return ", ".join(unicode(loco) for loco in locos)
def bezieher_locos(self):
return self.locos.all()
def verantwortlicher_bezieher(self):
loco = self.primary_loco
return unicode(loco) if loco is not None else ""
def groesse_name(self):
return self.abo_types[self.groesse].name_long
def groesse_name_short(self):
return self.get_groesse_display()
class Meta:
verbose_name = "Abo"
verbose_name_plural = "Abos"
class Loco(models.Model):
"""
Additional fields for Django's default user class.
"""
# user class is only used for logins, permissions, and other builtin django stuff
# all user information should be stored in the Loco model
user = models.OneToOneField(User, related_name='loco', null=True, blank=True)
first_name = models.CharField("Vorname", max_length=30)
last_name = models.CharField("Nachname", max_length=30)
email = models.EmailField(unique=True)
SEX = [
("M", "Herr"),
("F", "Frau")
]
sex = models.CharField("Geschlecht", max_length=1, choices=SEX, default='F')
addr_street = models.CharField("Strasse", max_length=100)
addr_zipcode = models.CharField("PLZ", max_length=10)
addr_location = models.CharField("Ort", max_length=50)
birthday = models.DateField("Geburtsdatum", null=True, blank=True)
phone = models.CharField("Telefonnr", max_length=50)
mobile_phone = models.CharField("Mobile", max_length=50, null=True, blank=True)
abo = models.ForeignKey(Abo, related_name="locos", null=True, blank=True,
on_delete=models.SET_NULL)
abo.help_text = "Um dieses Mitglied einem Abo zuzuweisen oder die Abozuordnung zu ändern, bitte wie die Abo-Seite gehen."
confirmed = models.BooleanField("bestätigt", default=True)
confirmed.help_text = "Neu-Anmeldungen über die Webseite sind zuerst nicht bestätigt. Dieses Feld muss danach manuell gesetzt werden."
def get_salutation(self):
if self.sex == 'M':
return 'Herr'
else:
return 'Frau'
def get_full_salutation(self):
if self.sex == 'M':
return 'Lieber '+self.get_name()
else:
return 'Liebe '+self.get_name()
def get_taetigkeitsbereiche(self):
tbs = []
for tb in Taetigkeitsbereich.objects.all():
if tb.locos.all().filter(id=self.id).__len__() > 0:
tbs.append(tb)
return tbs
def __unicode__(self):
return self.get_name()
@classmethod
def create(cls, sender, instance, created, **kdws):
"""
Callback to create a corresponding Django User when a new Loco is created.
"""
if created:
username = helpers.make_username(instance.first_name, instance.last_name, instance.email)
user = User(username=username)
user.save()
user = User.objects.get(username=username)
instance.user = user
instance.save()
@classmethod
def post_delete(cls, sender, instance, **kwds):
instance.user.delete()
class Meta:
verbose_name = "Mitglied"
verbose_name_plural = "Mitglieder"
def get_name(self):
return u"%s %s" % (self.first_name, self.last_name)
def get_phone(self):
if self.mobile_phone != "":
return self.mobile_phone
return self.phone
class Anteilschein(models.Model):
number = models.CharField("Anteilsschein-Nummer", blank=True, max_length=6)
number.help_text = "Interne Anteilsschein-Nummer"
loco = models.ForeignKey(Loco, null=True, blank=True, on_delete=models.SET_NULL)
loco.help_text = "Eigner des Anteilsscheins"
paid = models.BooleanField(default=False, verbose_name="Bezahlt")
paid.help_text = "Dieser Anteilsschein wurde vollständig bezahlt."
canceled = models.BooleanField(default=False, verbose_name="Gekündigt")
def __unicode__(self):
return u"Anteilschein #%s" % (self.number)
class Meta:
verbose_name = "Anteilschein"
ver | :
name = models.CharField("Name", max_length=100, unique=True)
description = models.TextField("Beschreibung", max_length=1000, default="")
core = models.BooleanField("Kernbereich", default=False)
hidden = models.BooleanField("versteckt", default=False)
coordinator = models.ForeignKey(Loco, on_delete=models.PROTECT)
locos = models.ManyToManyField(Loco, related_name="areas", blank=True, null=True)
def __unicode__(self):
return u'%s' % self.name
class Meta:
verbose_name = 'Tätigkeitsbereich'
verbose_name_plural = 'Tätigkeitsbereiche'
class JobTyp(models.Model):
"""
Recurring type of job.
"""
name = models.CharField("Name", max_length=100, unique=True)
displayed_name = models.CharField("Angezeigter Name", max_length=100, blank=True, null=True)
description = tinymce_models.HTMLField("Beschreibung", max_length=1000, default="")
bereich = models.ForeignKey(Taetigkeitsbereich, on_delete=models.PROTECT)
duration = models.PositiveIntegerField("Dauer in Stunden")
duration.help_text = "Diese Dauer wird bei allen Einsätzen dieses Typs verwendet werden."
location = models.CharField("Ort", max_length=100, default="")
location.help_text = "Dieser Ort wird bei allen Einsätzen dieses Typs angezeigt werden."
car_needed = models.BooleanField("Auto benötigt", default=False)
def __unicode__(self):
return u'%s - %s (%s h)' % (self.bereich, self.name, str(self.duration))
def get_name(self):
if self.displayed_name is not None:
return self.displayed_name
return self.name
class Meta:
verbose_name = 'Jobart'
verbose_name_plural = 'Jobarten'
class Job(models.Model):
typ = models.ForeignKey(JobTyp, on_delete=models.PROTECT)
typ.help_text = "Bei einmaligen Einsätzen bitte einen neuen Typ erstellen (auf das Plus klicken) und dort im Titel das Datum angeben"
# A job can now count for more (or less) than 1 boehnli / ruebli
multiplier = models.PositiveIntegerField("Anzahl Rüebli", default=1)
multiplier.help_text = "Anzahl Rüebli, die jede Person für ihre Teilnahme an diesem Einsatz erhält."
# Slots are in number of people, independent of multiplier
slots = models.PositiveIntegerField("Plaetze")
slots.help_text = "Anzahl Personen, die sich für diesen Einsatz eintragen können."
time = models.DateTimeField("Anfangszeit")
time.help_text = "Die Endzeit wird berechnet aus Anfangszeit plus Dauer (übernommen vom Typ)"
reminder_sent = models.BooleanField("Reminder verschickt", default=False)
reminder_sent.help_text = "Wenn gesetzt, wurde die automatische Erinnerung 24h vor dem Einsatz bereits versandt. Sollte nur in Ausnahmefällen von Hand geändert werden müssen."
def __unicode__(self):
return u'Job #%s (%s)' % (self.id, self.typ.name)
def wochentag(self):
weekday = helpers.weekdays[self.time.isoweekday()]
return weekday
def time_stamp(self):
return int(time.mktime(self.time.timetuple()) * 1000)
def freie_plaetze(self):
return self.slots - self.besetzte_plaetze()
def end_time(self):
return self.time + datetime.timedelta(hours=self.typ.duration)
def besetzte_plaetze(self):
return self.boehnli_set.count()
def needs_car(self):
return self.typ.car_needed
def status_class(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
if participants >= self.slots:
return 'full'
to_be_filled = self.slots - participants
time_left = self.time.date() - date.today() # self.time is a datetime, today() a date
days_left = time_left.days
if days_left <= to_be_filled:
return 'urgent'
return ''
def get_car_status(self):
text = self.get_car_status_text()
needed = self.typ.car_needed
if needed:
available = Boehnli.objects.filter(job_id=self.id,with_car=True)
if available.count():
return '<img src="/static/img/auto_green.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
else:
return '<img src="/static/img/auto_red.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
else:
return '<img src="/static/img/auto_grey.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
def get_car_status_text(self):
needed = self.typ.car_needed
if needed:
available = Boehnli.objects.filter(job_id=self.id,with_car=True)
if available.count():
return 'Ein Auto ist bereits verfügbar'
else:
return 'Ein Auto wird noch benötigt'
else:
return 'Kein Auto benötigt'
def get_status_bohne_bar(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
status = self.get_status_bohne_text()
result = ''
for i in range(self.slots):
if participants > i:
result += '<img title="{status}" src="/static/img/erbse_voll.png"/>'.format(status=status)
else:
result += '<img title="{status}" src="/static/img/erbse_leer.png"/>'.format(status=status)
return result
def get_status_bohne_text(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
return "%d von %d gebucht" % (participants, self.slots)
def get_status_bohne(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
pctfull = participants * 100 / self.slots
if pctfull >= 100:
return "erbse_voll.png"
elif pctfull >= 75:
return "erbse_fast_voll.png"
elif pctfull >= 50:
return "erbse_halb.png"
else:
return "erbse_fast_leer.png"
class Meta:
verbose_name = 'Job'
verbose_name_plural = 'Jobs'
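# Illustrative sketch (not part of the original code): status_class() returns
# 'full' once bookings reach the slot count and 'urgent' when the days left
# until the job are at most the number of open slots; get_status_bohne() picks
# the pea icon by fill percentage (100/75/50 thresholds).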
class JobComment(models.Model):
job = models.ForeignKey(Job, related_name="comments")
loco = models.ForeignKey(Loco, related_name="comments")
time = models.DateTimeField("Erstellungs-Zeitpunkt", auto_now_add=True)
text = models.TextField("Kommentar")
class Meta:
verbose_name = 'Job-Kommentar'
verbose_name_plural = 'Job-Kommentare'
class Boehnli(models.Model):
"""
Single boehnli (work unit).
"""
job = models.ForeignKey(Job, on_delete=models.CASCADE)
loco = models.ForeignKey(Loco, on_delete=models.PROTECT)
with_car = models.BooleanField("Auto verfügbar", default=False)
def __unicode__(self):
return u'Einsatz #%s' % self.id
def zeit(self):
return self.job.time
class Meta:
verbose_name = 'Einsatz'
verbose_name_plural = 'Einsätze'
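# Illustrative note (not part of the original code): a Boehnli is one booked
# work slot, i.e. the (Job, Loco) pairing counted by Job.besetzte_plaetze();
# with_car marks participants who can bring a car for car_needed job types.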
#model_audit.m2m(Abo.users)
model_audit.m2m(Abo.extra_abos)
model_audit.fk(Abo.depot)
model_audit.fk(Anteilschein.loco)
signals.post_save.connect(Loco.create, sender=Loco)
signals.post_delete.connect(Loco.post_delete, sender=Loco)
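# Note (sketch, not in the original code): the post_save hook above fires with
# sender=Loco, so saving a brand-new Loco creates and links a Django User for
# it, and post_delete removes that User again when the Loco is deleted.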
| bose_name_plural = "Anteilscheine"
class Taetigkeitsbereich(models.Model) | identifier_body |
models.py | # encoding: utf-8
import datetime
from django.db import models
from django.contrib.auth.models import User
from django.db.models import signals
from django.core import validators
from django.core.exceptions import ValidationError
import time
from django.db.models import Q
from datetime import date
import model_audit
import helpers
from collections import namedtuple
from tinymce import models as tinymce_models
from ortoloco import settings
class Depot(models.Model):
"""
Location where stuff is picked up.
"""
code = models.CharField("Code", max_length=100, validators=[validators.validate_slug], unique=True)
name = models.CharField("Depot Name", max_length=100, unique=True)
contact = models.ForeignKey("Loco", on_delete=models.PROTECT)
weekday = models.PositiveIntegerField("Wochentag", choices=helpers.weekday_choices)
latitude = models.CharField("Latitude", max_length=100, default="")
longitude = models.CharField("Longitude", max_length=100, default="")
addr_street = models.CharField("Strasse", max_length=100)
addr_zipcode = models.CharField("PLZ", max_length=10)
addr_location = models.CharField("Ort", max_length=50)
def __unicode__(self):
return u"%s %s" % (self.id, self.name)
def active_abos(self):
return self.abo_set.filter(active=True)
def wochentag(self):
day = "Unbekannt"
if self.weekday < 8 and self.weekday > 0:
day = helpers.weekdays[self.weekday]
return day
def get_abo_by_size(self, abo_size):
return len(self.active_abos().filter(groesse=abo_size))
def get_abos_by_sizes(self):
result = {}
for abo_size in Abo.abo_types:
|
print 'get_abos_by_size', self, result
return result
"""
def small_abos(self):
return len(self.active_abos().filter(Q(groesse=1) | Q(groesse=3)))
def big_abos(self):
return len(self.active_abos().filter(Q(groesse=2) | Q(groesse=3) | Q(groesse=4))) + len(self.active_abos().filter(groesse=4))
"""
class Meta:
verbose_name = "Depot"
verbose_name_plural = "Depots"
class ExtraAboType(models.Model):
"""
Types of extra abos, e.g. eggs, cheese, fruit
"""
name = models.CharField("Name", max_length=100, unique=True)
description = models.TextField("Beschreibung", max_length=1000)
def __unicode__(self):
return u"%s %s" % (self.id, self.name)
class Meta:
verbose_name = "Zusatz-Abo"
verbose_name_plural = "Zusatz-Abos"
class Abo(models.Model):
"""
One Abo that may be shared among several people.
"""
SIZE_NONE = 0
SIZE_HALF = 1
SIZE_SMALL = 2
SIZE_BIG = 4
SIZE_HOUSE = 10
# required_bohnen are per abo, not per person
AboTyp = namedtuple('AboTyp', ['size', 'name_short', 'name_long', 'description', 'min_anteilsscheine', 'visible', 'required_bohnen', 'cost']);
abo_types = {
SIZE_NONE: AboTyp( size=SIZE_NONE, name_short='Keins', name_long='Kein Abo',
min_anteilsscheine=1, visible=True, required_bohnen = 0,
cost = 0,
description=u"Du kannst auch ohne Gemüseabo "+settings.SITE_NAME+"-GenossenschafterIn sein. Bleibe auf dem Laufenden und mach mit, wenn du Lust hast"),
SIZE_HALF: AboTyp( size=SIZE_HALF, name_short='Halb', name_long='Halbes Abo',
min_anteilsscheine=1, visible=False, required_bohnen = 10,
cost = 550,
description=u"Halbe Abos können in Ausnahmefällen vergeben werden"),
SIZE_SMALL: AboTyp( size=SIZE_SMALL, name_short='Klein', name_long='Kleines Abo',
min_anteilsscheine=2, visible=True, required_bohnen = 20,
cost = 1100,
description=u"Das kleine Abo ist für 2-3 Personen geeignet und benötigt mindestens zwei Anteilscheine"),
SIZE_BIG: AboTyp( size=SIZE_BIG, name_short='Gross', name_long='Grosses Abo',
min_anteilsscheine=4, visible=True, required_bohnen = 40,
cost = 2200,
description=u"Das grosse Abo empfiehlt sich für WG's oder Familien (ca. 4-6 Personen) und benötigt vier Anteilscheine")
}
SIZE_CHOICES = [(k, v.name_short) for k, v in abo_types.iteritems()] # a list, not a one-shot generator
number = models.CharField("Abo-Nummer", blank=True, max_length=6)
number.help_text = "Interne Abo-Nummer"
depot = models.ForeignKey(Depot, on_delete=models.PROTECT)
groesse = models.PositiveIntegerField(choices=SIZE_CHOICES,default=SIZE_SMALL)
extra_abos = models.ManyToManyField(ExtraAboType, null=True, blank=True)
extra_abos.help_text = "Zusatz-Abos existieren vorderhand nicht, dieses Feld bleibt leer."
primary_loco = models.ForeignKey("Loco", related_name="abo_primary", null=True, blank=True,
on_delete=models.PROTECT)
primary_loco.help_text = "Primärer Ansprechpartner dieses Abos"
paid = models.BooleanField(default=False, verbose_name="Bezahlt")
paid.help_text = "Dieses Abo wurde vollständig bezahlt."
active = models.BooleanField(default=False, verbose_name="Aktiv")
active.help_text = "An dieses Abo wird Gemüse geliefert"
def __unicode__(self):
if self.SIZE_HALF == self.groesse:
namelist = ["1/2 Einheit"]
elif self.SIZE_SMALL == self.groesse:
namelist = ["1 Einheit"]
else:
namelist = ["%i Einheiten" % int(self.groesse / float(self.SIZE_SMALL))]
namelist.extend(extra.name for extra in self.extra_abos.all())
return u"Abo (%s) #%s" % (" + ".join(namelist), self.number)
def bezieher(self):
locos = self.locos.all()
return ", ".join(unicode(loco) for loco in locos)
def andere_bezieher(self):
locos = self.bezieher_locos().exclude(email=self.primary_loco.email)
return ", ".join(unicode(loco) for loco in locos)
def bezieher_locos(self):
return self.locos.all()
def verantwortlicher_bezieher(self):
loco = self.primary_loco
return unicode(loco) if loco is not None else ""
def groesse_name(self):
return self.abo_types[self.groesse].name_long
def groesse_name_short(self):
return self.get_groesse_display()
class Meta:
verbose_name = "Abo"
verbose_name_plural = "Abos"
class Loco(models.Model):
"""
Additional fields for Django's default user class.
"""
# user class is only used for logins, permissions, and other builtin django stuff
# all user information should be stored in the Loco model
user = models.OneToOneField(User, related_name='loco', null=True, blank=True)
first_name = models.CharField("Vorname", max_length=30)
last_name = models.CharField("Nachname", max_length=30)
email = models.EmailField(unique=True)
SEX = [
("M", "Herr"),
("F", "Frau")
]
sex = models.CharField("Geschlecht", max_length=1, choices=SEX, default='F')
addr_street = models.CharField("Strasse", max_length=100)
addr_zipcode = models.CharField("PLZ", max_length=10)
addr_location = models.CharField("Ort", max_length=50)
birthday = models.DateField("Geburtsdatum", null=True, blank=True)
phone = models.CharField("Telefonnr", max_length=50)
mobile_phone = models.CharField("Mobile", max_length=50, null=True, blank=True)
abo = models.ForeignKey(Abo, related_name="locos", null=True, blank=True,
on_delete=models.SET_NULL)
abo.help_text = "Um dieses Mitglied einem Abo zuzuweisen oder die Abozuordnung zu ändern, bitte wie die Abo-Seite gehen."
confirmed = models.BooleanField("bestätigt", default=True)
confirmed.help_text = "Neu-Anmeldungen über die Webseite sind zuerst nicht bestätigt. Dieses Feld muss danach manuell gesetzt werden."
def get_salutation(self):
if self.sex == 'M':
return 'Herr'
else:
return 'Frau'
def get_full_salutation(self):
if self.sex == 'M':
return 'Lieber '+self.get_name()
else:
return 'Liebe '+self.get_name()
def get_taetigkeitsbereiche(self):
tbs = []
for tb in Taetigkeitsbereich.objects.all():
if tb.locos.all().filter(id=self.id).__len__() > 0:
tbs.append(tb)
return tbs
def __unicode__(self):
return self.get_name()
@classmethod
def create(cls, sender, instance, created, **kdws):
"""
Callback to create a corresponding Django User when a new Loco is created.
"""
if created:
username = helpers.make_username(instance.first_name, instance.last_name, instance.email)
user = User(username=username)
user.save()
user = User.objects.get(username=username)
instance.user = user
instance.save()
@classmethod
def post_delete(cls, sender, instance, **kwds):
instance.user.delete()
class Meta:
verbose_name = "Mitglied"
verbose_name_plural = "Mitglieder"
def get_name(self):
return u"%s %s" % (self.first_name, self.last_name)
def get_phone(self):
if self.mobile_phone != "":
return self.mobile_phone
return self.phone
class Anteilschein(models.Model):
number = models.CharField("Anteilsschein-Nummer", blank=True, max_length=6)
number.help_text = "Interne Anteilsschein-Nummer"
loco = models.ForeignKey(Loco, null=True, blank=True, on_delete=models.SET_NULL)
loco.help_text = "Eigner des Anteilsscheins"
paid = models.BooleanField(default=False, verbose_name="Bezahlt")
paid.help_text = "Dieser Anteilsschein wurde vollständig bezahlt."
canceled = models.BooleanField(default=False, verbose_name="Gekündigt")
def __unicode__(self):
return u"Anteilschein #%s" % (self.number)
class Meta:
verbose_name = "Anteilschein"
verbose_name_plural = "Anteilscheine"
class Taetigkeitsbereich(models.Model):
name = models.CharField("Name", max_length=100, unique=True)
description = models.TextField("Beschreibung", max_length=1000, default="")
core = models.BooleanField("Kernbereich", default=False)
hidden = models.BooleanField("versteckt", default=False)
coordinator = models.ForeignKey(Loco, on_delete=models.PROTECT)
locos = models.ManyToManyField(Loco, related_name="areas", blank=True, null=True)
def __unicode__(self):
return u'%s' % self.name
class Meta:
verbose_name = 'Tätigkeitsbereich'
verbose_name_plural = 'Tätigkeitsbereiche'
class JobTyp(models.Model):
"""
Recurring type of job.
"""
name = models.CharField("Name", max_length=100, unique=True)
displayed_name = models.CharField("Angezeigter Name", max_length=100, blank=True, null=True)
description = tinymce_models.HTMLField("Beschreibung", max_length=1000, default="")
bereich = models.ForeignKey(Taetigkeitsbereich, on_delete=models.PROTECT)
duration = models.PositiveIntegerField("Dauer in Stunden")
duration.help_text = "Diese Dauer wird bei allen Einsätzen dieses Typs verwendet werden."
location = models.CharField("Ort", max_length=100, default="")
location.help_text = "Dieser Ort wird bei allen Einsätzen dieses Typs angezeigt werden."
car_needed = models.BooleanField("Auto benötigt", default=False)
def __unicode__(self):
return u'%s - %s (%s h)' % (self.bereich, self.name, str(self.duration))
def get_name(self):
if self.displayed_name is not None:
return self.displayed_name
return self.name
class Meta:
verbose_name = 'Jobart'
verbose_name_plural = 'Jobarten'
class Job(models.Model):
typ = models.ForeignKey(JobTyp, on_delete=models.PROTECT)
typ.help_text = "Bei einmaligen Einsätzen bitte einen neuen Typ erstellen (auf das Plus klicken) und dort im Titel das Datum angeben"
# A job can now count for more (or less) than 1 boehnli / ruebli
multiplier = models.PositiveIntegerField("Anzahl Rüebli", default=1)
multiplier.help_text = "Anzahl Rüebli, die jede Person für ihre Teilnahme an diesem Einsatz erhält."
# Slots are in number of people, independent of multiplier
slots = models.PositiveIntegerField("Plaetze")
slots.help_text = "Anzahl Personen, die sich für diesen Einsatz eintragen können."
time = models.DateTimeField("Anfangszeit")
time.help_text = "Die Endzeit wird berechnet aus Anfangszeit plus Dauer (übernommen vom Typ)"
reminder_sent = models.BooleanField("Reminder verschickt", default=False)
reminder_sent.help_text = "Wenn gesetzt, wurde die automatische Erinnerung 24h vor dem Einsatz bereits versandt. Sollte nur in Ausnahmefällen von Hand geändert werden müssen."
def __unicode__(self):
return u'Job #%s (%s)' % (self.id, self.typ.name)
def wochentag(self):
weekday = helpers.weekdays[self.time.isoweekday()]
return weekday
def time_stamp(self):
return int(time.mktime(self.time.timetuple()) * 1000)
def freie_plaetze(self):
return self.slots - self.besetzte_plaetze()
def end_time(self):
return self.time + datetime.timedelta(hours=self.typ.duration)
def besetzte_plaetze(self):
return self.boehnli_set.count()
def needs_car(self):
return self.typ.car_needed
def status_class(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
if participants >= self.slots:
return 'full'
to_be_filled = self.slots - participants
time_left = self.time.date() - date.today() # self.time is a datetime, today() a date
days_left = time_left.days
if days_left <= to_be_filled:
return 'urgent'
return ''
def get_car_status(self):
text = self.get_car_status_text()
needed = self.typ.car_needed
if needed:
available = Boehnli.objects.filter(job_id=self.id,with_car=True)
if available.count():
return '<img src="/static/img/auto_green.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
else:
return '<img src="/static/img/auto_red.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
else:
return '<img src="/static/img/auto_grey.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
def get_car_status_text(self):
needed = self.typ.car_needed
if needed:
available = Boehnli.objects.filter(job_id=self.id,with_car=True)
if available.count():
return 'Ein Auto ist bereits verfügbar'
else:
return 'Ein Auto wird noch benötigt'
else:
return 'Kein Auto benötigt'
def get_status_bohne_bar(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
status = self.get_status_bohne_text()
result = ''
for i in range(self.slots):
if participants > i:
result += '<img title="{status}" src="/static/img/erbse_voll.png"/>'.format(status=status)
else:
result += '<img title="{status}" src="/static/img/erbse_leer.png"/>'.format(status=status)
return result
def get_status_bohne_text(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
return "%d von %d gebucht" % (participants, self.slots)
def get_status_bohne(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
pctfull = participants * 100 / self.slots
if pctfull >= 100:
return "erbse_voll.png"
elif pctfull >= 75:
return "erbse_fast_voll.png"
elif pctfull >= 50:
return "erbse_halb.png"
else:
return "erbse_fast_leer.png"
class Meta:
verbose_name = 'Job'
verbose_name_plural = 'Jobs'
class JobComment(models.Model):
job = models.ForeignKey(Job, related_name="comments")
loco = models.ForeignKey(Loco, related_name="comments")
time = models.DateTimeField("Erstellungs-Zeitpunkt", auto_now_add=True)
text = models.TextField("Kommentar")
class Meta:
verbose_name = 'Job-Kommentar'
verbose_name_plural = 'Job-Kommentare'
class Boehnli(models.Model):
"""
Single boehnli (work unit).
"""
job = models.ForeignKey(Job, on_delete=models.CASCADE)
loco = models.ForeignKey(Loco, on_delete=models.PROTECT)
with_car = models.BooleanField("Auto verfügbar", default=False)
def __unicode__(self):
return u'Einsatz #%s' % self.id
def zeit(self):
return self.job.time
class Meta:
verbose_name = 'Einsatz'
verbose_name_plural = 'Einsätze'
#model_audit.m2m(Abo.users)
model_audit.m2m(Abo.extra_abos)
model_audit.fk(Abo.depot)
model_audit.fk(Anteilschein.loco)
signals.post_save.connect(Loco.create, sender=Loco)
signals.post_delete.connect(Loco.post_delete, sender=Loco)
| if abo_size is not Abo.SIZE_NONE:
result[abo_size] = len(self.active_abos().filter(groesse=abo_size)) | conditional_block |
models.py | # encoding: utf-8
import datetime
from django.db import models
from django.contrib.auth.models import User
from django.db.models import signals
from django.core import validators
from django.core.exceptions import ValidationError
import time
from django.db.models import Q
from datetime import date
import model_audit
import helpers
from collections import namedtuple
from tinymce import models as tinymce_models
from ortoloco import settings
class Depot(models.Model):
"""
Location where stuff is picked up.
"""
code = models.CharField("Code", max_length=100, validators=[validators.validate_slug], unique=True)
name = models.CharField("Depot Name", max_length=100, unique=True)
contact = models.ForeignKey("Loco", on_delete=models.PROTECT)
weekday = models.PositiveIntegerField("Wochentag", choices=helpers.weekday_choices)
latitude = models.CharField("Latitude", max_length=100, default="")
longitude = models.CharField("Longitude", max_length=100, default="")
addr_street = models.CharField("Strasse", max_length=100)
addr_zipcode = models.CharField("PLZ", max_length=10)
addr_location = models.CharField("Ort", max_length=50)
def __unicode__(self):
return u"%s %s" % (self.id, self.name)
def active_abos(self):
return self.abo_set.filter(active=True)
def wochentag(self):
day = "Unbekannt"
if self.weekday < 8 and self.weekday > 0:
day = helpers.weekdays[self.weekday]
return day
def get_abo_by_size(self, abo_size):
return len(self.active_abos().filter(groesse=abo_size))
def get_abos_by_sizes(self):
result = {}
for abo_size in Abo.abo_types:
if abo_size is not Abo.SIZE_NONE:
result[abo_size] = len(self.active_abos().filter(groesse=abo_size))
print 'get_abos_by_size', self, result
return result
"""
def small_abos(self):
return len(self.active_abos().filter(Q(groesse=1) | Q(groesse=3)))
def big_abos(self):
return len(self.active_abos().filter(Q(groesse=2) | Q(groesse=3) | Q(groesse=4))) + len(self.active_abos().filter(groesse=4))
"""
class Meta:
verbose_name = "Depot"
verbose_name_plural = "Depots"
class ExtraAboType(models.Model):
"""
Types of extra abos, e.g. eggs, cheese, fruit
"""
name = models.CharField("Name", max_length=100, unique=True)
description = models.TextField("Beschreibung", max_length=1000)
def __unicode__(self):
return u"%s %s" % (self.id, self.name)
class Meta:
verbose_name = "Zusatz-Abo"
verbose_name_plural = "Zusatz-Abos"
class Abo(models.Model):
"""
One Abo that may be shared among several people.
"""
SIZE_NONE = 0
SIZE_HALF = 1
SIZE_SMALL = 2
SIZE_BIG = 4
SIZE_HOUSE = 10
# required_bohnen are per abo, not per person
AboTyp = namedtuple('AboTyp', ['size', 'name_short', 'name_long', 'description', 'min_anteilsscheine', 'visible', 'required_bohnen', 'cost']);
abo_types = {
SIZE_NONE: AboTyp( size=SIZE_NONE, name_short='Keins', name_long='Kein Abo',
min_anteilsscheine=1, visible=True, required_bohnen = 0,
cost = 0,
description=u"Du kannst auch ohne Gemüseabo "+settings.SITE_NAME+"-GenossenschafterIn sein. Bleibe auf dem Laufenden und mach mit, wenn du Lust hast"),
SIZE_HALF: AboTyp( size=SIZE_HALF, name_short='Halb', name_long='Halbes Abo',
min_anteilsscheine=1, visible=False, required_bohnen = 10,
cost = 550,
description=u"Halbe Abos können in Ausnahmefällen vergeben werden"),
SIZE_SMALL: AboTyp( size=SIZE_SMALL, name_short='Klein', name_long='Kleines Abo',
min_anteilsscheine=2, visible=True, required_bohnen = 20,
cost = 1100,
description=u"Das kleine Abo ist für 2-3 Personen geeignet und benötigt mindestens zwei Anteilscheine"),
SIZE_BIG: AboTyp( size=SIZE_BIG, name_short='Gross', name_long='Grosses Abo',
min_anteilsscheine=4, visible=True, required_bohnen = 40,
cost = 2200,
description=u"Das grosse Abo empfiehlt sich für WG's oder Familien (ca. 4-6 Personen) und benötigt vier Anteilscheine")
}
SIZE_CHOICES = [(k, v.name_short) for k, v in abo_types.iteritems()] # a list, not a one-shot generator
number = models.CharField("Abo-Nummer", blank=True, max_length=6)
number.help_text = "Interne Abo-Nummer"
depot = models.ForeignKey(Depot, on_delete=models.PROTECT)
groesse = models.PositiveIntegerField(choices=SIZE_CHOICES,default=SIZE_SMALL)
extra_abos = models.ManyToManyField(ExtraAboType, null=True, blank=True)
extra_abos.help_text = "Zusatz-Abos existieren vorderhand nicht, dieses Feld bleibt leer."
primary_loco = models.ForeignKey("Loco", related_name="abo_primary", null=True, blank=True,
on_delete=models.PROTECT)
primary_loco.help_text = "Primärer Ansprechpartner dieses Abos"
paid = models.BooleanField(default=False, verbose_name="Bezahlt")
paid.help_text = "Dieses Abo wurde vollständig bezahlt."
active = models.BooleanField(default=False, verbose_name="Aktiv")
active.help_text = "An dieses Abo wird Gemüse geliefert"
def __unicode__(self):
if self.SIZE_HALF == self.groesse:
namelist = ["1/2 Einheit"]
elif self.SIZE_SMALL == self.groesse:
namelist = ["1 Einheit"]
else:
namelist = ["%i Einheiten" % int(self.groesse / float(self.SIZE_SMALL))]
namelist.extend(extra.name for extra in self.extra_abos.all())
return u"Abo (%s) #%s" % (" + ".join(namelist), self.number)
def bezieher(self):
locos = self.locos.all()
return ", ".join(unicode(loco) for loco in locos)
def andere_bezieher(self):
locos = self.bezieher_locos().exclude(email=self.primary_loco.email)
return ", ".join(unicode(loco) for loco in locos)
def bezieher_locos(self):
return self.locos.all()
def verantwortlicher_bezieher(self):
loco = self.primary_loco
return unicode(loco) if loco is not None else ""
def groesse_name(self):
return self.abo_types[self.groesse].name_long
def groesse_name_short(self):
return self.get_groesse_display()
class Meta:
verbose_name = "Abo"
verbose_name_plural = "Abos"
class Loco(models.Model):
"""
Additional fields for Django's default user class.
"""
| # user class is only used for logins, permissions, and other builtin django stuff
# all user information should be stored in the Loco model
user = models.OneToOneField(User, related_name='loco', null=True, blank=True)
first_name = models.CharField("Vorname", max_length=30)
last_name = models.CharField("Nachname", max_length=30)
email = models.EmailField(unique=True)
SEX = [
("M", "Herr"),
("F", "Frau")
]
sex = models.CharField("Geschlecht", max_length=1, choices=SEX, default='F')
addr_street = models.CharField("Strasse", max_length=100)
addr_zipcode = models.CharField("PLZ", max_length=10)
addr_location = models.CharField("Ort", max_length=50)
birthday = models.DateField("Geburtsdatum", null=True, blank=True)
phone = models.CharField("Telefonnr", max_length=50)
mobile_phone = models.CharField("Mobile", max_length=50, null=True, blank=True)
abo = models.ForeignKey(Abo, related_name="locos", null=True, blank=True,
on_delete=models.SET_NULL)
abo.help_text = "Um dieses Mitglied einem Abo zuzuweisen oder die Abozuordnung zu ändern, bitte wie die Abo-Seite gehen."
confirmed = models.BooleanField("bestätigt", default=True)
confirmed.help_text = "Neu-Anmeldungen über die Webseite sind zuerst nicht bestätigt. Dieses Feld muss danach manuell gesetzt werden."
def get_salutation(self):
if self.sex == 'M':
return 'Herr'
else:
return 'Frau'
def get_full_salutation(self):
if self.sex == 'M':
return 'Lieber '+self.get_name()
else:
return 'Liebe '+self.get_name()
def get_taetigkeitsbereiche(self):
tbs = []
for tb in Taetigkeitsbereich.objects.all():
if tb.locos.all().filter(id=self.id).__len__() > 0:
tbs.append(tb)
return tbs
def __unicode__(self):
return self.get_name()
@classmethod
def create(cls, sender, instance, created, **kdws):
"""
Callback to create a corresponding Django User when a new Loco is created.
"""
if created:
username = helpers.make_username(instance.first_name, instance.last_name, instance.email)
user = User(username=username)
user.save()
user = User.objects.get(username=username)
instance.user = user
instance.save()
@classmethod
def post_delete(cls, sender, instance, **kwds):
instance.user.delete()
class Meta:
verbose_name = "Mitglied"
verbose_name_plural = "Mitglieder"
def get_name(self):
return u"%s %s" % (self.first_name, self.last_name)
def get_phone(self):
if self.mobile_phone != "":
return self.mobile_phone
return self.phone
class Anteilschein(models.Model):
number = models.CharField("Anteilsschein-Nummer", blank=True, max_length=6)
number.help_text = "Interne Anteilsschein-Nummer"
loco = models.ForeignKey(Loco, null=True, blank=True, on_delete=models.SET_NULL)
loco.help_text = "Eigner des Anteilsscheins"
paid = models.BooleanField(default=False, verbose_name="Bezahlt")
paid.help_text = "Dieser Anteilsschein wurde vollständig bezahlt."
canceled = models.BooleanField(default=False, verbose_name="Gekündigt")
def __unicode__(self):
return u"Anteilschein #%s" % (self.number)
class Meta:
verbose_name = "Anteilschein"
verbose_name_plural = "Anteilscheine"
class Taetigkeitsbereich(models.Model):
name = models.CharField("Name", max_length=100, unique=True)
description = models.TextField("Beschreibung", max_length=1000, default="")
core = models.BooleanField("Kernbereich", default=False)
hidden = models.BooleanField("versteckt", default=False)
coordinator = models.ForeignKey(Loco, on_delete=models.PROTECT)
locos = models.ManyToManyField(Loco, related_name="areas", blank=True, null=True)
def __unicode__(self):
return u'%s' % self.name
class Meta:
verbose_name = 'Tätigkeitsbereich'
verbose_name_plural = 'Tätigkeitsbereiche'
class JobTyp(models.Model):
"""
Recurring type of job.
"""
name = models.CharField("Name", max_length=100, unique=True)
displayed_name = models.CharField("Angezeigter Name", max_length=100, blank=True, null=True)
description = tinymce_models.HTMLField("Beschreibung", max_length=1000, default="")
bereich = models.ForeignKey(Taetigkeitsbereich, on_delete=models.PROTECT)
duration = models.PositiveIntegerField("Dauer in Stunden")
duration.help_text = "Diese Dauer wird bei allen Einsätzen dieses Typs verwendet werden."
location = models.CharField("Ort", max_length=100, default="")
location.help_text = "Dieser Ort wird bei allen Einsätzen dieses Typs angezeigt werden."
car_needed = models.BooleanField("Auto benötigt", default=False)
def __unicode__(self):
return u'%s - %s (%s h)' % (self.bereich, self.name, str(self.duration))
def get_name(self):
if self.displayed_name is not None:
return self.displayed_name
return self.name
class Meta:
verbose_name = 'Jobart'
verbose_name_plural = 'Jobarten'
class Job(models.Model):
typ = models.ForeignKey(JobTyp, on_delete=models.PROTECT)
typ.help_text = "Bei einmaligen Einsätzen bitte einen neuen Typ erstellen (auf das Plus klicken) und dort im Titel das Datum angeben"
# A job can now count for more (or less) than 1 boehnli / ruebli
multiplier = models.PositiveIntegerField("Anzahl Rüebli", default=1)
multiplier.help_text = "Anzahl Rüebli, die jede Person für ihre Teilnahme an diesem Einsatz erhält."
# Slots are in number of people, independent of multiplier
slots = models.PositiveIntegerField("Plaetze")
slots.help_text = "Anzahl Personen, die sich für diesen Einsatz eintragen können."
time = models.DateTimeField("Anfangszeit")
time.help_text = "Die Endzeit wird berechnet aus Anfangszeit plus Dauer (übernommen vom Typ)"
reminder_sent = models.BooleanField("Reminder verschickt", default=False)
reminder_sent.help_text = "Wenn gesetzt, wurde die automatische Erinnerung 24h vor dem Einsatz bereits versandt. Sollte nur in Ausnahmefällen von Hand geändert werden müssen."
def __unicode__(self):
return u'Job #%s (%s)' % (self.id, self.typ.name)
def wochentag(self):
weekday = helpers.weekdays[self.time.isoweekday()]
return weekday
def time_stamp(self):
return int(time.mktime(self.time.timetuple()) * 1000)
def freie_plaetze(self):
return self.slots - self.besetzte_plaetze()
def end_time(self):
return self.time + datetime.timedelta(hours=self.typ.duration)
def besetzte_plaetze(self):
return self.boehnli_set.count()
def needs_car(self):
return self.typ.car_needed
def status_class(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
if participants >= self.slots:
return 'full'
to_be_filled = self.slots - participants
time_left = self.time.date() - date.today() # self.time is a datetime, today() a date
days_left = time_left.days
if days_left <= to_be_filled:
return 'urgent'
return ''
def get_car_status(self):
text = self.get_car_status_text()
needed = self.typ.car_needed
if needed:
available = Boehnli.objects.filter(job_id=self.id,with_car=True)
if available.count():
return '<img src="/static/img/auto_green.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
else:
return '<img src="/static/img/auto_red.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
else:
return '<img src="/static/img/auto_grey.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
def get_car_status_text(self):
needed = self.typ.car_needed
if needed:
available = Boehnli.objects.filter(job_id=self.id,with_car=True)
if available.count():
return 'Ein Auto ist bereits verfügbar'
else:
return 'Ein Auto wird noch benötigt'
else:
return 'Kein Auto benötigt'
def get_status_bohne_bar(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
status = self.get_status_bohne_text()
result = ''
for i in range(self.slots):
if participants > i:
result += '<img title="{status}" src="/static/img/erbse_voll.png"/>'.format(status=status)
else:
result += '<img title="{status}" src="/static/img/erbse_leer.png"/>'.format(status=status)
return result
def get_status_bohne_text(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
return "%d von %d gebucht" % (participants, self.slots)
def get_status_bohne(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
pctfull = participants * 100 / self.slots
if pctfull >= 100:
return "erbse_voll.png"
elif pctfull >= 75:
return "erbse_fast_voll.png"
elif pctfull >= 50:
return "erbse_halb.png"
else:
return "erbse_fast_leer.png"
class Meta:
verbose_name = 'Job'
verbose_name_plural = 'Jobs'
class JobComment(models.Model):
job = models.ForeignKey(Job, related_name="comments")
loco = models.ForeignKey(Loco, related_name="comments")
time = models.DateTimeField("Erstellungs-Zeitpunkt", auto_now_add=True)
text = models.TextField("Kommentar")
class Meta:
verbose_name = 'Job-Kommentar'
verbose_name_plural = 'Job-Kommentare'
class Boehnli(models.Model):
"""
Single boehnli (work unit).
"""
job = models.ForeignKey(Job, on_delete=models.CASCADE)
loco = models.ForeignKey(Loco, on_delete=models.PROTECT)
with_car = models.BooleanField("Auto verfügbar", default=False)
def __unicode__(self):
return u'Einsatz #%s' % self.id
def zeit(self):
return self.job.time
class Meta:
verbose_name = 'Einsatz'
verbose_name_plural = 'Einsätze'
#model_audit.m2m(Abo.users)
model_audit.m2m(Abo.extra_abos)
model_audit.fk(Abo.depot)
model_audit.fk(Anteilschein.loco)
signals.post_save.connect(Loco.create, sender=Loco)
signals.post_delete.connect(Loco.post_delete, sender=Loco) | random_line_split |
|
models.py | # encoding: utf-8
import datetime
from django.db import models
from django.contrib.auth.models import User
from django.db.models import signals
from django.core import validators
from django.core.exceptions import ValidationError
import time
from django.db.models import Q
from datetime import date
import model_audit
import helpers
from collections import namedtuple
from tinymce import models as tinymce_models
from ortoloco import settings
class Depot(models.Model):
"""
Location where stuff is picked up.
"""
code = models.CharField("Code", max_length=100, validators=[validators.validate_slug], unique=True)
name = models.CharField("Depot Name", max_length=100, unique=True)
contact = models.ForeignKey("Loco", on_delete=models.PROTECT)
weekday = models.PositiveIntegerField("Wochentag", choices=helpers.weekday_choices)
latitude = models.CharField("Latitude", max_length=100, default="")
longitude = models.CharField("Longitude", max_length=100, default="")
addr_street = models.CharField("Strasse", max_length=100)
addr_zipcode = models.CharField("PLZ", max_length=10)
addr_location = models.CharField("Ort", max_length=50)
def __unicode__(self):
return u"%s %s" % (self.id, self.name)
def active_abos(self):
return self.abo_set.filter(active=True)
def wochentag(self):
day = "Unbekannt"
if self.weekday < 8 and self.weekday > 0:
day = helpers.weekdays[self.weekday]
return day
def get_abo_by_size(self, abo_size):
return len(self.active_abos().filter(groesse=abo_size))
def get_abos_by_sizes(self):
result = {}
for abo_size in Abo.abo_types:
if abo_size is not Abo.SIZE_NONE:
result[abo_size] = len(self.active_abos().filter(groesse=abo_size))
print 'get_abos_by_size', self, result
return result
"""
def small_abos(self):
return len(self.active_abos().filter(Q(groesse=1) | Q(groesse=3)))
def big_abos(self):
return len(self.active_abos().filter(Q(groesse=2) | Q(groesse=3) | Q(groesse=4))) + len(self.active_abos().filter(groesse=4))
"""
class Meta:
verbose_name = "Depot"
verbose_name_plural = "Depots"
class ExtraAboType(models.Model):
"""
Types of extra abos, e.g. eggs, cheese, fruit
"""
name = models.CharField("Name", max_length=100, unique=True)
description = models.TextField("Beschreibung", max_length=1000)
def __unicode__(self):
return u"%s %s" % (self.id, self.name)
class Meta:
verbose_name = "Zusatz-Abo"
verbose_name_plural = "Zusatz-Abos"
class Abo(models.Model):
"""
One Abo that may be shared among several people.
"""
SIZE_NONE = 0
SIZE_HALF = 1
SIZE_SMALL = 2
SIZE_BIG = 4
SIZE_HOUSE = 10
# required_bohnen are per abo, not per person
AboTyp = namedtuple('AboTyp', ['size', 'name_short', 'name_long', 'description', 'min_anteilsscheine', 'visible', 'required_bohnen', 'cost']);
abo_types = {
SIZE_NONE: AboTyp( size=SIZE_NONE, name_short='Keins', name_long='Kein Abo',
min_anteilsscheine=1, visible=True, required_bohnen = 0,
cost = 0,
description=u"Du kannst auch ohne Gemüseabo "+settings.SITE_NAME+"-GenossenschafterIn sein. Bleibe auf dem Laufenden und mach mit, wenn du Lust hast"),
SIZE_HALF: AboTyp( size=SIZE_HALF, name_short='Halb', name_long='Halbes Abo',
min_anteilsscheine=1, visible=False, required_bohnen = 10,
cost = 550,
description=u"Halbe Abos können in Ausnahmefällen vergeben werden"),
SIZE_SMALL: AboTyp( size=SIZE_SMALL, name_short='Klein', name_long='Kleines Abo',
min_anteilsscheine=2, visible=True, required_bohnen = 20,
cost = 1100,
description=u"Das kleine Abo ist für 2-3 Personen geeignet und benötigt mindestens zwei Anteilscheine"),
SIZE_BIG: AboTyp( size=SIZE_BIG, name_short='Gross', name_long='Grosses Abo',
min_anteilsscheine=4, visible=True, required_bohnen = 40,
cost = 2200,
description=u"Das grosse Abo empfiehlt sich für WG's oder Familien (ca. 4-6 Personen) und benötigt vier Anteilscheine")
}
SIZE_CHOICES = tuple((k, v.name_short) for k, v in abo_types.iteritems())
number = models.CharField("Abo-Nummer", blank=True, max_length=6)
number.help_text = "Interne Abo-Nummer"
depot = models.ForeignKey(Depot, on_delete=models.PROTECT)
groesse = models.PositiveIntegerField(choices=SIZE_CHOICES,default=SIZE_SMALL)
extra_abos = models.ManyToManyField(ExtraAboType, null=True, blank=True)
extra_abos.help_text = "Zusatz-Abos existieren vorderhand nicht, dieses Feld bleibt leer."
primary_loco = models.ForeignKey("Loco", related_name="abo_primary", null=True, blank=True,
on_delete=models.PROTECT)
primary_loco.help_text = "Primärer Ansprechpartner dieses Abos"
paid = models.BooleanField(default=False, verbose_name="Bezahlt")
paid.help_text = "Dieses Abo wurde vollständig bezahlt."
active = models.BooleanField(default=False, verbose_name="Aktiv")
active.help_text = "An dieses Abo wird Gemüse geliefert"
def __unicode__(self):
if self.SIZE_HALF == self.groesse:
namelist = ["1/2 Einheit"]
elif self.SIZE_SMALL == self.groesse:
namelist = ["1 Einheit"]
else:
namelist = ["%i Einheiten" % int(self.groesse / float(self.SIZE_SMALL))]
namelist.extend(extra.name for extra in self.extra_abos.all())
return u"Abo (%s) #%s" % (" + ".join(namelist), self.number)
def bezieher(self):
locos = self.locos.all()
return ", ".join(unicode(loco) for loco in locos)
def andere_bezieher(self):
locos = self.bezieher_locos().exclude(email=self.primary_loco.email)
return ", ".join(unicode(loco) for loco in locos)
def bezieher_locos(self):
return self.locos.all()
def verantwortlicher_bezieher(self):
loco = self.primary_loco
return unicode(loco) if loco is not None else ""
def groesse_name(self):
return self.abo_types[self.groesse].name_long
def groesse_name_short(self):
return self.get_groesse_display()
class Meta:
verbose_name = "Abo"
verbose_name_plural = "Abos"
class Loco(models.Model):
"""
Additional fields for Django's default user class.
"""
# user class is only used for logins, permissions, and other builtin django stuff
# all user information should be stored in the Loco model
user = models.OneToOneField(User, related_name='loco', null=True, blank=True)
first_name = models.CharField("Vorname", max_length=30)
last_name = models.CharField("Nachname", max_length=30)
email = models.EmailField(unique=True)
SEX = [
("M", "Herr"),
("F", "Frau")
]
sex = models.CharField("Geschlecht", max_length=1, choices=SEX, default='F')
addr_street = models.CharField("Strasse", max_length=100)
addr_zipcode = models.CharField("PLZ", max_length=10)
addr_location = models.CharField("Ort", max_length=50)
birthday = models.DateField("Geburtsdatum", null=True, blank=True)
phone = models.CharField("Telefonnr", max_length=50)
mobile_phone = models.CharField("Mobile", max_length=50, null=True, blank=True)
abo = models.ForeignKey(Abo, related_name="locos", null=True, blank=True,
on_delete=models.SET_NULL)
abo.help_text = "Um dieses Mitglied einem Abo zuzuweisen oder die Abozuordnung zu ändern, bitte auf die Abo-Seite gehen."
confirmed = models.BooleanField("bestätigt", default=True)
confirmed.help_text = "Neu-Anmeldungen über die Webseite sind zuerst nicht bestätigt. Dieses Feld muss danach manuell gesetzt werden."
def get_salutation(self):
if self.sex == 'M':
return 'Herr'
else:
return 'Frau'
def get_full_salutation(self):
if self.sex == 'M':
return 'Lieber '+self.get_name()
else:
return 'Liebe '+self.get_name()
def get_taetigkeitsbereiche(self):
tbs = []
for tb in Taetigkeitsbereich.objects.all():
if tb.locos.all().filter(id=self.id).__len__() > 0:
tbs.append(tb)
return tbs
def __unicode__(self):
return self.get_name()
@classmethod
def create(cls, sender, instance, created, | ):
"""
Callback to create corresponding loco when new user is created.
"""
if created:
username = helpers.make_username(instance.first_name, instance.last_name, instance.email)
user = User(username=username)
user.save()
user = User.objects.get(username=username)
instance.user = user
instance.save()
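# Usage sketch (illustrative, relying only on the signal wiring at the bottom of
# this module): saving a new Loco fires post_save, which routes through `create`
# and provisions the linked Django User automatically, e.g.
#   loco = Loco(first_name="Anna", last_name="Muster", email="anna@example.org")
#   loco.save()  # post_save -> Loco.create -> User with a generated username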
@classmethod
def post_delete(cls, sender, instance, **kwds):
instance.user.delete()
class Meta:
verbose_name = "Mitglied"
verbose_name_plural = "Mitglieder"
def get_name(self):
return u"%s %s" % (self.first_name, self.last_name)
def get_phone(self):
if self.mobile_phone != "":
return self.mobile_phone
return self.phone
class Anteilschein(models.Model):
number = models.CharField("Anteilsschein-Nummer", blank=True, max_length=6)
number.help_text = "Interne Anteilsschein-Nummer"
loco = models.ForeignKey(Loco, null=True, blank=True, on_delete=models.SET_NULL)
loco.help_text = "Eigner des Anteilsscheins"
paid = models.BooleanField(default=False, verbose_name="Bezahlt")
paid.help_text = "Dieser Anteilsschein wurde vollständig bezahlt."
canceled = models.BooleanField(default=False, verbose_name="Gekündigt")
def __unicode__(self):
return u"Anteilschein #%s" % (self.number)
class Meta:
verbose_name = "Anteilschein"
verbose_name_plural = "Anteilscheine"
class Taetigkeitsbereich(models.Model):
name = models.CharField("Name", max_length=100, unique=True)
description = models.TextField("Beschreibung", max_length=1000, default="")
core = models.BooleanField("Kernbereich", default=False)
hidden = models.BooleanField("versteckt", default=False)
coordinator = models.ForeignKey(Loco, on_delete=models.PROTECT)
locos = models.ManyToManyField(Loco, related_name="areas", blank=True, null=True)
def __unicode__(self):
return u'%s' % self.name
class Meta:
verbose_name = 'Tätigkeitsbereich'
verbose_name_plural = 'Tätigkeitsbereiche'
class JobTyp(models.Model):
"""
Recurring type of job.
"""
name = models.CharField("Name", max_length=100, unique=True)
displayed_name = models.CharField("Angezeigter Name", max_length=100, blank=True, null=True)
description = tinymce_models.HTMLField("Beschreibung", max_length=1000, default="")
bereich = models.ForeignKey(Taetigkeitsbereich, on_delete=models.PROTECT)
duration = models.PositiveIntegerField("Dauer in Stunden")
duration.help_text = "Diese Dauer wird bei allen Einsätzen dieses Typs verwendet werden."
location = models.CharField("Ort", max_length=100, default="")
location.help_text = "Dieser Ort wird bei allen Einsätzen dieses Typs angezeigt werden."
car_needed = models.BooleanField("Auto benötigt", default=False)
def __unicode__(self):
return u'%s - %s (%s h)' % (self.bereich, self.name, str(self.duration))
def get_name(self):
if self.displayed_name is not None:
return self.displayed_name
return self.name
class Meta:
verbose_name = 'Jobart'
verbose_name_plural = 'Jobarten'
class Job(models.Model):
typ = models.ForeignKey(JobTyp, on_delete=models.PROTECT)
typ.help_text = "Bei einmaligen Einsätzen bitte einen neuen Typ erstellen (auf das Plus klicken) und dort im Titel das Datum angeben"
# A job can now count for more (or less) than 1 boehnli / ruebli
multiplier = models.PositiveIntegerField("Anzahl Rüebli", default=1)
multiplier.help_text = "Anzahl Rüebli, die jede Person für ihre Teilnahme an diesem Einsatz erhält."
# Slots are in number of people, independent of multiplier
slots = models.PositiveIntegerField("Plaetze")
slots.help_text = "Anzahl Personen, die sich für diesen Einsatz eintragen können."
time = models.DateTimeField("Anfangszeit")
time.help_text = "Die Endzeit wird berechnet aus Anfangszeit plus Dauer (übernommen vom Typ)"
reminder_sent = models.BooleanField("Reminder verschickt", default=False)
reminder_sent.help_text = "Wenn gesetzt, wurde die automatische Erinnerung 24h vor dem Einsatz bereits versandt. Sollte nur in Ausnahmefällen von Hand geändert werden müssen."
def __unicode__(self):
return u'Job #%s (%s)' % (self.id, self.typ.name)
def wochentag(self):
weekday = helpers.weekdays[self.time.isoweekday()]
return weekday
def time_stamp(self):
return int(time.mktime(self.time.timetuple()) * 1000)
def freie_plaetze(self):
return self.slots - self.besetzte_plaetze()
def end_time(self):
return self.time + datetime.timedelta(hours=self.typ.duration)
def besetzte_plaetze(self):
return self.boehnli_set.count()
def needs_car(self):
return self.typ.car_needed
def status_class(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
if participants >= self.slots:
return 'full'
to_be_filled = self.slots - participants
time_left = self.time - date.today()
days_left = time_left.days
if days_left <= to_be_filled:
return 'urgent'
return ''
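# Reading of the rule above (sketch): a job turns "urgent" as soon as the days
# remaining no longer exceed the unfilled slots, e.g. 3 open slots with only
# 2 days to go.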
def get_car_status(self):
text = self.get_car_status_text()
needed = self.typ.car_needed
if needed:
available = Boehnli.objects.filter(job_id=self.id,with_car=True)
if available.count():
return '<img src="/static/img/auto_green.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
else:
return '<img src="/static/img/auto_red.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
else:
return '<img src="/static/img/auto_grey.png" style="width:32px; height:22px;" width="32" height="22" title="%s" />' % text
def get_car_status_text(self):
needed = self.typ.car_needed
if needed:
available = Boehnli.objects.filter(job_id=self.id,with_car=True)
if available.count():
return 'Ein Auto ist bereits verfügbar'
else:
return 'Ein Auto wird noch benötigt'
else:
return 'Kein Auto benötigt'
def get_status_bohne_bar(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
status = self.get_status_bohne_text()
result = ''
for i in range(self.slots):
if participants > i:
result += '<img title="{status}" src="/static/img/erbse_voll.png"/>'.format(status=status)
else:
result += '<img title="{status}" src="/static/img/erbse_leer.png"/>'.format(status=status)
return result
def get_status_bohne_text(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
return "%d von %d gebucht" % (participants, self.slots)
def get_status_bohne(self):
boehnlis = Boehnli.objects.filter(job_id=self.id)
participants = boehnlis.count()
pctfull = participants * 100 / self.slots
if pctfull >= 100:
return "erbse_voll.png"
elif pctfull >= 75:
return "erbse_fast_voll.png"
elif pctfull >= 50:
return "erbse_halb.png"
else:
return "erbse_fast_leer.png"
class Meta:
verbose_name = 'Job'
verbose_name_plural = 'Jobs'
class JobComment(models.Model):
job = models.ForeignKey(Job, related_name="comments")
loco = models.ForeignKey(Loco, related_name="comments")
time = models.DateTimeField("Erstellungs-Zeitpunkt", auto_now_add=True)
text = models.TextField("Kommentar")
class Meta:
verbose_name = 'Job-Kommentar'
verbose_name_plural = 'Job-Kommentare'
class Boehnli(models.Model):
"""
Single boehnli (work unit).
"""
job = models.ForeignKey(Job, on_delete=models.CASCADE)
loco = models.ForeignKey(Loco, on_delete=models.PROTECT)
with_car = models.BooleanField("Auto verfügbar", default=False)
def __unicode__(self):
return u'Einsatz #%s' % self.id
def zeit(self):
return self.job.time
class Meta:
verbose_name = 'Einsatz'
verbose_name_plural = 'Einsätze'
#model_audit.m2m(Abo.users)
model_audit.m2m(Abo.extra_abos)
model_audit.fk(Abo.depot)
model_audit.fk(Anteilschein.loco)
signals.post_save.connect(Loco.create, sender=Loco)
signals.post_delete.connect(Loco.post_delete, sender=Loco)
**kwds | identifier_name
jwt.rs | //! # A Firestore Auth Session token is a Javascript Web Token (JWT). This module contains JWT helper functions.
use crate::credentials::Credentials;
use crate::errors::FirebaseError;
use biscuit::jwa::SignatureAlgorithm;
use biscuit::{ClaimPresenceOptions, SingleOrMultiple, StringOrUri, ValidationOptions};
use chrono::{Duration, Utc};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::Value;
use std::collections::{HashMap, HashSet};
use std::ops::Add;
use std::ops::Deref;
use std::slice::Iter;
use std::str::FromStr;
type Error = super::errors::FirebaseError;
pub static JWT_AUDIENCE_FIRESTORE: &str =
"https://firestore.googleapis.com/google.firestore.v1.Firestore";
pub static JWT_AUDIENCE_IDENTITY: &str =
"https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit";
pub trait PrivateClaims
where
Self: Serialize + DeserializeOwned + Clone + Default,
{
fn get_scopes(&self) -> HashSet<String>;
fn get_client_id(&self) -> Option<String>;
fn get_uid(&self) -> Option<String>;
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct JwtOAuthPrivateClaims {
#[serde(skip_serializing_if = "Option::is_none")]
pub scope: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub uid: Option<String>, // Probably the firebase User ID if set
}
impl JwtOAuthPrivateClaims {
pub fn new<S: AsRef<str>>(
scope: Option<Iter<S>>,
client_id: Option<String>,
user_id: Option<String>,
) -> Self {
JwtOAuthPrivateClaims {
scope: scope.and_then(|f| {
Some(f.fold(String::new(), |acc, x| {
let x: &str = x.as_ref();
return acc + x + " ";
}))
}),
client_id,
uid: user_id,
}
}
}
impl PrivateClaims for JwtOAuthPrivateClaims {
fn get_scopes(&self) -> HashSet<String> {
match self.scope {
Some(ref v) => v.split(" ").map(|f| f.to_owned()).collect(),
None => HashSet::new(),
}
}
fn get_client_id(&self) -> Option<String> {
self.client_id.clone()
}
fn get_uid(&self) -> Option<String> {
self.uid.clone()
}
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct JwtCustomClaims {
pub uid: String,
pub claims: HashMap<String, Value>,
}
impl JwtCustomClaims {
pub fn new<T: Serialize>(uid: &str, claims: T) -> Self {
let dev_claims = {
let val = serde_json::to_string(&claims).unwrap_or("".to_string());
serde_json::from_str::<HashMap<String, Value>>(&val).unwrap_or_default()
};
JwtCustomClaims {
claims: dev_claims,
uid: uid.to_string(),
}
}
}
impl PrivateClaims for JwtCustomClaims {
fn get_scopes(&self) -> HashSet<String> {
HashSet::new()
}
fn get_client_id(&self) -> Option<String> {
None
}
fn get_uid(&self) -> Option<String> {
Some(self.uid.clone())
}
}
pub(crate) type AuthClaimsJWT = biscuit::JWT<JwtOAuthPrivateClaims, biscuit::Empty>;
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct JWSEntry {
#[serde(flatten)]
pub(crate) headers: biscuit::jws::RegisteredHeader,
#[serde(flatten)]
pub(crate) ne: biscuit::jwk::RSAKeyParameters,
}
#[derive(Serialize, Deserialize)]
pub struct JWKSetDTO {
pub keys: Vec<JWSEntry>,
}
/// Download the Google JWK Set for a given service account.
/// The resulting set of JWKs needs to be added to a credentials object
/// for jwk verifications.
pub fn download_google_jwks(account_mail: &str) -> Result<JWKSetDTO, Error> {
let resp = reqwest::blocking::Client::new()
.get(&format!(
"https://www.googleapis.com/service_accounts/v1/jwk/{}",
account_mail
))
.send()?;
let jwk_set: JWKSetDTO = resp.json()?;
Ok(jwk_set)
}
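// Usage sketch (illustrative; how the keys are attached to a `Credentials`
// value is assumed here, not taken from this crate's API):
//
//     let jwks = download_google_jwks("firebase-adminsdk@my-project.iam.gserviceaccount.com")?;
//     // jwks.keys now carries the RSA parameters needed to verify RS256
//     // signatures, typically stored on the `Credentials` used for verification.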
/// Download the Google JWK Set for a given service account.
/// The resulting set of JWKs needs to be added to a credentials object
/// for jwk verifications. | .get(&format!(
"https://www.googleapis.com/service_accounts/v1/jwk/{}",
account_mail
))
.send()
.await?;
let jwk_set: JWKSetDTO = resp.json().await?;
Ok(jwk_set)
}
/// Returns true if the access token (assumed to be a jwt) has expired
///
/// An error is returned if the given access token string is not a jwt
pub(crate) fn is_expired(
access_token: &str,
tolerance_in_minutes: i64,
) -> Result<bool, FirebaseError> {
let token = AuthClaimsJWT::new_encoded(&access_token);
let claims = token.unverified_payload()?;
if let Some(expiry) = claims.registered.expiry.as_ref() {
let diff: Duration = Utc::now().signed_duration_since(expiry.deref().clone());
return Ok(diff.num_minutes() - tolerance_in_minutes > 0);
}
Ok(true)
}
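// Example (sketch, with a hypothetical `session.access_token` value): refresh
// a little before the hard expiry by passing a tolerance in minutes.
//
//     if is_expired(&session.access_token, 5)? {
//         // token is expired or about to expire -> mint and sign a fresh JWT
//     }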
/// Returns true if the jwt was updated and needs signing
pub(crate) fn jwt_update_expiry_if(jwt: &mut AuthClaimsJWT, expire_in_minutes: i64) -> bool {
let ref mut claims = jwt.payload_mut().unwrap().registered;
let now = biscuit::Timestamp::from(Utc::now());
if let Some(issued_at) = claims.issued_at.as_ref() {
let diff: Duration = Utc::now().signed_duration_since(issued_at.deref().clone());
if diff.num_minutes() > expire_in_minutes {
claims.issued_at = Some(now);
} else {
return false;
}
} else {
claims.issued_at = Some(now);
}
true
}
pub(crate) fn create_jwt<S>(
credentials: &Credentials,
scope: Option<Iter<S>>,
duration: chrono::Duration,
client_id: Option<String>,
user_id: Option<String>,
audience: &str,
) -> Result<AuthClaimsJWT, Error>
where
S: AsRef<str>,
{
let claims = JwtOAuthPrivateClaims::new(scope, client_id, user_id);
create_jwt_with_claims(credentials, duration, audience, claims)
}
pub(crate) fn create_jwt_encoded<S: AsRef<str>>(
credentials: &Credentials,
scope: Option<Iter<S>>,
duration: chrono::Duration,
client_id: Option<String>,
user_id: Option<String>,
audience: &str,
) -> Result<String, Error> {
let jwt = create_jwt(credentials, scope, duration, client_id, user_id, audience)?;
let secret = credentials
.keys
.secret
.as_ref()
.ok_or(Error::Generic("No private key added via add_keypair_key!"))?;
Ok(jwt.encode(&secret.deref())?.encoded()?.encode())
}
fn create_jwt_with_claims<T>(
credentials: &Credentials,
duration: chrono::Duration,
audience: &str,
claims: T,
) -> Result<biscuit::JWT<T, biscuit::Empty>, Error>
where
T: Serialize + DeserializeOwned,
{
use biscuit::{
jws::{Header, RegisteredHeader},
ClaimsSet, Empty, RegisteredClaims,
};
let header: Header<Empty> = Header::from(RegisteredHeader {
algorithm: SignatureAlgorithm::RS256,
key_id: Some(credentials.private_key_id.to_owned()),
..Default::default()
});
let expected_claims = ClaimsSet::<T> {
registered: RegisteredClaims {
issuer: Some(FromStr::from_str(&credentials.client_email)?),
audience: Some(SingleOrMultiple::Single(StringOrUri::from_str(audience)?)),
expiry: Some(biscuit::Timestamp::from(Utc::now().add(duration))),
subject: Some(StringOrUri::from_str(&credentials.client_email)?),
issued_at: Some(biscuit::Timestamp::from(Utc::now())),
..Default::default()
},
private: claims,
};
Ok(biscuit::JWT::new_decoded(header, expected_claims))
}
pub fn create_custom_jwt_encoded<T: PrivateClaims>(
credentials: &Credentials,
claims: T,
) -> Result<String, Error> {
let jwt = create_jwt_with_claims(
&credentials,
Duration::hours(1),
JWT_AUDIENCE_IDENTITY,
claims,
)?;
let secret = credentials
.keys
.secret
.as_ref()
.ok_or(FirebaseError::Generic(
"No private key added via add_keypair_key!",
))?;
Ok(jwt.encode(&secret.deref())?.encoded()?.encode())
}
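// Sketch of minting a custom identity token; the "role" claim below is purely
// illustrative and not something this module defines.
//
//     let claims = JwtCustomClaims::new("some-firebase-uid", serde_json::json!({ "role": "admin" }));
//     let token = create_custom_jwt_encoded(&credentials, claims)?;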
pub struct TokenValidationResult<T: PrivateClaims = JwtOAuthPrivateClaims> {
pub claims: T,
pub audience: String,
pub subject: String,
}
impl TokenValidationResult {
pub fn get_scopes(&self) -> HashSet<String> {
self.claims.get_scopes()
}
}
pub(crate) fn verify_access_token(
credentials: &Credentials,
access_token: &str,
) -> Result<TokenValidationResult, Error> {
verify_access_token_with_claims(credentials, access_token)
}
pub fn verify_access_token_with_claims<T: PrivateClaims>(
credentials: &Credentials,
access_token: &str,
) -> Result<TokenValidationResult<T>, Error> {
let token = biscuit::JWT::<T, biscuit::Empty>::new_encoded(&access_token);
let header = token.unverified_header()?;
let kid = header
.registered
.key_id
.as_ref()
.ok_or(FirebaseError::Generic("No jwt kid"))?;
let secret = credentials
.decode_secret(kid)
.ok_or(FirebaseError::Generic("No secret for kid"))?;
let token = token.into_decoded(&secret.deref(), SignatureAlgorithm::RS256)?;
use biscuit::Presence::*;
let o = ValidationOptions {
claim_presence_options: ClaimPresenceOptions {
issued_at: Required,
not_before: Optional,
expiry: Required,
issuer: Required,
audience: Required,
subject: Required,
id: Optional,
},
// audience: Validation::Validate(StringOrUri::from_str(JWT_SUBJECT)?),
..Default::default()
};
let claims = token.payload()?;
claims.registered.validate(o)?;
let audience = match claims.registered.audience.as_ref().unwrap() {
SingleOrMultiple::Single(v) => v.to_string(),
SingleOrMultiple::Multiple(v) => v.get(0).unwrap().to_string(),
};
Ok(TokenValidationResult {
claims: claims.private.clone(),
subject: claims.registered.subject.as_ref().unwrap().to_string(),
audience,
})
} | pub async fn download_google_jwks_async(account_mail: &str) -> Result<JWKSetDTO, Error> {
let resp = reqwest::Client::new() | random_line_split |
jwt.rs | //! # A Firestore Auth Session token is a Javascript Web Token (JWT). This module contains JWT helper functions.
use crate::credentials::Credentials;
use crate::errors::FirebaseError;
use biscuit::jwa::SignatureAlgorithm;
use biscuit::{ClaimPresenceOptions, SingleOrMultiple, StringOrUri, ValidationOptions};
use chrono::{Duration, Utc};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::Value;
use std::collections::{HashMap, HashSet};
use std::ops::Add;
use std::ops::Deref;
use std::slice::Iter;
use std::str::FromStr;
type Error = super::errors::FirebaseError;
pub static JWT_AUDIENCE_FIRESTORE: &str =
"https://firestore.googleapis.com/google.firestore.v1.Firestore";
pub static JWT_AUDIENCE_IDENTITY: &str =
"https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit";
pub trait PrivateClaims
where
Self: Serialize + DeserializeOwned + Clone + Default,
{
fn get_scopes(&self) -> HashSet<String>;
fn get_client_id(&self) -> Option<String>;
fn get_uid(&self) -> Option<String>;
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct JwtOAuthPrivateClaims {
#[serde(skip_serializing_if = "Option::is_none")]
pub scope: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub uid: Option<String>, // Probably the firebase User ID if set
}
impl JwtOAuthPrivateClaims {
pub fn new<S: AsRef<str>>(
scope: Option<Iter<S>>,
client_id: Option<String>,
user_id: Option<String>,
) -> Self {
JwtOAuthPrivateClaims {
scope: scope.and_then(|f| {
Some(f.fold(String::new(), |acc, x| {
let x: &str = x.as_ref();
return acc + x + " ";
}))
}),
client_id,
uid: user_id,
}
}
}
impl PrivateClaims for JwtOAuthPrivateClaims {
fn get_scopes(&self) -> HashSet<String> {
match self.scope {
Some(ref v) => v.split(" ").map(|f| f.to_owned()).collect(),
None => HashSet::new(),
}
}
fn get_client_id(&self) -> Option<String> {
self.client_id.clone()
}
fn get_uid(&self) -> Option<String> {
self.uid.clone()
}
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct JwtCustomClaims {
pub uid: String,
pub claims: HashMap<String, Value>,
}
impl JwtCustomClaims {
pub fn new<T: Serialize>(uid: &str, claims: T) -> Self {
let dev_claims = {
let val = serde_json::to_string(&claims).unwrap_or("".to_string());
serde_json::from_str::<HashMap<String, Value>>(&val).unwrap_or_default()
};
JwtCustomClaims {
claims: dev_claims,
uid: uid.to_string(),
}
}
}
impl PrivateClaims for JwtCustomClaims {
fn get_scopes(&self) -> HashSet<String> {
HashSet::new()
}
fn get_client_id(&self) -> Option<String> {
None
}
fn get_uid(&self) -> Option<String> {
Some(self.uid.clone())
}
}
pub(crate) type AuthClaimsJWT = biscuit::JWT<JwtOAuthPrivateClaims, biscuit::Empty>;
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct | {
#[serde(flatten)]
pub(crate) headers: biscuit::jws::RegisteredHeader,
#[serde(flatten)]
pub(crate) ne: biscuit::jwk::RSAKeyParameters,
}
#[derive(Serialize, Deserialize)]
pub struct JWKSetDTO {
pub keys: Vec<JWSEntry>,
}
/// Download the Google JWK Set for a given service account.
/// The resulting set of JWKs needs to be added to a credentials object
/// for jwk verifications.
pub fn download_google_jwks(account_mail: &str) -> Result<JWKSetDTO, Error> {
let resp = reqwest::blocking::Client::new()
.get(&format!(
"https://www.googleapis.com/service_accounts/v1/jwk/{}",
account_mail
))
.send()?;
let jwk_set: JWKSetDTO = resp.json()?;
Ok(jwk_set)
}
/// Download the Google JWK Set for a given service account.
/// The resulting set of JWKs needs to be added to a credentials object
/// for jwk verifications.
pub async fn download_google_jwks_async(account_mail: &str) -> Result<JWKSetDTO, Error> {
let resp = reqwest::Client::new()
.get(&format!(
"https://www.googleapis.com/service_accounts/v1/jwk/{}",
account_mail
))
.send()
.await?;
let jwk_set: JWKSetDTO = resp.json().await?;
Ok(jwk_set)
}
/// Returns true if the access token (assumed to be a jwt) has expired
///
/// An error is returned if the given access token string is not a jwt
pub(crate) fn is_expired(
access_token: &str,
tolerance_in_minutes: i64,
) -> Result<bool, FirebaseError> {
let token = AuthClaimsJWT::new_encoded(&access_token);
let claims = token.unverified_payload()?;
if let Some(expiry) = claims.registered.expiry.as_ref() {
let diff: Duration = Utc::now().signed_duration_since(expiry.deref().clone());
return Ok(diff.num_minutes() - tolerance_in_minutes > 0);
}
Ok(true)
}
/// Returns true if the jwt was updated and needs signing
pub(crate) fn jwt_update_expiry_if(jwt: &mut AuthClaimsJWT, expire_in_minutes: i64) -> bool {
let ref mut claims = jwt.payload_mut().unwrap().registered;
let now = biscuit::Timestamp::from(Utc::now());
if let Some(issued_at) = claims.issued_at.as_ref() {
let diff: Duration = Utc::now().signed_duration_since(issued_at.deref().clone());
if diff.num_minutes() > expire_in_minutes {
claims.issued_at = Some(now);
} else {
return false;
}
} else {
claims.issued_at = Some(now);
}
true
}
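// Sketch of the intended refresh pattern (illustrative only): bump the
// issued-at claim once it is older than the threshold, then re-sign.
//
//     if jwt_update_expiry_if(&mut jwt, 50) {
//         // issued_at was refreshed -> encode and sign the JWT again
//     }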
pub(crate) fn create_jwt<S>(
credentials: &Credentials,
scope: Option<Iter<S>>,
duration: chrono::Duration,
client_id: Option<String>,
user_id: Option<String>,
audience: &str,
) -> Result<AuthClaimsJWT, Error>
where
S: AsRef<str>,
{
let claims = JwtOAuthPrivateClaims::new(scope, client_id, user_id);
create_jwt_with_claims(credentials, duration, audience, claims)
}
pub(crate) fn create_jwt_encoded<S: AsRef<str>>(
credentials: &Credentials,
scope: Option<Iter<S>>,
duration: chrono::Duration,
client_id: Option<String>,
user_id: Option<String>,
audience: &str,
) -> Result<String, Error> {
let jwt = create_jwt(credentials, scope, duration, client_id, user_id, audience)?;
let secret = credentials
.keys
.secret
.as_ref()
.ok_or(Error::Generic("No private key added via add_keypair_key!"))?;
Ok(jwt.encode(&secret.deref())?.encoded()?.encode())
}
fn create_jwt_with_claims<T>(
credentials: &Credentials,
duration: chrono::Duration,
audience: &str,
claims: T,
) -> Result<biscuit::JWT<T, biscuit::Empty>, Error>
where
T: Serialize + DeserializeOwned,
{
use biscuit::{
jws::{Header, RegisteredHeader},
ClaimsSet, Empty, RegisteredClaims,
};
let header: Header<Empty> = Header::from(RegisteredHeader {
algorithm: SignatureAlgorithm::RS256,
key_id: Some(credentials.private_key_id.to_owned()),
..Default::default()
});
let expected_claims = ClaimsSet::<T> {
registered: RegisteredClaims {
issuer: Some(FromStr::from_str(&credentials.client_email)?),
audience: Some(SingleOrMultiple::Single(StringOrUri::from_str(audience)?)),
expiry: Some(biscuit::Timestamp::from(Utc::now().add(duration))),
subject: Some(StringOrUri::from_str(&credentials.client_email)?),
issued_at: Some(biscuit::Timestamp::from(Utc::now())),
..Default::default()
},
private: claims,
};
Ok(biscuit::JWT::new_decoded(header, expected_claims))
}
pub fn create_custom_jwt_encoded<T: PrivateClaims>(
credentials: &Credentials,
claims: T,
) -> Result<String, Error> {
let jwt = create_jwt_with_claims(
&credentials,
Duration::hours(1),
JWT_AUDIENCE_IDENTITY,
claims,
)?;
let secret = credentials
.keys
.secret
.as_ref()
.ok_or(FirebaseError::Generic(
"No private key added via add_keypair_key!",
))?;
Ok(jwt.encode(&secret.deref())?.encoded()?.encode())
}
pub struct TokenValidationResult<T: PrivateClaims = JwtOAuthPrivateClaims> {
pub claims: T,
pub audience: String,
pub subject: String,
}
impl TokenValidationResult {
pub fn get_scopes(&self) -> HashSet<String> {
self.claims.get_scopes()
}
}
pub(crate) fn verify_access_token(
credentials: &Credentials,
access_token: &str,
) -> Result<TokenValidationResult, Error> {
verify_access_token_with_claims(credentials, access_token)
}
pub fn verify_access_token_with_claims<T: PrivateClaims>(
credentials: &Credentials,
access_token: &str,
) -> Result<TokenValidationResult<T>, Error> {
let token = biscuit::JWT::<T, biscuit::Empty>::new_encoded(&access_token);
let header = token.unverified_header()?;
let kid = header
.registered
.key_id
.as_ref()
.ok_or(FirebaseError::Generic("No jwt kid"))?;
let secret = credentials
.decode_secret(kid)
.ok_or(FirebaseError::Generic("No secret for kid"))?;
let token = token.into_decoded(&secret.deref(), SignatureAlgorithm::RS256)?;
use biscuit::Presence::*;
let o = ValidationOptions {
claim_presence_options: ClaimPresenceOptions {
issued_at: Required,
not_before: Optional,
expiry: Required,
issuer: Required,
audience: Required,
subject: Required,
id: Optional,
},
// audience: Validation::Validate(StringOrUri::from_str(JWT_SUBJECT)?),
..Default::default()
};
let claims = token.payload()?;
claims.registered.validate(o)?;
let audience = match claims.registered.audience.as_ref().unwrap() {
SingleOrMultiple::Single(v) => v.to_string(),
SingleOrMultiple::Multiple(v) => v.get(0).unwrap().to_string(),
};
Ok(TokenValidationResult {
claims: claims.private.clone(),
subject: claims.registered.subject.as_ref().unwrap().to_string(),
audience,
})
}
| JWSEntry | identifier_name |
jwt.rs | //! # A Firestore Auth Session token is a Javascript Web Token (JWT). This module contains JWT helper functions.
use crate::credentials::Credentials;
use crate::errors::FirebaseError;
use biscuit::jwa::SignatureAlgorithm;
use biscuit::{ClaimPresenceOptions, SingleOrMultiple, StringOrUri, ValidationOptions};
use chrono::{Duration, Utc};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::Value;
use std::collections::{HashMap, HashSet};
use std::ops::Add;
use std::ops::Deref;
use std::slice::Iter;
use std::str::FromStr;
type Error = super::errors::FirebaseError;
pub static JWT_AUDIENCE_FIRESTORE: &str =
"https://firestore.googleapis.com/google.firestore.v1.Firestore";
pub static JWT_AUDIENCE_IDENTITY: &str =
"https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit";
pub trait PrivateClaims
where
Self: Serialize + DeserializeOwned + Clone + Default,
{
fn get_scopes(&self) -> HashSet<String>;
fn get_client_id(&self) -> Option<String>;
fn get_uid(&self) -> Option<String>;
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct JwtOAuthPrivateClaims {
#[serde(skip_serializing_if = "Option::is_none")]
pub scope: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub uid: Option<String>, // Probably the firebase User ID if set
}
impl JwtOAuthPrivateClaims {
pub fn new<S: AsRef<str>>(
scope: Option<Iter<S>>,
client_id: Option<String>,
user_id: Option<String>,
) -> Self {
JwtOAuthPrivateClaims {
scope: scope.and_then(|f| {
Some(f.fold(String::new(), |acc, x| {
let x: &str = x.as_ref();
return acc + x + " ";
}))
}),
client_id,
uid: user_id,
}
}
}
impl PrivateClaims for JwtOAuthPrivateClaims {
fn get_scopes(&self) -> HashSet<String> {
match self.scope {
Some(ref v) => v.split(" ").map(|f| f.to_owned()).collect(),
None => HashSet::new(),
}
}
fn get_client_id(&self) -> Option<String> {
self.client_id.clone()
}
fn get_uid(&self) -> Option<String> {
self.uid.clone()
}
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct JwtCustomClaims {
pub uid: String,
pub claims: HashMap<String, Value>,
}
impl JwtCustomClaims {
pub fn new<T: Serialize>(uid: &str, claims: T) -> Self {
let dev_claims = {
let val = serde_json::to_string(&claims).unwrap_or("".to_string());
serde_json::from_str::<HashMap<String, Value>>(&val).unwrap_or_default()
};
JwtCustomClaims {
claims: dev_claims,
uid: uid.to_string(),
}
}
}
impl PrivateClaims for JwtCustomClaims {
fn get_scopes(&self) -> HashSet<String> {
HashSet::new()
}
fn get_client_id(&self) -> Option<String> {
None
}
fn get_uid(&self) -> Option<String> {
Some(self.uid.clone())
}
}
pub(crate) type AuthClaimsJWT = biscuit::JWT<JwtOAuthPrivateClaims, biscuit::Empty>;
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct JWSEntry {
#[serde(flatten)]
pub(crate) headers: biscuit::jws::RegisteredHeader,
#[serde(flatten)]
pub(crate) ne: biscuit::jwk::RSAKeyParameters,
}
#[derive(Serialize, Deserialize)]
pub struct JWKSetDTO {
pub keys: Vec<JWSEntry>,
}
/// Download the Google JWK Set for a given service account.
/// The resulting set of JWKs needs to be added to a credentials object
/// for jwk verifications.
pub fn download_google_jwks(account_mail: &str) -> Result<JWKSetDTO, Error> {
let resp = reqwest::blocking::Client::new()
.get(&format!(
"https://www.googleapis.com/service_accounts/v1/jwk/{}",
account_mail
))
.send()?;
let jwk_set: JWKSetDTO = resp.json()?;
Ok(jwk_set)
}
/// Download the Google JWK Set for a given service account.
/// The resulting set of JWKs needs to be added to a credentials object
/// for jwk verifications.
pub async fn download_google_jwks_async(account_mail: &str) -> Result<JWKSetDTO, Error> {
let resp = reqwest::Client::new()
.get(&format!(
"https://www.googleapis.com/service_accounts/v1/jwk/{}",
account_mail
))
.send()
.await?;
let jwk_set: JWKSetDTO = resp.json().await?;
Ok(jwk_set)
}
/// Returns true if the access token (assumed to be a jwt) has expired
///
/// An error is returned if the given access token string is not a jwt
pub(crate) fn is_expired(
access_token: &str,
tolerance_in_minutes: i64,
) -> Result<bool, FirebaseError> {
let token = AuthClaimsJWT::new_encoded(&access_token);
let claims = token.unverified_payload()?;
if let Some(expiry) = claims.registered.expiry.as_ref() {
let diff: Duration = Utc::now().signed_duration_since(expiry.deref().clone());
return Ok(diff.num_minutes() - tolerance_in_minutes > 0);
}
Ok(true)
}
/// Returns true if the jwt was updated and needs signing
pub(crate) fn jwt_update_expiry_if(jwt: &mut AuthClaimsJWT, expire_in_minutes: i64) -> bool {
let ref mut claims = jwt.payload_mut().unwrap().registered;
let now = biscuit::Timestamp::from(Utc::now());
if let Some(issued_at) = claims.issued_at.as_ref() {
let diff: Duration = Utc::now().signed_duration_since(issued_at.deref().clone());
if diff.num_minutes() > expire_in_minutes {
claims.issued_at = Some(now);
} else {
return false;
}
} else {
claims.issued_at = Some(now);
}
true
}
pub(crate) fn create_jwt<S>(
credentials: &Credentials,
scope: Option<Iter<S>>,
duration: chrono::Duration,
client_id: Option<String>,
user_id: Option<String>,
audience: &str,
) -> Result<AuthClaimsJWT, Error>
where
S: AsRef<str>,
{
let claims = JwtOAuthPrivateClaims::new(scope, client_id, user_id);
create_jwt_with_claims(credentials, duration, audience, claims)
}
pub(crate) fn create_jwt_encoded<S: AsRef<str>>(
credentials: &Credentials,
scope: Option<Iter<S>>,
duration: chrono::Duration,
client_id: Option<String>,
user_id: Option<String>,
audience: &str,
) -> Result<String, Error> {
let jwt = create_jwt(credentials, scope, duration, client_id, user_id, audience)?;
let secret = credentials
.keys
.secret
.as_ref()
.ok_or(Error::Generic("No private key added via add_keypair_key!"))?;
Ok(jwt.encode(&secret.deref())?.encoded()?.encode())
}
fn create_jwt_with_claims<T>(
credentials: &Credentials,
duration: chrono::Duration,
audience: &str,
claims: T,
) -> Result<biscuit::JWT<T, biscuit::Empty>, Error>
where
T: Serialize + DeserializeOwned,
{
use biscuit::{
jws::{Header, RegisteredHeader},
ClaimsSet, Empty, RegisteredClaims,
};
let header: Header<Empty> = Header::from(RegisteredHeader {
algorithm: SignatureAlgorithm::RS256,
key_id: Some(credentials.private_key_id.to_owned()),
..Default::default()
});
let expected_claims = ClaimsSet::<T> {
registered: RegisteredClaims {
issuer: Some(FromStr::from_str(&credentials.client_email)?),
audience: Some(SingleOrMultiple::Single(StringOrUri::from_str(audience)?)),
expiry: Some(biscuit::Timestamp::from(Utc::now().add(duration))),
subject: Some(StringOrUri::from_str(&credentials.client_email)?),
issued_at: Some(biscuit::Timestamp::from(Utc::now())),
..Default::default()
},
private: claims,
};
Ok(biscuit::JWT::new_decoded(header, expected_claims))
}
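// Sketch of the resulting payload (all field values are illustrative): biscuit
// flattens the private claims into the same JSON object as the registered ones.
//
//     {
//       "iss": "service-account@project.iam.gserviceaccount.com",
//       "sub": "service-account@project.iam.gserviceaccount.com",
//       "aud": "https://firestore.googleapis.com/google.firestore.v1.Firestore",
//       "iat": 1600000000, "exp": 1600003600,
//       "scope": "...", "uid": "..."
//     }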
pub fn create_custom_jwt_encoded<T: PrivateClaims>(
credentials: &Credentials,
claims: T,
) -> Result<String, Error> {
let jwt = create_jwt_with_claims(
&credentials,
Duration::hours(1),
JWT_AUDIENCE_IDENTITY,
claims,
)?;
let secret = credentials
.keys
.secret
.as_ref()
.ok_or(FirebaseError::Generic(
"No private key added via add_keypair_key!",
))?;
Ok(jwt.encode(&secret.deref())?.encoded()?.encode())
}
pub struct TokenValidationResult<T: PrivateClaims = JwtOAuthPrivateClaims> {
pub claims: T,
pub audience: String,
pub subject: String,
}
impl TokenValidationResult {
pub fn get_scopes(&self) -> HashSet<String> |
}
pub(crate) fn verify_access_token(
credentials: &Credentials,
access_token: &str,
) -> Result<TokenValidationResult, Error> {
verify_access_token_with_claims(credentials, access_token)
}
pub fn verify_access_token_with_claims<T: PrivateClaims>(
credentials: &Credentials,
access_token: &str,
) -> Result<TokenValidationResult<T>, Error> {
let token = biscuit::JWT::<T, biscuit::Empty>::new_encoded(&access_token);
let header = token.unverified_header()?;
let kid = header
.registered
.key_id
.as_ref()
.ok_or(FirebaseError::Generic("No jwt kid"))?;
let secret = credentials
.decode_secret(kid)
.ok_or(FirebaseError::Generic("No secret for kid"))?;
let token = token.into_decoded(&secret.deref(), SignatureAlgorithm::RS256)?;
use biscuit::Presence::*;
let o = ValidationOptions {
claim_presence_options: ClaimPresenceOptions {
issued_at: Required,
not_before: Optional,
expiry: Required,
issuer: Required,
audience: Required,
subject: Required,
id: Optional,
},
// audience: Validation::Validate(StringOrUri::from_str(JWT_SUBJECT)?),
..Default::default()
};
let claims = token.payload()?;
claims.registered.validate(o)?;
let audience = match claims.registered.audience.as_ref().unwrap() {
SingleOrMultiple::Single(v) => v.to_string(),
SingleOrMultiple::Multiple(v) => v.get(0).unwrap().to_string(),
};
Ok(TokenValidationResult {
claims: claims.private.clone(),
subject: claims.registered.subject.as_ref().unwrap().to_string(),
audience,
})
}
| {
self.claims.get_scopes()
} | identifier_body |
jwt.rs | //! # A Firestore Auth Session token is a Javascript Web Token (JWT). This module contains JWT helper functions.
use crate::credentials::Credentials;
use crate::errors::FirebaseError;
use biscuit::jwa::SignatureAlgorithm;
use biscuit::{ClaimPresenceOptions, SingleOrMultiple, StringOrUri, ValidationOptions};
use chrono::{Duration, Utc};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::Value;
use std::collections::{HashMap, HashSet};
use std::ops::Add;
use std::ops::Deref;
use std::slice::Iter;
use std::str::FromStr;
type Error = super::errors::FirebaseError;
pub static JWT_AUDIENCE_FIRESTORE: &str =
"https://firestore.googleapis.com/google.firestore.v1.Firestore";
pub static JWT_AUDIENCE_IDENTITY: &str =
"https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit";
pub trait PrivateClaims
where
Self: Serialize + DeserializeOwned + Clone + Default,
{
fn get_scopes(&self) -> HashSet<String>;
fn get_client_id(&self) -> Option<String>;
fn get_uid(&self) -> Option<String>;
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct JwtOAuthPrivateClaims {
#[serde(skip_serializing_if = "Option::is_none")]
pub scope: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub uid: Option<String>, // Probably the firebase User ID if set
}
impl JwtOAuthPrivateClaims {
pub fn new<S: AsRef<str>>(
scope: Option<Iter<S>>,
client_id: Option<String>,
user_id: Option<String>,
) -> Self {
JwtOAuthPrivateClaims {
scope: scope.and_then(|f| {
Some(f.fold(String::new(), |acc, x| {
let x: &str = x.as_ref();
return acc + x + " ";
}))
}),
client_id,
uid: user_id,
}
}
}
impl PrivateClaims for JwtOAuthPrivateClaims {
fn get_scopes(&self) -> HashSet<String> {
match self.scope {
Some(ref v) => v.split(" ").map(|f| f.to_owned()).collect(),
None => HashSet::new(),
}
}
fn get_client_id(&self) -> Option<String> {
self.client_id.clone()
}
fn get_uid(&self) -> Option<String> {
self.uid.clone()
}
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct JwtCustomClaims {
pub uid: String,
pub claims: HashMap<String, Value>,
}
impl JwtCustomClaims {
pub fn new<T: Serialize>(uid: &str, claims: T) -> Self {
let dev_claims = {
let val = serde_json::to_string(&claims).unwrap_or("".to_string());
serde_json::from_str::<HashMap<String, Value>>(&val).unwrap_or_default()
};
JwtCustomClaims {
claims: dev_claims,
uid: uid.to_string(),
}
}
}
impl PrivateClaims for JwtCustomClaims {
fn get_scopes(&self) -> HashSet<String> {
HashSet::new()
}
fn get_client_id(&self) -> Option<String> {
None
}
fn get_uid(&self) -> Option<String> {
Some(self.uid.clone())
}
}
pub(crate) type AuthClaimsJWT = biscuit::JWT<JwtOAuthPrivateClaims, biscuit::Empty>;
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct JWSEntry {
#[serde(flatten)]
pub(crate) headers: biscuit::jws::RegisteredHeader,
#[serde(flatten)]
pub(crate) ne: biscuit::jwk::RSAKeyParameters,
}
#[derive(Serialize, Deserialize)]
pub struct JWKSetDTO {
pub keys: Vec<JWSEntry>,
}
/// Download the Google JWK Set for a given service account.
/// The resulting set of JWKs needs to be added to a credentials object
/// for jwk verifications.
pub fn download_google_jwks(account_mail: &str) -> Result<JWKSetDTO, Error> {
let resp = reqwest::blocking::Client::new()
.get(&format!(
"https://www.googleapis.com/service_accounts/v1/jwk/{}",
account_mail
))
.send()?;
let jwk_set: JWKSetDTO = resp.json()?;
Ok(jwk_set)
}
/// Download the Google JWK Set for a given service account.
/// The resulting set of JWKs needs to be added to a credentials object
/// for jwk verifications.
pub async fn download_google_jwks_async(account_mail: &str) -> Result<JWKSetDTO, Error> {
let resp = reqwest::Client::new()
.get(&format!(
"https://www.googleapis.com/service_accounts/v1/jwk/{}",
account_mail
))
.send()
.await?;
let jwk_set: JWKSetDTO = resp.json().await?;
Ok(jwk_set)
}
/// Returns true if the access token (assumed to be a jwt) has expired
///
/// An error is returned if the given access token string is not a jwt
pub(crate) fn is_expired(
access_token: &str,
tolerance_in_minutes: i64,
) -> Result<bool, FirebaseError> {
let token = AuthClaimsJWT::new_encoded(&access_token);
let claims = token.unverified_payload()?;
if let Some(expiry) = claims.registered.expiry.as_ref() |
Ok(true)
}
/// Returns true if the jwt was updated and needs signing
pub(crate) fn jwt_update_expiry_if(jwt: &mut AuthClaimsJWT, expire_in_minutes: i64) -> bool {
let ref mut claims = jwt.payload_mut().unwrap().registered;
let now = biscuit::Timestamp::from(Utc::now());
if let Some(issued_at) = claims.issued_at.as_ref() {
let diff: Duration = Utc::now().signed_duration_since(issued_at.deref().clone());
if diff.num_minutes() > expire_in_minutes {
claims.issued_at = Some(now);
} else {
return false;
}
} else {
claims.issued_at = Some(now);
}
true
}
pub(crate) fn create_jwt<S>(
credentials: &Credentials,
scope: Option<Iter<S>>,
duration: chrono::Duration,
client_id: Option<String>,
user_id: Option<String>,
audience: &str,
) -> Result<AuthClaimsJWT, Error>
where
S: AsRef<str>,
{
let claims = JwtOAuthPrivateClaims::new(scope, client_id, user_id);
create_jwt_with_claims(credentials, duration, audience, claims)
}
pub(crate) fn create_jwt_encoded<S: AsRef<str>>(
credentials: &Credentials,
scope: Option<Iter<S>>,
duration: chrono::Duration,
client_id: Option<String>,
user_id: Option<String>,
audience: &str,
) -> Result<String, Error> {
let jwt = create_jwt(credentials, scope, duration, client_id, user_id, audience)?;
let secret = credentials
.keys
.secret
.as_ref()
.ok_or(Error::Generic("No private key added via add_keypair_key!"))?;
Ok(jwt.encode(&secret.deref())?.encoded()?.encode())
}
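// Usage sketch (illustrative; the scope URL and user id are made up):
//
//     let scopes = ["https://www.googleapis.com/auth/datastore".to_string()];
//     let token = create_jwt_encoded(&credentials, Some(scopes.iter()),
//         chrono::Duration::hours(1), None, Some("user-123".into()), JWT_AUDIENCE_FIRESTORE)?;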
fn create_jwt_with_claims<T>(
credentials: &Credentials,
duration: chrono::Duration,
audience: &str,
claims: T,
) -> Result<biscuit::JWT<T, biscuit::Empty>, Error>
where
T: Serialize + DeserializeOwned,
{
use biscuit::{
jws::{Header, RegisteredHeader},
ClaimsSet, Empty, RegisteredClaims,
};
let header: Header<Empty> = Header::from(RegisteredHeader {
algorithm: SignatureAlgorithm::RS256,
key_id: Some(credentials.private_key_id.to_owned()),
..Default::default()
});
let expected_claims = ClaimsSet::<T> {
registered: RegisteredClaims {
issuer: Some(FromStr::from_str(&credentials.client_email)?),
audience: Some(SingleOrMultiple::Single(StringOrUri::from_str(audience)?)),
expiry: Some(biscuit::Timestamp::from(Utc::now().add(duration))),
subject: Some(StringOrUri::from_str(&credentials.client_email)?),
issued_at: Some(biscuit::Timestamp::from(Utc::now())),
..Default::default()
},
private: claims,
};
Ok(biscuit::JWT::new_decoded(header, expected_claims))
}
pub fn create_custom_jwt_encoded<T: PrivateClaims>(
credentials: &Credentials,
claims: T,
) -> Result<String, Error> {
let jwt = create_jwt_with_claims(
&credentials,
Duration::hours(1),
JWT_AUDIENCE_IDENTITY,
claims,
)?;
let secret = credentials
.keys
.secret
.as_ref()
.ok_or(FirebaseError::Generic(
"No private key added via add_keypair_key!",
))?;
Ok(jwt.encode(&secret.deref())?.encoded()?.encode())
}
pub struct TokenValidationResult<T: PrivateClaims = JwtOAuthPrivateClaims> {
pub claims: T,
pub audience: String,
pub subject: String,
}
impl TokenValidationResult {
pub fn get_scopes(&self) -> HashSet<String> {
self.claims.get_scopes()
}
}
pub(crate) fn verify_access_token(
credentials: &Credentials,
access_token: &str,
) -> Result<TokenValidationResult, Error> {
verify_access_token_with_claims(credentials, access_token)
}
pub fn verify_access_token_with_claims<T: PrivateClaims>(
credentials: &Credentials,
access_token: &str,
) -> Result<TokenValidationResult<T>, Error> {
let token = biscuit::JWT::<T, biscuit::Empty>::new_encoded(&access_token);
let header = token.unverified_header()?;
let kid = header
.registered
.key_id
.as_ref()
.ok_or(FirebaseError::Generic("No jwt kid"))?;
let secret = credentials
.decode_secret(kid)
.ok_or(FirebaseError::Generic("No secret for kid"))?;
let token = token.into_decoded(&secret.deref(), SignatureAlgorithm::RS256)?;
use biscuit::Presence::*;
let o = ValidationOptions {
claim_presence_options: ClaimPresenceOptions {
issued_at: Required,
not_before: Optional,
expiry: Required,
issuer: Required,
audience: Required,
subject: Required,
id: Optional,
},
// audience: Validation::Validate(StringOrUri::from_str(JWT_SUBJECT)?),
..Default::default()
};
let claims = token.payload()?;
claims.registered.validate(o)?;
let audience = match claims.registered.audience.as_ref().unwrap() {
SingleOrMultiple::Single(v) => v.to_string(),
SingleOrMultiple::Multiple(v) => v.get(0).unwrap().to_string(),
};
Ok(TokenValidationResult {
claims: claims.private.clone(),
subject: claims.registered.subject.as_ref().unwrap().to_string(),
audience,
})
}
| {
let diff: Duration = Utc::now().signed_duration_since(expiry.deref().clone());
return Ok(diff.num_minutes() - tolerance_in_minutes > 0);
} | conditional_block |
spy.rs | use crate::{backend::Backend, error::error};
use cloudevents::{
event::{Data, ExtensionValue},
AttributesReader, Event,
};
use drogue_cloud_service_api::{EXT_APPLICATION, EXT_DEVICE};
use itertools::Itertools;
use patternfly_yew::*;
use unicode_segmentation::UnicodeSegmentation;
use wasm_bindgen::{closure::Closure, JsValue};
use web_sys::{EventSource, EventSourceInit};
use yew::prelude::*;
pub struct Spy {
link: ComponentLink<Self>,
source: Option<EventSource>,
events: SharedTableModel<Entry>,
application: String,
running: bool,
total_received: usize,
}
pub enum Msg {
Start(Option<String>),
StartPressed,
Stop,
Event(Box<Event>),
/// Failed when processing an event
Error(String),
/// Source failed
Failed,
SetApplication(String),
}
const DEFAULT_MAX_SIZE: usize = 200;
#[derive(Clone, Debug, PartialEq)]
pub struct Entry(pub Event);
impl TableRenderer for Entry {
fn render(&self, col: ColumnIndex) -> Html {
match col.index {
// timestamp
0 => render_timestamp(&self.0),
// device id
1 => self.device().into(),
// payload
2 => render_data_short(&self.0),
// ignore
_ => html! {},
}
}
fn render_details(&self) -> Vec<Span> {
vec![Span::max(render_details(&self.0)).truncate()]
}
}
impl Entry {
fn device(&self) -> String {
let app_id = self.extension_as_string(EXT_APPLICATION);
let device_id = self.extension_as_string(EXT_DEVICE);
format!("{} / {}", app_id, device_id)
}
fn extension_as_string(&self, name: &str) -> String {
self.0
.extension(name)
.map(|s| match s {
ExtensionValue::String(s) => s.clone(),
ExtensionValue::Integer(i) => i.to_string(),
ExtensionValue::Boolean(true) => "true".into(),
ExtensionValue::Boolean(false) => "false".into(),
})
.unwrap_or_default()
}
}
impl Component for Spy {
type Message = Msg;
type Properties = ();
fn create(_props: Self::Properties, link: ComponentLink<Self>) -> Self {
Self {
events: Default::default(),
link,
source: None,
running: false,
total_received: 0,
application: String::new(),
}
}
fn update(&mut self, msg: Self::Message) -> ShouldRender {
match msg {
Msg::Start(app_id) => {
log::info!("Starting: {:?}", app_id);
self.start(app_id);
}
Msg::StartPressed => {
self.link.send_message(Msg::Start(self.app_id_filter()));
}
Msg::Stop => {
self.stop();
}
Msg::Event(event) => {
// log::debug!("Pushing event: {:?}", event);
self.total_received += 1;
self.events.insert(0, Entry(*event));
while self.events.len() > DEFAULT_MAX_SIZE {
self.events.pop();
}
}
Msg::Error(err) => {
error("Failed to process event", err);
}
Msg::Failed => {
error("Source error", "Failed to connect to the event source");
self.running = false;
}
Msg::SetApplication(application) => {
self.application = application;
}
}
true
}
fn change(&mut self, _props: Self::Properties) -> ShouldRender {
false
}
fn view(&self) -> Html {
let is_valid = self.app_id_filter().is_some();
let is_running = self.running;
let v = |value: &str| match value {
"" => InputState::Error,
_ => InputState::Default,
};
return html! {
<>
<PageSection variant=PageSectionVariant::Light limit_width=true>
<Content>
<Title>{"Device Message Spy"}</Title>
</Content>
</PageSection>
<PageSection>
<Toolbar>
<ToolbarGroup>
<ToolbarItem>
<TextInput
disabled=self.running
onchange=self.link.callback(|app|Msg::SetApplication(app))
validator=Validator::from(v)
placeholder="Application ID to spy on"/>
</ToolbarItem>
<ToolbarItem>
{if is_running {
html!{<Button
disabled=!is_valid
label="Stop"
icon=Icon::Pause
variant=Variant::Secondary
onclick=self.link.callback(|_|Msg::Stop)
/>}
} else {
html!{<Button
disabled=!is_valid
label="Start"
icon=Icon::Play
variant=Variant::Primary
onclick=self.link.callback(|_|Msg::StartPressed)
/>}
}}
</ToolbarItem>
</ToolbarGroup>
<ToolbarItem modifiers=vec![ToolbarElementModifier::Right.all()]>
{ if self.running { html!{
<strong>{"events received: "}{self.total_received}</strong>
} } else { html!{} } }
</ToolbarItem>
</Toolbar>
<Table<SharedTableModel<Entry>>
entries=self.events.clone()
mode=TableMode::CompactExpandable
header={html_nested!{
<TableHeader>
<TableColumn label="Timestamp (UTC)"/>
<TableColumn label="Device ID"/>
<TableColumn label="Payload"/>
</TableHeader>
}}
>
</Table<SharedTableModel<Entry>>>
{ if self.events.is_empty() {
self.render_empty()
} else {
html!{}
}}
</PageSection>
</>
};
}
fn destroy(&mut self) {
if let Some(source) = self.source.take() {
source.close();
}
}
}
impl Spy {
fn app_id_filter(&self) -> Option<String> {
let value = self.application.clone();
match value.is_empty() {
true => None,
false => Some(value),
}
}
fn start(&mut self, app_id: Option<String>) {
let mut url = Backend::url("/api/console/v1alpha1/spy").unwrap();
// add optional filter
if let Some(app_id) = &app_id {
url.query_pairs_mut().append_pair("app", app_id);
}
// EventSource doesn't support passing headers, so we cannot send
// the bearer token the normal way
url.query_pairs_mut()
.append_pair("token", &Backend::access_token().unwrap_or_default());
// create source
let source =
EventSource::new_with_event_source_init_dict(&url.to_string(), &EventSourceInit::new())
.unwrap();
// setup onmessage
let link = self.link.clone();
let on_message = Closure::wrap(Box::new(move |msg: &JsValue| {
let msg = extract_event(msg);
link.send_message(msg);
}) as Box<dyn FnMut(&JsValue)>);
source.set_onmessage(Some(&on_message.into_js_value().into()));
| let on_error = Closure::wrap(Box::new(move || {
link.send_message(Msg::Failed);
}) as Box<dyn FnMut()>);
source.set_onerror(Some(&on_error.into_js_value().into()));
// store result
self.running = true;
self.source = Some(source);
}
fn stop(&mut self) {
if let Some(source) = self.source.take() {
source.close();
}
self.running = false
}
fn render_empty(&self) -> Html {
return html! {
<div style="padding-bottom: 10rem; height: 100%;">
<Bullseye>
<EmptyState
title="No new messages"
icon=Icon::Pending
size=Size::XLarge
>
{ "The " } <q> {"message spy"} </q> { " will only show "} <strong> {"new"} </strong> {" messages received by the system.
When the next message arrives, you will see it right here." }
</EmptyState>
</Bullseye>
</div>
};
}
}
fn extract_event(msg: &JsValue) -> Msg {
// web_sys::console::debug_2(&JsValue::from("event: "), msg);
let data: String = js_sys::Reflect::get(msg, &JsValue::from("data"))
.unwrap()
.as_string()
.unwrap();
match serde_json::from_str(&data) {
Ok(event) => Msg::Event(event),
Err(e) => Msg::Error(e.to_string()),
}
}
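// Sketch of what the SSE `data` field is expected to carry (illustrative; the
// extension attribute names are assumed from EXT_APPLICATION/EXT_DEVICE): one
// CloudEvent serialized as JSON, e.g.
//
//     {"specversion":"1.0","id":"abc-123","type":"io.drogue.event.v1",
//      "source":"drogue://sandbox","application":"my-app","device":"my-device",
//      "data":{"temp":21.5}}
//
// Anything that fails to deserialize into a CloudEvent surfaces as `Msg::Error`.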
fn render_data(event: &Event) -> Html {
// let data: Option<Data> = event.get_data();
match event.data() {
None => html! {},
Some(Data::String(text)) => html! { <pre> {text} </pre> },
Some(Data::Binary(blob)) => html! { <>
<pre> { pretty_hex::pretty_hex(&blob) } </pre>
<pre> { base64_block(&blob) } </pre>
</> },
Some(Data::Json(value)) => {
let value = serde_json::to_string_pretty(&value).unwrap();
return html! { <pre> {value} </pre> };
}
}
}
fn base64_block(input: &[u8]) -> String {
base64::encode(input)
.chars()
.collect::<Vec<_>>()
.chunks(120)
.map(|chunk| chunk.iter().collect::<String>())
.join("\n")
}
fn render_blob(blob: &[u8]) -> String {
let max = blob.len().min(25);
let ellipsis = if blob.len() > max { ", …" } else { "" };
format!("[{}; {:02x?}{}]", blob.len(), &blob[0..max], ellipsis)
}
fn truncate_str(len: usize, string: &str) -> String {
let mut r = String::new();
for c in string.graphemes(true) {
if r.len() > len || r.contains('\n') || r.contains('\r') {
r.push('…');
break;
}
r.push_str(c);
}
r
}
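// Note (sketch): the cut-off above is grapheme-aware, so multi-byte characters
// such as "ü" are never split; once the byte budget is exceeded, or a line
// break has been copied, an ellipsis is appended and the rest is dropped.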
fn render_data_short(event: &Event) -> Html {
match event.data() {
None => html! {},
Some(Data::String(text)) => html! {
<pre>
<Label label="String" color=Color::Purple/>{" "}{truncate_str(100, text)}
</pre>
},
Some(Data::Binary(blob)) => html! {
<pre>
<Label label="BLOB" color=Color::Blue/>{" "}{render_blob(&blob)}
</pre>
},
Some(Data::Json(value)) => html! {
<pre>
<Label label="JSON" color=Color::Cyan/>{" "}{truncate_str(100, &value.to_string())}
</pre>
},
}
}
fn render_timestamp(event: &Event) -> Html {
event
.time()
.map(|ts| {
return html! {
<span>
<pre>{ts.format("%H:%M:%S%.3f %Y-%m-%d")}</pre>
</span>
};
})
.unwrap_or_default()
}
#[derive(Clone, Debug, PartialEq)]
struct AttributeEntry(pub String, pub Html);
impl TableRenderer for AttributeEntry {
fn render(&self, index: ColumnIndex) -> Html {
match index.index {
0 => html! {&self.0},
1 => self.1.clone(),
_ => html! {},
}
}
}
fn render_details(event: &Event) -> Html {
let mut attrs: Vec<AttributeEntry> = event
.iter()
.map(|(key, value)| {
(
key.to_string(),
html! {
<pre class="pf-c-table__text">{ value.to_string() }</pre>
},
)
})
.map(|(key, value)| AttributeEntry(key, value))
.collect();
attrs.sort_by(|a, b| a.0.cmp(&b.0));
return html! {
<>
<h3>{"Attributes"}</h3>
<Table<SimpleTableModel<AttributeEntry>>
entries=SimpleTableModel::from(attrs)
mode=TableMode::CompactNoBorders
header=html_nested!{
<TableHeader>
<TableColumn label="Key"/>
<TableColumn label="Value"/>
</TableHeader>
}
>
</Table<SimpleTableModel<AttributeEntry>>>
<h3>{"Payload"}</h3>
{ render_data(event) }
</>
};
} | // setup onerror
let link = self.link.clone(); | random_line_split |
spy.rs | use crate::{backend::Backend, error::error};
use cloudevents::{
event::{Data, ExtensionValue},
AttributesReader, Event,
};
use drogue_cloud_service_api::{EXT_APPLICATION, EXT_DEVICE};
use itertools::Itertools;
use patternfly_yew::*;
use unicode_segmentation::UnicodeSegmentation;
use wasm_bindgen::{closure::Closure, JsValue};
use web_sys::{EventSource, EventSourceInit};
use yew::prelude::*;
pub struct Spy {
link: ComponentLink<Self>,
source: Option<EventSource>,
events: SharedTableModel<Entry>,
application: String,
running: bool,
total_received: usize,
}
pub enum Msg {
Start(Option<String>),
StartPressed,
Stop,
Event(Box<Event>),
/// Failed when processing an event
Error(String),
/// Source failed
Failed,
SetApplication(String),
}
const DEFAULT_MAX_SIZE: usize = 200;
#[derive(Clone, Debug, PartialEq)]
pub struct Entry(pub Event);
impl TableRenderer for Entry {
fn render(&self, col: ColumnIndex) -> Html {
match col.index {
// timestamp
0 => render_timestamp(&self.0),
// device id
1 => self.device().into(),
// payload
2 => render_data_short(&self.0),
// ignore
_ => html! {},
}
}
fn render_details(&self) -> Vec<Span> {
vec![Span::max(render_details(&self.0)).truncate()]
}
}
impl Entry {
fn device(&self) -> String {
let app_id = self.extension_as_string(EXT_APPLICATION);
let device_id = self.extension_as_string(EXT_DEVICE);
format!("{} / {}", app_id, device_id)
}
fn extension_as_string(&self, name: &str) -> String {
self.0
.extension(name)
.map(|s| match s {
ExtensionValue::String(s) => s.clone(),
ExtensionValue::Integer(i) => i.to_string(),
ExtensionValue::Boolean(true) => "true".into(),
ExtensionValue::Boolean(false) => "false".into(),
})
.unwrap_or_default()
}
}
impl Component for Spy {
type Message = Msg;
type Properties = ();
fn create(_props: Self::Properties, link: ComponentLink<Self>) -> Self {
Self {
events: Default::default(),
link,
source: None,
running: false,
total_received: 0,
application: String::new(),
}
}
fn update(&mut self, msg: Self::Message) -> ShouldRender {
match msg {
Msg::Start(app_id) => {
log::info!("Starting: {:?}", app_id);
self.start(app_id);
}
Msg::StartPressed => {
self.link.send_message(Msg::Start(self.app_id_filter()));
}
Msg::Stop => {
self.stop();
}
Msg::Event(event) => {
// log::debug!("Pushing event: {:?}", event);
self.total_received += 1;
self.events.insert(0, Entry(*event));
while self.events.len() > DEFAULT_MAX_SIZE {
self.events.pop();
}
}
Msg::Error(err) => {
error("Failed to process event", err);
}
Msg::Failed => {
error("Source error", "Failed to connect to the event source");
self.running = false;
}
Msg::SetApplication(application) => {
self.application = application;
}
}
true
}
fn change(&mut self, _props: Self::Properties) -> ShouldRender {
false
}
fn view(&self) -> Html {
let is_valid = self.app_id_filter().is_some();
let is_running = self.running;
let v = |value: &str| match value {
"" => InputState::Error,
_ => InputState::Default,
};
return html! {
<>
<PageSection variant=PageSectionVariant::Light limit_width=true>
<Content>
<Title>{"Device Message Spy"}</Title>
</Content>
</PageSection>
<PageSection>
<Toolbar>
<ToolbarGroup>
<ToolbarItem>
<TextInput
disabled=self.running
onchange=self.link.callback(|app|Msg::SetApplication(app))
validator=Validator::from(v)
placeholder="Application ID to spy on"/>
</ToolbarItem>
<ToolbarItem>
{if is_running {
html!{<Button
disabled=!is_valid
label="Stop"
icon=Icon::Pause
variant=Variant::Secondary
onclick=self.link.callback(|_|Msg::Stop)
/>}
} else {
html!{<Button
disabled=!is_valid
label="Start"
icon=Icon::Play
variant=Variant::Primary
onclick=self.link.callback(|_|Msg::StartPressed)
/>}
}}
</ToolbarItem>
</ToolbarGroup>
<ToolbarItem modifiers=vec![ToolbarElementModifier::Right.all()]>
{ if self.running { html!{
<strong>{"events received: "}{self.total_received}</strong>
} } else { html!{} } }
</ToolbarItem>
</Toolbar>
<Table<SharedTableModel<Entry>>
entries=self.events.clone()
mode=TableMode::CompactExpandable
header={html_nested!{
<TableHeader>
<TableColumn label="Timestamp (UTC)"/>
<TableColumn label="Device ID"/>
<TableColumn label="Payload"/>
</TableHeader>
}}
>
</Table<SharedTableModel<Entry>>>
{ if self.events.is_empty() {
self.render_empty()
} else {
html!{}
}}
</PageSection>
</>
};
}
fn destroy(&mut self) {
if let Some(source) = self.source.take() {
source.close();
}
}
}
impl Spy {
fn app_id_filter(&self) -> Option<String> {
let value = self.application.clone();
match value.is_empty() {
true => None,
false => Some(value),
}
}
fn start(&mut self, app_id: Option<String>) {
let mut url = Backend::url("/api/console/v1alpha1/spy").unwrap();
// add optional filter
if let Some(app_id) = &app_id {
url.query_pairs_mut().append_pair("app", app_id);
}
// EventSource doesn't support passing headers, so we cannot send
// the bearer token the normal way
url.query_pairs_mut()
.append_pair("token", &Backend::access_token().unwrap_or_default());
// create source
let source =
EventSource::new_with_event_source_init_dict(&url.to_string(), &EventSourceInit::new())
.unwrap();
// setup onmessage
let link = self.link.clone();
let on_message = Closure::wrap(Box::new(move |msg: &JsValue| {
let msg = extract_event(msg);
link.send_message(msg);
}) as Box<dyn FnMut(&JsValue)>);
source.set_onmessage(Some(&on_message.into_js_value().into()));
// setup onerror
let link = self.link.clone();
let on_error = Closure::wrap(Box::new(move || {
link.send_message(Msg::Failed);
}) as Box<dyn FnMut()>);
source.set_onerror(Some(&on_error.into_js_value().into()));
// store result
self.running = true;
self.source = Some(source);
}
fn stop(&mut self) {
if let Some(source) = self.source.take() {
source.close();
}
self.running = false
}
fn render_empty(&self) -> Html {
return html! {
<div style="padding-bottom: 10rem; height: 100%;">
<Bullseye>
<EmptyState
title="No new messages"
icon=Icon::Pending
size=Size::XLarge
>
{ "The " } <q> {"message spy"} </q> { " will only show "} <strong> {"new"} </strong> {" messages received by the system.
When the next message arrives, you will see it right here." }
</EmptyState>
</Bullseye>
</div>
};
}
}
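// Pull the `data` string out of the EventSource message and try to parse it as a
// CloudEvent; any parsing failure is reported back to the component as `Msg::Error`.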
fn extract_event(msg: &JsValue) -> Msg {
// web_sys::console::debug_2(&JsValue::from("event: "), msg);
let data: String = js_sys::Reflect::get(msg, &JsValue::from("data"))
.unwrap()
.as_string()
.unwrap();
match serde_json::from_str(&data) {
Ok(event) => Msg::Event(event),
Err(e) => Msg::Error(e.to_string()),
}
}
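// Render the full payload of an event: plain text as-is, binary as a hex dump plus a
// base64 block, and JSON pretty-printed.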
fn render_data(event: &Event) -> Html {
// let data: Option<Data> = event.get_data();
match event.data() {
None => html! {},
Some(Data::String(text)) => html! { <pre> {text} </pre> },
Some(Data::Binary(blob)) => html! { <>
<pre> { pretty_hex::pretty_hex(&blob) } </pre>
<pre> { base64_block(&blob) } </pre>
</> },
Some(Data::Json(value)) => {
let value = serde_json::to_string_pretty(&value).unwrap();
return html! { <pre> {value} </pre> };
}
}
}
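// Base64-encode the payload and wrap the encoded text into 120-character lines.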
fn base64_block(input: &[u8]) -> String {
base64::encode(input)
.chars()
.collect::<Vec<_>>()
.chunks(120)
.map(|chunk| chunk.iter().collect::<String>())
.join("\n")
}
fn render_blob(blob: &[u8]) -> String {
let max = blob.len().min(25);
let ellipsis = if blob.len() > max { ", …" } else { "" };
format!("[{}; {:02x?}{}]", blob.len(), &blob[0..max], ellipsis)
}
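// Truncate a string at grapheme boundaries once the output exceeds `len` bytes or
// contains a line break, appending an ellipsis.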
fn truncate_str(len: usize, string: &str) -> String {
let mut r = String::new();
for c in string.graphemes(true) {
if r.len() > len || r.contains('\n') || r.contains('\r') {
r.push('…');
break;
}
r.push_str(c);
}
r
}
fn render_data_short(event: &Event) -> Html {
match event.data() {
None => html! {},
Some(Data::String(text)) => html! {
<pre>
<Label label="String" color=Color::Purple/>{" "}{truncate_str(100, text)}
</pre>
},
Some(Data::Binary(blob)) => html! {
<pre>
<Label label="BLOB" color=Color::Blue/>{" "}{render_blob(&blob)}
</pre>
},
Some(Data::Json(value)) => html! {
<pre>
<Label label="JSON" color=Color::Cyan/>{" "}{truncate_str(100, &value.to_string())}
</pre>
},
}
}
fn render_timestamp(event: &Event) -> Html {
event
.time()
.map(|ts| {
return html! {
<span>
<pre>{ts.format("%H:%M:%S%.3f %Y-%m-%d")}</pre>
</span>
};
})
.unwrap_or_default()
}
#[derive(Clone, Debug, PartialEq)]
struct Attr | String, pub Html);
impl TableRenderer for AttributeEntry {
fn render(&self, index: ColumnIndex) -> Html {
match index.index {
0 => html! {&self.0},
1 => self.1.clone(),
_ => html! {},
}
}
}
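// Expanded row details: a sorted key/value table of the event's attributes followed by
// the rendered payload.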
fn render_details(event: &Event) -> Html {
let mut attrs: Vec<AttributeEntry> = event
.iter()
.map(|(key, value)| {
(
key.to_string(),
html! {
<pre class="pf-c-table__text">{ value.to_string() }</pre>
},
)
})
.map(|(key, value)| AttributeEntry(key, value))
.collect();
attrs.sort_by(|a, b| a.0.cmp(&b.0));
return html! {
<>
<h3>{"Attributes"}</h3>
<Table<SimpleTableModel<AttributeEntry>>
entries=SimpleTableModel::from(attrs)
mode=TableMode::CompactNoBorders
header=html_nested!{
<TableHeader>
<TableColumn label="Key"/>
<TableColumn label="Value"/>
</TableHeader>
}
>
</Table<SimpleTableModel<AttributeEntry>>>
<h3>{"Payload"}</h3>
{ render_data(event) }
</>
};
}
| ibuteEntry(pub | identifier_name |
service.rs | use std::io::Read;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use futures::{self, Future, BoxFuture};
use curl::easy::{Easy, List};
use tokio_core::reactor::Handle;
use tokio_curl::{Session, PerformError};
use serde_json::{from_value, from_str, Value};
pub type Fut<T> = BoxFuture<T, PerformError>;
#[derive(Debug)]
pub struct App {
pub name: String,
pub max_mem_usage: f64,
pub max_cpu_usage: f64,
pub max_instances: i64,
pub instances: i64,
pub tasks: HashMap<String, String>,
}
#[derive(Debug)]
pub struct Statistic {
pub timestamp: f64,
pub cpu_time: f64,
pub cpu_usage: f64,
pub mem_usage: f64,
}
#[derive(Debug, Deserialize)]
struct TaskStatistic {
cpus_limit: f64,
cpus_system_time_secs: f64,
cpus_user_time_secs: f64,
mem_limit_bytes: i64,
mem_rss_bytes: i64,
timestamp: f64,
}
pub struct Service {
handle: Handle,
marathon_url: String,
mesos_url: String,
max_mem_usage: f64,
max_cpu_usage: f64,
multiplier: f64,
max_instances: i64,
}
impl Service {
pub fn new(handle: Handle, marathon_url: String, mesos_url: String,
max_mem_usage: f64, max_cpu_usage: f64,
multiplier: f64, max_instances: i64)
-> Service {
Service {
handle: handle,
marathon_url: marathon_url,
mesos_url: mesos_url,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
multiplier: multiplier,
max_instances: max_instances,
}
}
pub fn get_apps(&mut self) -> Fut<Vec<String>> {
let url = format!("{}/v2/apps", &self.marathon_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["apps"].as_array().unwrap();
let mut apps = Vec::new();
for x in data.iter() {
let id = x["id"].as_str().unwrap();
apps.push(id[1..].to_string());
}
apps
}).boxed()
}
pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> {
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app);
let app = app.to_string();
let mut max_instances = self.max_instances.clone();
let mut max_mem_usage = self.max_mem_usage.clone();
let mut max_cpu_usage = self.max_cpu_usage.clone();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let instances = data.pointer("/app/instances").unwrap();
let instances = instances.as_i64().unwrap();
let labels = data.pointer("/app/labels").unwrap();
let labels = labels.as_object().unwrap();
for (label, value) in labels {
match (label.as_ref(), value) {
("AUTOSCALE_MAX_INSTANCES", v) => {
max_instances = from_value(v.clone()).unwrap();
}
("AUTOSCALE_MEM_PERCENT", v) => {
max_mem_usage = from_value(v.clone()).unwrap();
}
("AUTOSCALE_CPU_PERCENT", v) => {
max_cpu_usage = from_value(v.clone()).unwrap();
}
_ => {}
}
}
let xs = data.pointer("/app/tasks").unwrap();
let xs = xs.as_array().unwrap();
let mut tasks = HashMap::new();
for x in xs.iter() {
let id = x["id"].as_str().unwrap();
let slave_id = x["slaveId"].as_str().unwrap();
tasks.insert(id.clone().to_string(),
slave_id.clone().to_string());
}
Some(App {
name: app,
max_instances: max_instances,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
instances: instances,
tasks: tasks,
})
}).boxed()
}
pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> {
let url = format!("{}/master/slaves", &self.mesos_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["slaves"].as_array().unwrap();
let mut slaves = HashMap::new();
for slave in data.iter() {
let id = slave["id"].as_str().unwrap();
let hostname = slave["hostname"].as_str().unwrap();
let port = slave["port"].as_i64().unwrap();
let addr = format!("{}:{}", hostname, port);
slaves.insert(id.clone().to_string(), addr.to_string());
}
slaves
}).boxed()
}
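    // Fetch per-task statistics from the Mesos agents and aggregate them into average
    // memory usage (percent of the limit) and CPU usage, where CPU usage is the delta in
    // cumulative CPU time over the sampling interval since `prev`.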
pub fn get_statistic(&mut self, app: &App,
slaves: &HashMap<String, String>,
prev: Option<&Statistic>)
-> Fut<Statistic> {
let mut futs = Vec::new();
for (id, slave_id) in &app.tasks {
let url = slaves.get::<String>(&slave_id).unwrap().to_string();
futs.push(self.get_task_statistic(url, id));
}
let mut prev_timestamp = 0.0;
let mut prev_cpu_time = 0.0;
if let Some(p) = prev {
prev_timestamp = p.timestamp;
prev_cpu_time = p.cpu_time;
}
futures::collect(futs).map(move |tasks| {
let mut mems: Vec<f64> = Vec::new();
let mut cpus: Vec<f64> = Vec::new(); | if task.is_none() {
continue;
}
let task = task.unwrap();
timestamp = task.timestamp;
cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs);
mems.push(100.0 * task.mem_rss_bytes as f64 /
task.mem_limit_bytes as f64);
}
let mem_usage = mems.iter()
.fold(0.0, |a, &b| a + b) / mems.len() as f64;
let cpu_time = cpus.iter()
.fold(0.0, |a, &b| a + b) / cpus.len() as f64;
let sampling_duration = timestamp - prev_timestamp;
let cpu_time_usage = cpu_time - prev_cpu_time;
let cpu_usage = cpu_time_usage / sampling_duration * 100.0;
Statistic {
timestamp: timestamp,
cpu_time: cpu_time,
mem_usage: mem_usage,
cpu_usage: cpu_usage,
}
}).boxed()
}
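    // Ask Marathon to scale the app up by the configured multiplier, unless that would
    // exceed the app's maximum instance count.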
pub fn scale(&mut self, app: &App) -> Fut<()> {
let instances = (app.instances as f64 * self.multiplier).ceil() as i64;
if instances > app.max_instances {
info!("Cannot scale {}, reached maximum instances of: {}",
app.name, app.max_instances);
return futures::done(Ok(())).boxed();
}
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name);
let body = format!(r#"{{"instances": {}}}"#, instances);
let session = Session::new(self.handle.clone());
let mut req = Easy::new();
req.url(&url).unwrap();
req.put(true).unwrap();
let mut list = List::new();
list.append("Content-Type: application/json").unwrap();
req.http_headers(list).unwrap();
req.post_field_size(body.as_bytes().len() as u64).unwrap();
req.read_function(move |buf| {
let mut data = body.as_bytes();
Ok(data.read(buf).unwrap_or(0))
}).unwrap();
session.perform(req).map(|mut r| {
info!("Scaling response code: {}", r.response_code().unwrap());
}).boxed()
}
fn get_task_statistic(&mut self, slave: String, id: &str)
-> Fut<Option<TaskStatistic>> {
let url = format!("http://{}/monitor/statistics", &slave);
let id = id.to_string();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let data = data.as_array().unwrap();
data.iter().find(|x| {
x["executor_id"].as_str().unwrap() == id
}).map(|x| {
from_value(x["statistics"].clone()).unwrap()
})
}).boxed()
}
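    // Perform an HTTP GET via tokio-curl, buffering the response body and resolving the
    // future to it as a String.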
fn send_get(&mut self, url: &str) -> Fut<String> {
let session = Session::new(self.handle.clone());
let response = Arc::new(Mutex::new(Vec::new()));
let headers = Arc::new(Mutex::new(Vec::new()));
let mut req = Easy::new();
req.get(true).unwrap();
req.url(url).unwrap();
let response2 = response.clone();
req.write_function(move |data| {
response2.lock().unwrap().extend_from_slice(data);
Ok(data.len())
}).unwrap();
let headers2 = headers.clone();
req.header_function(move |header| {
headers2.lock().unwrap().push(header.to_vec());
true
}).unwrap();
session.perform(req).map(move |_| {
let response = response.lock().unwrap();
let response = String::from_utf8_lossy(&response);
response.into_owned()
}).boxed()
}
}
#[cfg(test)]
mod tests {
use tokio_core::reactor::Core;
#[test]
#[ignore]
fn test() {
let marathon_url = "http://localhost:8080";
let mesos_url = "http://localhost:5050";
let mut evloop = Core::new().unwrap();
let mut service = ::Service::new(evloop.handle(),
marathon_url.to_string(),
mesos_url.to_string(),
80.0, 80.0, 1.5, 10);
let fut = service.get_slaves();
let slaves = evloop.run(fut).unwrap();
let fut = service.get_apps();
let apps = evloop.run(fut).unwrap();
for id in apps {
let fut = service.get_app(&id);
let app = evloop.run(fut).unwrap().unwrap();
let fut = service.get_statistic(&app, &slaves, None);
let stat = evloop.run(fut).unwrap();
if app.name == "api" {
let fut = service.scale(&app);
evloop.run(fut).unwrap();
}
}
}
} | let mut timestamp: f64 = 0.0;
for task in tasks { | random_line_split |
service.rs | use std::io::Read;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use futures::{self, Future, BoxFuture};
use curl::easy::{Easy, List};
use tokio_core::reactor::Handle;
use tokio_curl::{Session, PerformError};
use serde_json::{from_value, from_str, Value};
pub type Fut<T> = BoxFuture<T, PerformError>;
#[derive(Debug)]
pub struct App {
pub name: String,
pub max_mem_usage: f64,
pub max_cpu_usage: f64,
pub max_instances: i64,
pub instances: i64,
pub tasks: HashMap<String, String>,
}
#[derive(Debug)]
pub struct Statistic {
pub timestamp: f64,
pub cpu_time: f64,
pub cpu_usage: f64,
pub mem_usage: f64,
}
#[derive(Debug, Deserialize)]
struct TaskStatistic {
cpus_limit: f64,
cpus_system_time_secs: f64,
cpus_user_time_secs: f64,
mem_limit_bytes: i64,
mem_rss_bytes: i64,
timestamp: f64,
}
pub struct Service {
handle: Handle,
marathon_url: String,
mesos_url: String,
max_mem_usage: f64,
max_cpu_usage: f64,
multiplier: f64,
max_instances: i64,
}
impl Service {
pub fn new(handle: Handle, marathon_url: String, mesos_url: String,
max_mem_usage: f64, max_cpu_usage: f64,
multiplier: f64, max_instances: i64)
-> Service {
Service {
handle: handle,
marathon_url: marathon_url,
mesos_url: mesos_url,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
multiplier: multiplier,
max_instances: max_instances,
}
}
pub fn get_apps(&mut self) -> Fut<Vec<String>> {
let url = format!("{}/v2/apps", &self.marathon_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["apps"].as_array().unwrap();
let mut apps = Vec::new();
for x in data.iter() {
let id = x["id"].as_str().unwrap();
apps.push(id[1..].to_string());
}
apps
}).boxed()
}
pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> {
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app);
let app = app.to_string();
let mut max_instances = self.max_instances.clone();
let mut max_mem_usage = self.max_mem_usage.clone();
let mut max_cpu_usage = self.max_cpu_usage.clone();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let instances = data.pointer("/app/instances").unwrap();
let instances = instances.as_i64().unwrap();
let labels = data.pointer("/app/labels").unwrap();
let labels = labels.as_object().unwrap();
for (label, value) in labels {
match (label.as_ref(), value) {
("AUTOSCALE_MAX_INSTANCES", v) => {
max_instances = from_value(v.clone()).unwrap();
}
("AUTOSCALE_MEM_PERCENT", v) => {
max_mem_usage = from_value(v.clone()).unwrap();
}
("AUTOSCALE_CPU_PERCENT", v) => {
max_cpu_usage = from_value(v.clone()).unwrap();
}
_ => {}
}
}
let xs = data.pointer("/app/tasks").unwrap();
let xs = xs.as_array().unwrap();
let mut tasks = HashMap::new();
for x in xs.iter() {
let id = x["id"].as_str().unwrap();
let slave_id = x["slaveId"].as_str().unwrap();
tasks.insert(id.clone().to_string(),
slave_id.clone().to_string());
}
Some(App {
name: app,
max_instances: max_instances,
max_mem_usage: max_mem_usage,
max_cpu_usage: max_cpu_usage,
instances: instances,
tasks: tasks,
})
}).boxed()
}
pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> {
let url = format!("{}/master/slaves", &self.mesos_url);
self.send_get(&url).map(|body| {
let data = from_str::<Value>(&body).unwrap();
let data = data["slaves"].as_array().unwrap();
let mut slaves = HashMap::new();
for slave in data.iter() {
let id = slave["id"].as_str().unwrap();
let hostname = slave["hostname"].as_str().unwrap();
let port = slave["port"].as_i64().unwrap();
let addr = format!("{}:{}", hostname, port);
slaves.insert(id.clone().to_string(), addr.to_string());
}
slaves
}).boxed()
}
pub fn get_statistic(&mut self, app: &App,
slaves: &HashMap<String, String>,
prev: Option<&Statistic>)
-> Fut<Statistic> |
pub fn scale(&mut self, app: &App) -> Fut<()> {
let instances = (app.instances as f64 * self.multiplier).ceil() as i64;
if instances > app.max_instances {
info!("Cannot scale {}, reached maximum instances of: {}",
app.name, app.max_instances);
return futures::done(Ok(())).boxed();
}
let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name);
let body = format!(r#"{{"instances": {}}}"#, instances);
let session = Session::new(self.handle.clone());
let mut req = Easy::new();
req.url(&url).unwrap();
req.put(true).unwrap();
let mut list = List::new();
list.append("Content-Type: application/json").unwrap();
req.http_headers(list).unwrap();
req.post_field_size(body.as_bytes().len() as u64).unwrap();
req.read_function(move |buf| {
let mut data = body.as_bytes();
Ok(data.read(buf).unwrap_or(0))
}).unwrap();
session.perform(req).map(|mut r| {
info!("Scaling response code: {}", r.response_code().unwrap());
}).boxed()
}
fn get_task_statistic(&mut self, slave: String, id: &str)
-> Fut<Option<TaskStatistic>> {
let url = format!("http://{}/monitor/statistics", &slave);
let id = id.to_string();
self.send_get(&url).map(move |body| {
let data = from_str::<Value>(&body).unwrap();
let data = data.as_array().unwrap();
data.iter().find(|x| {
x["executor_id"].as_str().unwrap() == id
}).map(|x| {
from_value(x["statistics"].clone()).unwrap()
})
}).boxed()
}
fn send_get(&mut self, url: &str) -> Fut<String> {
let session = Session::new(self.handle.clone());
let response = Arc::new(Mutex::new(Vec::new()));
let headers = Arc::new(Mutex::new(Vec::new()));
let mut req = Easy::new();
req.get(true).unwrap();
req.url(url).unwrap();
let response2 = response.clone();
req.write_function(move |data| {
response2.lock().unwrap().extend_from_slice(data);
Ok(data.len())
}).unwrap();
let headers2 = headers.clone();
req.header_function(move |header| {
headers2.lock().unwrap().push(header.to_vec());
true
}).unwrap();
session.perform(req).map(move |_| {
let response = response.lock().unwrap();
let response = String::from_utf8_lossy(&response);
response.into_owned()
}).boxed()
}
}
#[cfg(test)]
mod tests {
use tokio_core::reactor::Core;
#[test]
#[ignore]
fn test() {
let marathon_url = "http://localhost:8080";
let mesos_url = "http://localhost:5050";
let mut evloop = Core::new().unwrap();
let mut service = ::Service::new(evloop.handle(),
marathon_url.to_string(),
mesos_url.to_string(),
80.0, 80.0, 1.5, 10);
let fut = service.get_slaves();
let slaves = evloop.run(fut).unwrap();
let fut = service.get_apps();
let apps = evloop.run(fut).unwrap();
for id in apps {
let fut = service.get_app(&id);
let app = evloop.run(fut).unwrap().unwrap();
let fut = service.get_statistic(&app, &slaves, None);
let stat = evloop.run(fut).unwrap();
if app.name == "api" {
let fut = service.scale(&app);
evloop.run(fut).unwrap();
}
}
}
}
| {
let mut futs = Vec::new();
for (id, slave_id) in &app.tasks {
let url = slaves.get::<String>(&slave_id).unwrap().to_string();
futs.push(self.get_task_statistic(url, id));
}
let mut prev_timestamp = 0.0;
let mut prev_cpu_time = 0.0;
if let Some(p) = prev {
prev_timestamp = p.timestamp;
prev_cpu_time = p.cpu_time;
}
futures::collect(futs).map(move |tasks| {
let mut mems: Vec<f64> = Vec::new();
let mut cpus: Vec<f64> = Vec::new();
let mut timestamp: f64 = 0.0;
for task in tasks {
if task.is_none() {
continue;
}
let task = task.unwrap();
timestamp = task.timestamp;
cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs);
mems.push(100.0 * task.mem_rss_bytes as f64 /
task.mem_limit_bytes as f64);
}
let mem_usage = mems.iter()
.fold(0.0, |a, &b| a + b) / mems.len() as f64;
let cpu_time = cpus.iter()
.fold(0.0, |a, &b| a + b) / cpus.len() as f64;
let sampling_duration = timestamp - prev_timestamp;
let cpu_time_usage = cpu_time - prev_cpu_time;
let cpu_usage = cpu_time_usage / sampling_duration * 100.0;
Statistic {
timestamp: timestamp,
cpu_time: cpu_time,
mem_usage: mem_usage,
cpu_usage: cpu_usage,
}
}).boxed()
} | identifier_body |