max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
netrd/__init__.py | sdmccabe/netrd | 116 | 12698 | """
netrd
-----
netrd stands for Network Reconstruction and Distances. It is a repository
of different algorithms for constructing a network from time series data,
as well as for comparing two networks. It is the product of the Network
Science Institute 2019 Collabathon.
"""
from . import distance # noqa
from . import reconstruction # noqa
from . import dynamics # noqa
from . import utilities # noqa
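# Illustrative usage only (added for clarity; not part of the package): a minimal, hedged
# sketch of the intended workflow. The concrete class name (Hamming) and the dist(G1, G2)
# call are assumptions about the distance submodule, not guarantees made by this __init__.
if __name__ == "__main__":
    import networkx as nx

    G1 = nx.fast_gnp_random_graph(50, 0.1, seed=1)
    G2 = nx.fast_gnp_random_graph(50, 0.1, seed=2)
    # Distance objects compare two networkx graphs and return a scalar dissimilarity.
    print(distance.Hamming().dist(G1, G2))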
|
pytorch/GPT.py | lyq628/NLP-Tutorials | 643 | 12699 | from transformer import Encoder
from torch import nn,optim
from torch.nn.functional import cross_entropy,softmax, relu
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import torch
import utils
import os
import pickle
class GPT(nn.Module):
def __init__(self, model_dim, max_len, num_layer, num_head, n_vocab, lr, max_seg=3, drop_rate=0.2,padding_idx=0):
super().__init__()
self.padding_idx = padding_idx
self.n_vocab = n_vocab
self.max_len = max_len
self.word_emb = nn.Embedding(n_vocab,model_dim)
self.word_emb.weight.data.normal_(0,0.1)
self.segment_emb = nn.Embedding(num_embeddings= max_seg, embedding_dim=model_dim)
self.segment_emb.weight.data.normal_(0,0.1)
self.position_emb = torch.empty(1,max_len,model_dim)
nn.init.kaiming_normal_(self.position_emb,mode='fan_out', nonlinearity='relu')
self.position_emb = nn.Parameter(self.position_emb)
self.encoder = Encoder(n_head=num_head, emb_dim=model_dim, drop_rate=drop_rate, n_layer=num_layer)
self.task_mlm = nn.Linear(in_features=model_dim, out_features=n_vocab)
self.task_nsp = nn.Linear(in_features=model_dim*self.max_len, out_features=2)
self.opt = optim.Adam(self.parameters(),lr)
def forward(self,seqs, segs, training=False):
embed = self.input_emb(seqs, segs)
z = self.encoder(embed, training, mask = self.mask(seqs)) # [n, step, model_dim]
mlm_logits = self.task_mlm(z) # [n, step, n_vocab]
nsp_logits = self.task_nsp(z.reshape(z.shape[0],-1)) # [n, n_cls]
return mlm_logits, nsp_logits
def step(self, seqs, segs, seqs_, nsp_labels):
self.opt.zero_grad()
mlm_logits, nsp_logits = self(seqs, segs, training=True)
pred_loss = cross_entropy(mlm_logits.reshape(-1,self.n_vocab),seqs_.reshape(-1))
nsp_loss = cross_entropy(nsp_logits,nsp_labels.reshape(-1))
loss = pred_loss + 0.2 * nsp_loss
loss.backward()
self.opt.step()
return loss.cpu().data.numpy(), mlm_logits
def input_emb(self,seqs, segs):
# device = next(self.parameters()).device
# self.position_emb = self.position_emb.to(device)
return self.word_emb(seqs) + self.segment_emb(segs) + self.position_emb
def mask(self, seqs):
device = next(self.parameters()).device
batch_size, seq_len = seqs.shape
mask = torch.triu(torch.ones((seq_len,seq_len), dtype=torch.long), diagonal=1).to(device) # [seq_len ,seq_len]
pad = torch.eq(seqs,self.padding_idx) # [n, seq_len]
mask = torch.where(pad[:,None,None,:],1,mask[None,None,:,:]).to(device) # [n, 1, seq_len, seq_len]
return mask>0 # [n, 1, seq_len, seq_len]
@property
def attentions(self):
attentions = {
"encoder": [l.mh.attention.cpu().data.numpy() for l in self.encoder.encoder_layers]
}
return attentions
def train():
MODEL_DIM = 256
N_LAYER = 4
LEARNING_RATE = 1e-4
dataset = utils.MRPCData("./MRPC",2000)
print("num word: ",dataset.num_word)
model = GPT(
model_dim=MODEL_DIM, max_len=dataset.max_len-1, num_layer=N_LAYER, num_head=4, n_vocab=dataset.num_word,
lr=LEARNING_RATE, max_seg=dataset.num_seg, drop_rate=0.2, padding_idx=dataset.pad_id
)
if torch.cuda.is_available():
print("GPU train avaliable")
device =torch.device("cuda")
model = model.cuda()
else:
device = torch.device("cpu")
model = model.cpu()
loader = DataLoader(dataset,batch_size=32,shuffle=True)
for epoch in range(100):
for batch_idx, batch in enumerate(loader):
seqs, segs,xlen,nsp_labels = batch
seqs, segs,nsp_labels = seqs.type(torch.LongTensor).to(device), segs.type(torch.LongTensor).to(device),nsp_labels.to(device)
# pred: [n, step, n_vocab]
loss,pred = model.step(seqs=seqs[:,:-1], segs= segs[:,:-1], seqs_=seqs[:,1:], nsp_labels=nsp_labels)
if batch_idx %100 == 0:
pred = pred[0].cpu().data.numpy().argmax(axis = 1) # [step]
print(
"Epoch: ",epoch,
"|batch: ", batch_idx,
"| loss: %.3f" % loss,
"\n| tgt: ", " ".join([dataset.i2v[i] for i in seqs[0, 1:].cpu().data.numpy()[:xlen[0].sum()+1]]),
"\n| prd: ", " ".join([dataset.i2v[i] for i in pred[:xlen[0].sum()+1]]),
)
os.makedirs("./visual/models/gpt",exist_ok=True)
torch.save(model.state_dict(),"./visual/models/gpt/model.pth")
export_attention(model,device,dataset)
def export_attention(model,device,data,name="gpt"):
model.load_state_dict(torch.load("./visual/models/gpt/model.pth",map_location=device))
seqs, segs,xlen,nsp_labels = data[:32]
seqs, segs,xlen,nsp_labels = torch.from_numpy(seqs),torch.from_numpy(segs),torch.from_numpy(xlen),torch.from_numpy(nsp_labels)
seqs, segs,nsp_labels = seqs.type(torch.LongTensor).to(device), segs.type(torch.LongTensor).to(device),nsp_labels.to(device)
model(seqs[:,:-1],segs[:,:-1],False)
seqs = seqs.cpu().data.numpy()
data = {"src": [[data.i2v[i] for i in seqs[j]] for j in range(len(seqs))], "attentions": model.attentions}
path = "./visual/tmp/%s_attention_matrix.pkl" % name
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
pickle.dump(data, f)
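# Illustrative only (not part of the original tutorial code): a small, hedged sketch that
# reproduces the masking rule of GPT.mask() on a toy batch, to make the broadcast shapes
# explicit. It depends only on torch; the example token ids are arbitrary.
def _mask_demo():
    padding_idx = 0
    seqs = torch.tensor([[5, 6, 7, 0], [8, 9, 0, 0]])  # [n=2, seq_len=4], 0 is padding
    seq_len = seqs.shape[1]
    causal = torch.triu(torch.ones((seq_len, seq_len), dtype=torch.long), diagonal=1)
    pad = torch.eq(seqs, padding_idx)  # [n, seq_len]
    # Padded positions are always masked; elsewhere the upper-triangular causal mask applies.
    mask = torch.where(pad[:, None, None, :], 1, causal[None, None, :, :])
    return mask > 0  # [n, 1, seq_len, seq_len], True means "do not attend"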
if __name__ == "__main__":
train()
|
src/main/python/smart/smartplots3_run.py | cday97/beam | 123 | 12724 | import pandas as pd
import smartplots3_setup
def createSetup(name,expansion_factor,percapita_factor,plot_size,settings):
plt_setup_smart={
'name': name,
'expansion_factor':expansion_factor,
'percapita_factor':percapita_factor,
'scenarios_itr': [],
'scenarios_id':[],
'scenarios_year':[],
'plot_size': plot_size,
'bottom_labels': [],
'top_labels': [],
'plots_folder': "makeplots3"
}
for (scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label) in settings:
plt_setup_smart['scenarios_year'].append(scenarios_year)
plt_setup_smart['scenarios_id'].append(scenarios_id)
plt_setup_smart['scenarios_itr'].append(scenarios_itr)
plt_setup_smart['top_labels'].append(top_label)
plt_setup_smart['bottom_labels'].append(bottom_label)
return plt_setup_smart
def createSettingRow(scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label):
return (scenarios_year,scenarios_id,scenarios_itr,bottom_label,top_label)
scenarios_lables = {
"Base_CL_CT": "Base0",
"Base_STL_STT_BAU": "Base2",
"Base_STL_STT_VTO": "Base3",
"Base_LTL_LTT_BAU": "Base5",
"Base_LTL_LTT_VTO": "Base6",
"A_STL_STT_BAU": "A2",
"A_STL_STT_VTO": "A3",
"B_LTL_LTT_BAU": "B5",
"B_LTL_LTT_VTO": "B6",
"C_LTL_LTT_BAU": "C5",
"C_LTL_LTT_VTO": "C6"
}
output_folder = "/home/ubuntu/git/jupyter/data/28thOct2019"
# Base_CL_CT
# A_STL_STT_BAU
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3 = createSetup('7scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (8, 4.5), settings)
#smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHWaitTime(plt_setup_smart3, output_folder)
#smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3, output_folder)
settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,2,15,scenarios_lables["Base_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,3,15,scenarios_lables["Base_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,4,15,scenarios_lables["Base_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,5,15,scenarios_lables["Base_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))
plt_setup_smart3_base = createSetup('11scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (10, 4.5), settings)
smartplots3_setup.pltEnergyPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHWaitTime(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3_base, output_folder)
#smartplots3_setup.pltMEP(plt_setup_smart3, output_folder, [15071,21151,22872,29014,27541,36325,45267])
smartplots3_setup.tableSummary(plt_setup_smart3_base, output_folder) |
contrib/libs/cxxsupp/libsan/generate_symbolizer.py | HeyLey/catboost | 6,989 | 12726 | import os
import sys
def main():
print 'const char* ya_get_symbolizer_gen() {'
print ' return "{}";'.format(os.path.join(os.path.dirname(sys.argv[1]), 'llvm-symbolizer'))
print '}'
if __name__ == '__main__':
main()
|
utils/save_atten.py | xiaomengyc/SPG | 152 | 12745 | import numpy as np
import cv2
import os
import torch
import os
import time
from torchvision import models, transforms
from torch.utils.data import DataLoader
from torch.optim import SGD
from torch.autograd import Variable
idx2catename = {'voc20': ['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow','diningtable','dog','horse',
'motorbike','person','pottedplant','sheep','sofa','train','tvmonitor'],
'coco80': ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet',
'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush']}
class SAVE_ATTEN(object):
def __init__(self, save_dir='save_bins', dataset=None):
# type: (object, object) -> object
self.save_dir = save_dir
if dataset is not None:
self.idx2cate = self._get_idx2cate_dict(datasetname=dataset)
else:
self.idx2cate = None
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
def save_top_5_pred_labels(self, preds, org_paths, global_step):
img_num = np.shape(preds)[0]
for idx in xrange(img_num):
img_name = org_paths[idx].strip().split('/')[-1]
if '.JPEG' in img_name:
img_id = img_name[:-5]
elif '.png' in img_name or '.jpg' in img_name:
img_id = img_name[:-4]
out = img_id + ' ' + ' '.join(map(str, preds[idx,:])) + '\n'
out_file = os.path.join(self.save_dir, 'pred_labels.txt')
if global_step == 0 and idx==0 and os.path.exists(out_file):
os.remove(out_file)
with open(out_file, 'a') as f:
f.write(out)
def save_masked_img_batch(self, path_batch, atten_batch, label_batch):
#img_num = np.shape(atten_batch)[0]
img_num = atten_batch.size()[0]
# fid = open('imagenet_val_shape.txt', 'a')
# print(np.shape(img_batch), np.shape(label_batch), np.shape(org_size_batch), np.shape(atten_batch))
for idx in xrange(img_num):
atten = atten_batch[idx]
atten = atten.cpu().data.numpy()
label = label_batch[idx]
label = int(label)
self._save_masked_img(path_batch[idx], atten,label)
def _get_idx2cate_dict(self, datasetname=None):
if datasetname not in idx2catename.keys():
print 'The given %s dataset category names are not available. The supported are: %s'\
%(str(datasetname),','.join(idx2catename.keys()))
return None
else:
return {idx:cate_name for idx, cate_name in enumerate(idx2catename[datasetname])}
def _save_masked_img(self, img_path, atten, label):
'''
save masked images with only one ground truth label
:param path:
:param img:
:param atten:
:param org_size:
:param label:
:param scores:
:param step:
:param args:
:return:
'''
if not os.path.isfile(img_path):
raise IOError('Image does not exist: %s' % img_path)
img = cv2.imread(img_path)
org_size = np.shape(img)
w = org_size[0]
h = org_size[1]
attention_map = atten[label,:,:]
atten_norm = attention_map
print(np.shape(attention_map), 'Max:', np.max(attention_map), 'Min:',np.min(attention_map))
# min_val = np.min(attention_map)
# max_val = np.max(attention_map)
# atten_norm = (attention_map - min_val)/(max_val - min_val)
atten_norm = cv2.resize(atten_norm, dsize=(h,w))
atten_norm = atten_norm* 255
heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
img_id = img_path.strip().split('/')[-1]
img_id = img_id.strip().split('.')[0]
save_dir = os.path.join(self.save_dir, img_id+'.png')
cv2.imwrite(save_dir, img)
def get_img_id(self, path):
img_id = path.strip().split('/')[-1]
return img_id.strip().split('.')[0]
def save_top_5_atten_maps(self, atten_fuse_batch, top_indices_batch, org_paths, topk=5):
'''
Save top-5 localization maps for generating bboxes
:param atten_fuse_batch: normalized last layer feature maps of size (batch_size, C, W, H), type: numpy array
:param top_indices_batch: ranked predicted labels of size (batch_size, C), type: numpy array
:param org_paths:
:param args:
:return:
'''
img_num = np.shape(atten_fuse_batch)[0]
for idx in xrange(img_num):
img_id = org_paths[idx].strip().split('/')[-1][:-4]
for k in range(topk):
atten_pos = top_indices_batch[idx, k]
atten_map = atten_fuse_batch[idx, atten_pos,:,:]
heat_map = cv2.resize(atten_map, dsize=(224, 224))
# heat_map = cv2.resize(atten_map, dsize=(img_shape[1], img_shape[0]))
heat_map = heat_map* 255
save_path = os.path.join(self.save_dir, 'heat_maps', 'top%d'%(k+1))
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path = os.path.join(save_path,img_id+'.png')
cv2.imwrite(save_path, heat_map)
# def save_heatmap_segmentation(self, img_path, atten, gt_label, save_dir=None, size=(224,224), maskedimg=False):
# assert np.ndim(atten) == 4
#
# labels_idx = np.where(gt_label[0]==1)[0] if np.ndim(gt_label)==2 else np.where(gt_label==1)[0]
#
# if save_dir is None:
# save_dir = self.save_dir
# if not os.path.exists(save_dir):
# os.mkdir(save_dir)
#
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
# for i in range(batch_size):
# img, size = self.read_img(img_path[i], size=size)
# atten_img = atten[i] #get attention maps for the i-th img of the batch
# img_name = self.get_img_id(img_path[i])
# img_dir = os.path.join(save_dir, img_name)
# if not os.path.exists(img_dir):
# os.mkdir(img_dir)
# for k in labels_idx:
# atten_map_k = atten_img[k,:,:]
# atten_map_k = cv2.resize(atten_map_k, dsize=size)
# if maskedimg:
# img_to_save = self._add_msk2img(img, atten_map_k)
# else:
# img_to_save = self.normalize_map(atten_map_k)*255.0
#
# save_path = os.path.join(img_dir, '%d.png'%(k))
# cv2.imwrite(save_path, img_to_save)
def normalize_map(self, atten_map):
min_val = np.min(atten_map)
max_val = np.max(atten_map)
atten_norm = (atten_map - min_val)/(max_val - min_val)
return atten_norm
def _add_msk2img(self, img, msk, isnorm=True):
if np.ndim(img) == 3:
assert np.shape(img)[0:2] == np.shape(msk)
else:
assert np.shape(img) == np.shape(msk)
if isnorm:
min_val = np.min(msk)
max_val = np.max(msk)
atten_norm = (msk - min_val)/(max_val - min_val)
atten_norm = atten_norm* 255
heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
w_img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
return w_img
def _draw_text(self, pic, txt, pos='topleft'):
font = cv2.FONT_HERSHEY_SIMPLEX #multiple line
txt = txt.strip().split('\n')
stat_y = 30
for t in txt:
pic = cv2.putText(pic,t,(10,stat_y), font, 0.8,(255,255,255),2,cv2.LINE_AA)
stat_y += 30
return pic
def _mark_score_on_picture(self, pic, score_vec, label_idx):
score = score_vec[label_idx]
txt = '%.3f'%(score)
pic = self._draw_text(pic, txt, pos='topleft')
return pic
def get_heatmap_idxes(self, gt_label):
labels_idx = []
if np.ndim(gt_label) == 1:
labels_idx = np.expand_dims(gt_label, axis=1).astype(np.int)
elif np.ndim(gt_label) == 2:
for row in gt_label:
idxes = np.where(row[0]==1)[0] if np.ndim(row)==2 else np.where(row==1)[0]
labels_idx.append(idxes.tolist())
else:
labels_idx = None
return labels_idx
def get_map_k(self, atten, k, size=(224,224)):
atten_map_k = atten[k,:,:]
# print np.max(atten_map_k), np.min(atten_map_k)
atten_map_k = cv2.resize(atten_map_k, dsize=size)
return atten_map_k
def read_img(self, img_path, size=(224,224)):
img = cv2.imread(img_path)
if img is None:
print "Image does not exist. %s" %(img_path)
exit(0)
if size == (0,0):
size = np.shape(img)[:2]
else:
img = cv2.resize(img, size)
return img, size[::-1]
def get_masked_img(self, img_path, atten, gt_label,
size=(224,224), maps_in_dir=False, save_dir=None, only_map=False):
assert np.ndim(atten) == 4
save_dir = save_dir if save_dir is not None else self.save_dir
if isinstance(img_path, list) or isinstance(img_path, tuple):
batch_size = len(img_path)
label_indexes = self.get_heatmap_idxes(gt_label)
for i in range(batch_size):
img, size = self.read_img(img_path[i], size)
img_name = img_path[i].split('/')[-1]
img_name = img_name.strip().split('.')[0]
if maps_in_dir:
img_save_dir = os.path.join(save_dir, img_name)
os.mkdir(img_save_dir)
for k in label_indexes[i]:
atten_map_k = self.get_map_k(atten[i], k , size)
msked_img = self._add_msk2img(img, atten_map_k)
suffix = str(k+1)
if only_map:
save_img = (self.normalize_map(atten_map_k)*255).astype(np.int)
else:
save_img = msked_img
if maps_in_dir:
cv2.imwrite(os.path.join(img_save_dir, suffix + '.png'), save_img)
else:
cv2.imwrite(os.path.join(save_dir, img_name + '_' + suffix + '.png'), save_img)
# if score_vec is not None and labels_idx is not None:
# msked_img = self._mark_score_on_picture(msked_img, score_vec, labels_idx[k])
# if labels_idx is not None:
# suffix = self.idx2cate.get(labels_idx[k], k)
# def get_masked_img_ml(self, img_path, atten, save_dir=None, size=(224,224),
# gt_label=None, score_vec=None):
# assert np.ndim(atten) == 4
#
# if gt_label is not None and self.idx2cate is not None:
# labels_idx = np.where(gt_label[0]==1)[0] if np.ndim(gt_label)==2 else np.where(gt_label==1)[0]
# else:
# labels_idx = None
#
#
# if save_dir is not None:
# self.save_dir = save_dir
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
# for i in range(batch_size):
# img = cv2.imread(img_path[i])
# if img is None:
# print "Image does not exist. %s" %(img_path[i])
# exit(0)
#
# else:
# atten_img = atten[i] #get attention maps for the i-th img
# img_name = img_path[i].split('/')[-1]
# for k in range(np.shape(atten_img)[0]):
# if size == (0,0):
# w, h, _ = np.shape(img)
# # h, w, _ = np.shape(img)
# else:
# h, w = size
# img = cv2.resize(img, dsize=(h, w))
# atten_map_k = atten_img[k,:,:]
# # print np.max(atten_map_k), np.min(atten_map_k)
# atten_map_k = cv2.resize(atten_map_k, dsize=(h,w))
# msked_img = self._add_msk2img(img, atten_map_k)
# if score_vec is not None and labels_idx is not None:
# msked_img = self._mark_score_on_picture(msked_img, score_vec, labels_idx[k])
# if labels_idx is not None:
# suffix = self.idx2cate.get(labels_idx[k], k)
# else:
# suffix = str(k)
# if '.' in img_name:
# img_name = img_name.strip().split('.')[0]
# cv2.imwrite(os.path.join(self.save_dir, img_name + '_' + suffix + '.png'), msked_img)
#
#
# def get_masked_img(self, img_path, atten, save_dir=None, size=(224,224), combine=True):
# '''
#
# :param img_path:
# :param atten:
# :param size: if it is (0,0) use original image size, otherwise use the specified size.
# :param combine:
# :return:
# '''
#
# if save_dir is not None:
# self.save_dir = save_dir
# if isinstance(img_path, list) or isinstance(img_path, tuple):
# batch_size = len(img_path)
#
# for i in range(batch_size):
# atten_norm = atten[i]
# min_val = np.min(atten_norm)
# max_val = np.max(atten_norm)
# atten_norm = (atten_norm - min_val)/(max_val - min_val)
# # print np.max(atten_norm), np.min(atten_norm)
# img = cv2.imread(img_path[i])
# if img is None:
# print "Image does not exist. %s" %(img_path[i])
# exit(0)
#
# if size == (0,0):
# w, h, _ = np.shape(img)
# # h, w, _ = np.shape(img)
# else:
# h, w = size
# img = cv2.resize(img, dsize=(h, w))
#
# atten_norm = cv2.resize(atten_norm, dsize=(h,w))
# # atten_norm = cv2.resize(atten_norm, dsize=(w,h))
# atten_norm = atten_norm* 255
# heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
# img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
#
#
# # font = cv2.FONT_HERSHEY_SIMPLEX
# # cv2.putText(img,'OpenCV \n hello',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA)
#
# img_name = img_path[i].split('/')[-1]
# print os.path.join(self.save_dir, img_name)
# cv2.imwrite(os.path.join(self.save_dir, img_name), img)
def get_atten_map(self, img_path, atten, save_dir=None, size=(321,321)):
'''
:param img_path:
:param atten:
:param size: if it is (0,0) use original image size, otherwise use the specified size.
:param combine:
:return:
'''
if save_dir is not None:
self.save_dir = save_dir
if isinstance(img_path, list) or isinstance(img_path, tuple):
batch_size = len(img_path)
for i in range(batch_size):
atten_norm = atten[i]
min_val = np.min(atten_norm)
max_val = np.max(atten_norm)
atten_norm = (atten_norm - min_val)/(max_val - min_val)
# print np.max(atten_norm), np.min(atten_norm)
h, w = size
atten_norm = cv2.resize(atten_norm, dsize=(h,w))
# atten_norm = cv2.resize(atten_norm, dsize=(w,h))
atten_norm = atten_norm* 255
img_name = img_path[i].split('/')[-1]
img_name = img_name.replace('jpg', 'png')
cv2.imwrite(os.path.join(self.save_dir, img_name), atten_norm)
class DRAW(object):
def __init__(self):
pass
def draw_text(self, img, text):
if isinstance(text, dict):
pass
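# Illustrative only (not part of the original SPG utilities): a minimal, hedged sketch of the
# normalize -> resize -> colormap -> blend pattern used by _add_msk2img and _save_masked_img
# above, run on synthetic data so it does not need an image file on disk.
def _overlay_demo():
    img = np.zeros((224, 224, 3), dtype=np.uint8)        # stand-in for cv2.imread(...)
    atten = np.random.rand(14, 14).astype(np.float32)    # stand-in for an attention map
    atten = (atten - atten.min()) / (atten.max() - atten.min())  # min-max normalize to [0, 1]
    atten = cv2.resize(atten, dsize=(224, 224)) * 255
    heat_map = cv2.applyColorMap(atten.astype(np.uint8), cv2.COLORMAP_JET)
    return cv2.addWeighted(img, 0.5, heat_map, 0.5, 0)   # blended visualization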
|
color_extractor/cluster.py | hcoura/color-extractor | 276 | 12748 | from sklearn.cluster import KMeans
from .exceptions import KMeansException
from .task import Task
class Cluster(Task):
"""
Use the K-Means algorithm to group pixels by clusters. The algorithm tries
to determine the optimal number of clusters for the given pixels.
"""
def __init__(self, settings=None):
if settings is None:
settings = {}
super(Cluster, self).__init__(settings)
self._kmeans_args = {
'max_iter': 50,
'tol': 1.0,
}
def get(self, img):
a = self._settings['algorithm']
if a == 'kmeans':
return self._jump(img)
else:
raise ValueError('Unknown algorithm {}'.format(a))
def _kmeans(self, img, k):
kmeans = KMeans(n_clusters=k, **self._kmeans_args)
try:
kmeans.fit(img)
except Exception:
raise KMeansException()
return kmeans.inertia_, kmeans.labels_, kmeans.cluster_centers_
def _jump(self, img):
npixels = img.size
best = None
prev_distorsion = 0
largest_diff = float('-inf')
for k in range(self._settings['min_k'], self._settings['max_k']):
compact, labels, centers = self._kmeans(img, k)
distorsion = Cluster._square_distorsion(npixels, compact, 1.5)
diff = prev_distorsion - distorsion
prev_distorsion = distorsion
if diff > largest_diff:
largest_diff = diff
best = k, labels, centers
return best
@staticmethod
def _default_settings():
return {
'min_k': 2,
'max_k': 7,
'algorithm': 'kmeans',
}
@staticmethod
def _square_distorsion(npixels, compact, y):
return pow(compact / npixels, -y)
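# Illustrative only (added for clarity; not part of the library): a hedged sketch of feeding
# an (N, 3) block of pixel colors to Cluster. It assumes the Task base class fills in
# _default_settings() when settings is None, so 'algorithm', 'min_k' and 'max_k' are set.
def _cluster_demo():
    import numpy as np

    pixels = np.random.randint(0, 256, size=(500, 3)).astype(float)
    k, _labels, centers = Cluster().get(pixels)  # chosen k, per-pixel labels, cluster centers
    return k, centers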
|
notebooks/datasets.py | jweill-aws/jupyterlab-data-explorer | 173 | 12749 | #
# @license BSD-3-Clause
#
# Copyright (c) 2019 Project Jupyter Contributors.
# Distributed under the terms of the 3-Clause BSD License.
import IPython.display
import pandas
def output_url(url):
IPython.display.publish_display_data(
{"application/x.jupyter.relative-dataset-urls+json": [url]}
)
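# Illustrative only (added for clarity): a hedged usage sketch pairing the custom MIME type
# above with a small pandas preview; the path below is a hypothetical example.
def preview_dataset(url="./datasets/cars.csv"):
    output_url(url)                       # advertise the dataset to the data explorer
    return pandas.read_csv(url).head()    # and show the first rows inline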
|
src/lib/others/info_gathering/finder/finding_comment.py | nahuelhm17/vault_scanner | 230 | 12752 | #! /usr/bin/python
import requests
import re
from bs4 import BeautifulSoup
import colors
class FindingComments(object):
def __init__(self, url):
self.url = url
self.comment_list = ['<!--(.*)-->']
self.found_comments = {}
def get_source_code(self):
resp_text = requests.get(self.url).text
return resp_text
def find_comment(self):
source_code = self.get_source_code()
for comment in self.comment_list:
comments = re.findall(comment, source_code)
self.found_comments[comment] = comments
def parse_comments(self):
self.find_comment()
comment_dict = {}
if len(self.found_comments) > 0:
for comment_code, comment in self.found_comments.items():
colors.success('Found for {} : {}'
.format(comment_code, comment))
comment_dict[comment_code] = comment
else:
colors.error('No comment found')
return comment_dict
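# Illustrative only (added for clarity; not part of the scanner): a hedged usage sketch.
# The target URL is a placeholder; parse_comments() returns a dict mapping each comment
# pattern to the comment bodies found in the page source.
if __name__ == '__main__':
    finder = FindingComments('https://example.com')
    print(finder.parse_comments())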
|
micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py | WenJinfeng/PyCG | 121 | 12753 | d = {"1": "a"}
d[1]    # raises KeyError: the int key 1 and the str key "1" are distinct (no type coercion)
d["1"]  # would return "a"
|
torchrec/metrics/rec_metric.py | xing-liu/torchrec | 814 | 12794 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import abc
import math
from collections import defaultdict, deque
from dataclasses import dataclass
from enum import Enum
from typing import (
Any,
Callable,
cast,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import torch
import torch.distributed as dist
import torch.nn as nn
from torchmetrics import Metric
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import (
compose_metric_key,
MetricNameBase,
MetricNamespaceBase,
MetricPrefix,
)
RecModelOutput = Union[torch.Tensor, Dict[str, torch.Tensor]]
@dataclass(frozen=True)
class MetricComputationReport:
name: MetricNameBase
metric_prefix: MetricPrefix
value: torch.Tensor
DefaultValueT = TypeVar("DefaultValueT")
ComputeIterType = Iterator[
Tuple[RecTaskInfo, MetricNameBase, torch.Tensor, MetricPrefix]
]
MAX_BUFFER_COUNT = 1000
class RecMetricException(Exception):
pass
class WindowBuffer:
def __init__(self, max_size: int, max_buffer_count: int) -> None:
self._max_size: int = max_size
self._max_buffer_count: int = max_buffer_count
self._buffers: Deque[torch.Tensor] = deque(maxlen=max_buffer_count)
self._used_sizes: Deque[int] = deque(maxlen=max_buffer_count)
self._window_used_size = 0
def aggregate_state(
self, window_state: torch.Tensor, curr_state: torch.Tensor, size: int
) -> None:
def remove(window_state: torch.Tensor) -> None:
window_state -= self._buffers.popleft()
self._window_used_size -= self._used_sizes.popleft()
if len(self._buffers) == self._buffers.maxlen:
remove(window_state)
self._buffers.append(curr_state)
self._used_sizes.append(size)
window_state += curr_state
self._window_used_size += size
while self._window_used_size > self._max_size:
remove(window_state)
@property
def buffers(self) -> Deque[torch.Tensor]:
return self._buffers
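# Illustrative only (added for clarity; not part of torchrec): a small, self-contained sketch
# of how WindowBuffer keeps a bounded sliding-window state, mirroring the way
# RecMetricComputation._aggregate_window_state below feeds it per-batch states and sizes.
def _window_buffer_demo() -> None:
    buf = WindowBuffer(max_size=4, max_buffer_count=MAX_BUFFER_COUNT)
    window_state = torch.zeros(1)
    for _ in range(6):
        # Each call adds a per-batch state of 1.0 covering 2 samples; once more than
        # max_size=4 samples are tracked, the oldest contributions are subtracted back out.
        buf.aggregate_state(window_state, curr_state=torch.ones(1), size=2)
    # Only the two most recent updates (4 samples) remain inside the window.
    assert int(window_state.item()) == 2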
class RecMetricComputation(Metric, abc.ABC):
r"""The internal computation class template.
A metric implementation should overwrite update() and compute(). These two
APIs focus on the actual mathematical meaning of the metric, without requiring
detailed knowledge of the model output and task information.
Args:
my_rank (int): the rank of this trainer.
batch_size (int): batch size used by this trainer.
n_tasks (int): the number of tasks this communication object
will have to compute.
window_size (int): the window size for the window metric.
compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
is necessary if a non-leader rank wants to consume the metrics result.
process_group (Optional[ProcessGroup]): the process group used for the
communication. Will use the default process group if not specified.
"""
_batch_window_buffers: Optional[Dict[str, WindowBuffer]]
def __init__(
self,
my_rank: int,
batch_size: int,
n_tasks: int,
window_size: int,
compute_on_all_ranks: bool = False,
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
process_group: Optional[dist.ProcessGroup] = None,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(process_group=process_group, *args, **kwargs)
self._my_rank = my_rank
self._n_tasks = n_tasks
self._batch_size = batch_size
self._window_size = window_size
self._compute_on_all_ranks = compute_on_all_ranks
if self._window_size > 0:
self._batch_window_buffers = {}
else:
self._batch_window_buffers = None
self._add_state(
"has_valid_update",
torch.zeros(self._n_tasks, dtype=torch.uint8),
add_window_state=False,
dist_reduce_fx=lambda x: torch.any(x, dim=0).byte(),
persistent=True,
)
@staticmethod
def get_window_state_name(state_name: str) -> str:
return f"window_{state_name}"
def get_window_state(self, state_name: str) -> torch.Tensor:
return getattr(self, self.get_window_state_name(state_name))
def _add_state(
self, name: str, default: DefaultValueT, add_window_state: bool, **kwargs: Any
) -> None:
# pyre-fixme[6]: Expected `Union[List[typing.Any], torch.Tensor]` for 2nd
# param but got `DefaultValueT`.
super().add_state(name, default, **kwargs)
if add_window_state:
if self._batch_window_buffers is None:
raise RuntimeError(
"Users is adding a window state while window metric is disabled."
)
kwargs["persistent"] = False
window_state_name = self.get_window_state_name(name)
# Avoid pyre error
assert isinstance(default, torch.Tensor)
super().add_state(window_state_name, default.detach().clone(), **kwargs)
self._batch_window_buffers[window_state_name] = WindowBuffer(
max_size=self._window_size,
max_buffer_count=MAX_BUFFER_COUNT,
)
def _aggregate_window_state(
self, state_name: str, state: torch.Tensor, num_samples: int
) -> None:
if self._batch_window_buffers is None:
raise RuntimeError(
"Users is adding a window state while window metric is disabled."
)
window_state_name = self.get_window_state_name(state_name)
assert self._batch_window_buffers is not None
self._batch_window_buffers[window_state_name].aggregate_state(
getattr(self, window_state_name), curr_state=state, size=num_samples
)
@abc.abstractmethod
# pyre-fixme[14]: `update` overrides method defined in `Metric` inconsistently.
def update(
self,
*,
predictions: Optional[torch.Tensor],
labels: torch.Tensor,
weights: Optional[torch.Tensor],
) -> None: # pragma: no cover
pass
@abc.abstractmethod
def _compute(self) -> List[MetricComputationReport]: # pragma: no cover
pass
def pre_compute(self) -> None:
r"""If a metric need to do some work before `compute()`, the metric
has to override this `pre_compute()`. One possible usage is to do
some pre-processing of the local state before `compute()` as TorchMetric
wraps `RecMetricComputation.compute()` and will do the global aggregation
before `RecMetricComputation.compute()` is called.
"""
return
def compute(self) -> List[MetricComputationReport]:
if self._my_rank == 0 or self._compute_on_all_ranks:
return self._compute()
else:
return []
def local_compute(self) -> List[MetricComputationReport]:
return self._compute()
class RecMetric(nn.Module, abc.ABC):
r"""The main class template to implement a recommendation metric.
This class contains the recommendation tasks information (RecTaskInfo) and
the actual computation object (RecMetricComputation). RecMetric processes
all the information related to RecTaskInfo and models and passes the required
signals to the computation object, allowing the implementation of
RecMetricComputation to focus on the mathematical meaning.
A new metric that inherits RecMetric must override the following attributes
in its own __init__(): `_namespace` and `_metrics_computations`. No other
methods should be overridden.
Args:
world_size (int): the number of trainers.
my_rank (int): the rank of this trainer.
batch_size (int): batch size used by this trainer.
tasks (List[RecTaskInfo]): the information of the model tasks.
compute_mode (RecComputeMode): the computation mode. See RecComputeMode.
window_size (int): the window size for the window metric.
fused_update_limit (int): the maximum number of updates to be fused.
compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
is necessary if a non-leader rank wants to consume the global metrics result.
process_group (Optional[ProcessGroup]): the process group used for the
communication. Will use the default process group if not specified.
Call Args:
Not supported.
Returns:
Not supported.
Example::
ne = NEMetric(
world_size=4,
my_rank=0,
batch_size=128,
tasks=DefaultTaskInfo,
)
"""
_computation_class: Type[RecMetricComputation]
_namespace: MetricNamespaceBase
_metrics_computations: nn.ModuleList
_tasks: List[RecTaskInfo]
_window_size: int
_tasks_iter: Callable[[str], ComputeIterType]
_update_buffers: Dict[str, List[RecModelOutput]]
_default_weights: Dict[Tuple[int, ...], torch.Tensor]
PREDICTIONS: str = "predictions"
LABELS: str = "labels"
WEIGHTS: str = "weights"
def __init__(
self,
world_size: int,
my_rank: int,
batch_size: int,
tasks: List[RecTaskInfo],
compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION,
window_size: int = 100,
fused_update_limit: int = 0,
compute_on_all_ranks: bool = False,
process_group: Optional[dist.ProcessGroup] = None,
**kwargs: Any,
) -> None:
# TODO(stellaya): consider to inherit from TorchMetrics.Metric or
# TorchMetrics.MetricCollection.
if (
compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION
and fused_update_limit > 0
):
raise ValueError(
"The fused tasks computation and the fused update cannot be set at the same time"
)
super().__init__()
self._world_size = world_size
self._my_rank = my_rank
self._window_size = math.ceil(window_size / world_size)
self._batch_size = batch_size
self._tasks = tasks
self._compute_mode = compute_mode
self._fused_update_limit = fused_update_limit
self._default_weights = {}
self._update_buffers = {
self.PREDICTIONS: [],
self.LABELS: [],
self.WEIGHTS: [],
}
if compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
n_metrics = 1
task_per_metric = len(self._tasks)
self._tasks_iter = self._fused_tasks_iter
else:
n_metrics = len(self._tasks)
task_per_metric = 1
self._tasks_iter = self._unfused_tasks_iter
self._metrics_computations: nn.ModuleList = nn.ModuleList(
[
# This Pyre error seems to be Pyre's bug as it can be inferred by mypy
# according to https://github.com/python/mypy/issues/3048.
# pyre-fixme[45]: Cannot instantiate abstract class `RecMetricComputation`.
self._computation_class(
my_rank,
batch_size,
task_per_metric,
self._window_size,
compute_on_all_ranks,
process_group,
**kwargs,
)
for _ in range(n_metrics)
]
)
# TODO(stellaya): Refactor the _[fused, unfused]_tasks_iter methods and replace the
# compute_scope str input with an enum
def _fused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
assert len(self._metrics_computations) == 1
self._metrics_computations[0].pre_compute()
for metric_report in getattr(
self._metrics_computations[0], compute_scope + "compute"
)():
for task, metric_value, has_valid_update in zip(
self._tasks,
metric_report.value,
self._metrics_computations[0].has_valid_update,
):
# The attribute has_valid_update is a tensor whose length equals to the
# number of tasks. Each value in it is corresponding to whether a task
# has valid updates or not.
# If for a task there's no valid updates, the calculated metric_value
# will be meaningless, so we mask it with the default value, i.e. 0.
valid_metric_value = (
metric_value
if has_valid_update > 0
else torch.zeros_like(metric_value)
)
yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value
def _unfused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
for task, metric_computation in zip(self._tasks, self._metrics_computations):
metric_computation.pre_compute()
for metric_report in getattr(
metric_computation, compute_scope + "compute"
)():
# The attribute has_valid_update is a tensor with only 1 value
# corresponding to whether the task has valid updates or not.
# If there's no valid update, the calculated metric_report.value
# will be meaningless, so we mask it with the default value, i.e. 0.
valid_metric_value = (
metric_report.value
if metric_computation.has_valid_update[0] > 0
else torch.zeros_like(metric_report.value)
)
yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value
def _fuse_update_buffers(self) -> Dict[str, RecModelOutput]:
def fuse(outputs: List[RecModelOutput]) -> RecModelOutput:
assert len(outputs) > 0
if isinstance(outputs[0], torch.Tensor):
return torch.cat(cast(List[torch.Tensor], outputs))
else:
task_outputs: Dict[str, List[torch.Tensor]] = defaultdict(list)
for output in outputs:
assert isinstance(output, dict)
for task_name, tensor in output.items():
task_outputs[task_name].append(tensor)
return {
name: torch.cat(tensors) for name, tensors in task_outputs.items()
}
ret: Dict[str, RecModelOutput] = {}
for key, output_list in self._update_buffers.items():
if len(output_list) > 0:
ret[key] = fuse(output_list)
else:
assert key == self.WEIGHTS
output_list.clear()
return ret
def _check_fused_update(self, force: bool) -> None:
if self._fused_update_limit <= 0:
return
if len(self._update_buffers[self.PREDICTIONS]) == 0:
return
if (
not force
and len(self._update_buffers[self.PREDICTIONS]) < self._fused_update_limit
):
return
fused_arguments = self._fuse_update_buffers()
self._update(
predictions=fused_arguments[self.PREDICTIONS],
labels=fused_arguments[self.LABELS],
weights=fused_arguments.get(self.WEIGHTS, None),
)
def _create_default_weights(self, predictions: torch.Tensor) -> torch.Tensor:
weights = self._default_weights.get(predictions.size(), None)
if weights is None:
weights = torch.ones_like(predictions)
self._default_weights[predictions.size()] = weights
return weights
def _check_nonempty_weights(self, weights: torch.Tensor) -> torch.Tensor:
return torch.gt(torch.count_nonzero(weights, dim=-1), 0)
def _update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: Optional[RecModelOutput],
) -> None:
with torch.no_grad():
if self._compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
assert isinstance(predictions, torch.Tensor)
# Reshape the predictions to size([len(self._tasks), self._batch_size])
predictions = predictions.view(-1, self._batch_size)
assert isinstance(labels, torch.Tensor)
labels = labels.view(-1, self._batch_size)
if weights is None:
weights = self._create_default_weights(predictions)
else:
assert isinstance(weights, torch.Tensor)
weights = weights.view(-1, self._batch_size)
# has_valid_weights is a tensor of bool whose length equals to the number
# of tasks. Each value in it is corresponding to whether the weights
# are valid, i.e. are set to non-zero values for that task in this update.
# If has_valid_weights is False for all the tasks, we just ignore this
# update.
has_valid_weights = self._check_nonempty_weights(weights)
if torch.any(has_valid_weights):
self._metrics_computations[0].update(
predictions=predictions, labels=labels, weights=weights
)
self._metrics_computations[0].has_valid_update.logical_or_(
has_valid_weights
).byte()
else:
for task, metric_ in zip(self._tasks, self._metrics_computations):
if task.name not in predictions:
continue
if torch.numel(predictions[task.name]) == 0:
assert torch.numel(labels[task.name]) == 0
assert weights is None or torch.numel(weights[task.name]) == 0
continue
# Reshape the predictions to size([1, self._batch_size])
task_predictions = predictions[task.name].view(1, -1)
task_labels = labels[task.name].view(1, -1)
if weights is None:
task_weights = self._create_default_weights(task_predictions)
else:
task_weights = weights[task.name].view(1, -1)
# has_valid_weights is a tensor with only 1 value corresponding to
# whether the weights are valid, i.e. are set to non-zero values for
# the task in this update.
# If has_valid_update[0] is False, we just ignore this update.
has_valid_weights = self._check_nonempty_weights(task_weights)
if has_valid_weights[0]:
metric_.update(
predictions=task_predictions,
labels=task_labels,
weights=task_weights,
)
metric_.has_valid_update.logical_or_(has_valid_weights).byte()
def update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: Optional[RecModelOutput],
) -> None:
if self._fused_update_limit > 0:
self._update_buffers[self.PREDICTIONS].append(predictions)
self._update_buffers[self.LABELS].append(labels)
if weights is not None:
self._update_buffers[self.WEIGHTS].append(weights)
self._check_fused_update(force=False)
else:
self._update(predictions=predictions, labels=labels, weights=weights)
# The implementation of compute is very similar to local_compute, but compute overwrites
# the abstract method compute in torchmetrics.Metric, which is wrapped by _wrap_compute
def compute(self) -> Dict[str, torch.Tensor]:
self._check_fused_update(force=True)
ret = {}
for task, metric_name, metric_value, prefix in self._tasks_iter(""):
metric_key = compose_metric_key(
self._namespace, task.name, metric_name, prefix
)
ret[metric_key] = metric_value
return ret
def local_compute(self) -> Dict[str, torch.Tensor]:
self._check_fused_update(force=True)
ret = {}
for task, metric_name, metric_value, prefix in self._tasks_iter("local_"):
metric_key = compose_metric_key(
self._namespace, task.name, metric_name, prefix
)
ret[metric_key] = metric_value
return ret
def sync(self) -> None:
for computation in self._metrics_computations:
computation.sync()
def unsync(self) -> None:
for computation in self._metrics_computations:
if computation._is_synced:
computation.unsync()
def reset(self) -> None:
for computation in self._metrics_computations:
computation.reset()
def get_memory_usage(self) -> Dict[torch.Tensor, int]:
r"""Estimates the memory of the rec metric instance's
underlying tensors; returns the map of tensor to size
"""
tensor_map = {}
attributes_q = deque(self.__dict__.values())
while attributes_q:
attribute = attributes_q.popleft()
if isinstance(attribute, torch.Tensor):
tensor_map[attribute] = (
attribute.size().numel() * attribute.element_size()
)
elif isinstance(attribute, WindowBuffer):
attributes_q.extend(attribute.buffers)
elif isinstance(attribute, Mapping):
attributes_q.extend(attribute.values())
elif isinstance(attribute, Sequence) and not isinstance(attribute, str):
attributes_q.extend(attribute)
elif hasattr(attribute, "__dict__") and not isinstance(attribute, Enum):
attributes_q.extend(attribute.__dict__.values())
return tensor_map
# pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
def state_dict(
self,
destination: Optional[Dict[str, torch.Tensor]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, torch.Tensor]:
# We need to flush the cached output to ensure checkpointing correctness.
self._check_fused_update(force=True)
destination = super().state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
return self._metrics_computations.state_dict(
destination=destination,
prefix=f"{prefix}_metrics_computations.",
keep_vars=keep_vars,
)
class RecMetricList(nn.Module):
"""
A list module to encapsulate multiple RecMetric instances and provide the
same interfaces as RecMetric.
Args:
rec_metrics (List[RecMetric]): the list of the input RecMetrics.
Call Args:
Not supported.
Returns:
Not supported.
Example::
ne = NEMetric(
world_size=4,
my_rank=0,
batch_size=128,
tasks=DefaultTaskInfo
)
metrics = RecMetricList([ne])
"""
rec_metrics: nn.ModuleList
def __init__(self, rec_metrics: List[RecMetric]) -> None:
# TODO(stellaya): consider to inherit from TorchMetrics.MetricCollection.
# The prerequisite to use MetricCollection is that RecMetric inherits from
# TorchMetrics.Metric or TorchMetrics.MetricCollection
super().__init__()
self.rec_metrics = nn.ModuleList(rec_metrics)
def __len__(self) -> int:
return len(self.rec_metrics)
def __getitem__(self, idx: int) -> nn.Module:
return self.rec_metrics[idx]
def update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: RecModelOutput,
) -> None:
for metric in self.rec_metrics:
metric.update(predictions=predictions, labels=labels, weights=weights)
def compute(self) -> Dict[str, torch.Tensor]:
ret = {}
for metric in self.rec_metrics:
ret.update(metric.compute())
return ret
def local_compute(self) -> Dict[str, torch.Tensor]:
ret = {}
for metric in self.rec_metrics:
ret.update(metric.local_compute())
return ret
def sync(self) -> None:
for metric in self.rec_metrics:
metric.sync()
def unsync(self) -> None:
for metric in self.rec_metrics:
metric.unsync()
def reset(self) -> None:
for metric in self.rec_metrics:
metric.reset()
|
homeassistant/components/eight_sleep/binary_sensor.py | andersop91/core | 22,481 | 12805 | """Support for Eight Sleep binary sensors."""
from __future__ import annotations
import logging
from pyeight.eight import EightSleep
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import (
CONF_BINARY_SENSORS,
DATA_API,
DATA_EIGHT,
DATA_HEAT,
EightSleepBaseEntity,
EightSleepHeatDataCoordinator,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the eight sleep binary sensor."""
if discovery_info is None:
return
name = "Eight"
sensors = discovery_info[CONF_BINARY_SENSORS]
eight: EightSleep = hass.data[DATA_EIGHT][DATA_API]
heat_coordinator: EightSleepHeatDataCoordinator = hass.data[DATA_EIGHT][DATA_HEAT]
all_sensors = [
EightHeatSensor(name, heat_coordinator, eight, side, sensor)
for side, sensor in sensors
]
async_add_entities(all_sensors)
class EightHeatSensor(EightSleepBaseEntity, BinarySensorEntity):
"""Representation of a Eight Sleep heat-based sensor."""
def __init__(
self,
name: str,
coordinator: EightSleepHeatDataCoordinator,
eight: EightSleep,
side: str | None,
sensor: str,
) -> None:
"""Initialize the sensor."""
super().__init__(name, coordinator, eight, side, sensor)
self._attr_device_class = BinarySensorDeviceClass.OCCUPANCY
assert self._usrobj
_LOGGER.debug(
"Presence Sensor: %s, Side: %s, User: %s",
self._sensor,
self._side,
self._usrobj.userid,
)
@property
def is_on(self) -> bool:
"""Return true if the binary sensor is on."""
assert self._usrobj
return bool(self._usrobj.bed_presence)
|
nautobot/circuits/__init__.py | psmware-ltd/nautobot | 384 | 12847 | default_app_config = "nautobot.circuits.apps.CircuitsConfig"
|
installer/core/terraform/resources/variable.py | Diffblue-benchmarks/pacbot | 1,165 | 12849 | from core.terraform.resources import BaseTerraformVariable
class TerraformVariable(BaseTerraformVariable):
"""
Base resource class for Terraform tfvar variable
Attributes:
variable_dict_input (dict/none): Var dict values
available_args (dict): Instance configurations
variable_type (str): Defines the variable type, i.e. a Terraform list var or dict var, etc.
"""
variable_dict_input = None
variable_type = None
available_args = {
'variable_name': {'required': True},
'variable_type': {'required': False},
'default_value': {'required': False}
}
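# Illustrative only (added for clarity; not part of PacBot): a hedged sketch of a concrete
# tfvar class specializing the attributes documented above. The 'list' value and how
# BaseTerraformVariable consumes it are assumptions, not taken from the installer code.
class ExampleListVariable(TerraformVariable):
    """Hypothetical list-type tfvar shown only to illustrate the attribute contract."""
    variable_type = 'list'
    variable_dict_input = None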
|
第12章/program/Requester/Launcher.py | kingname/SourceCodeOfBook | 274 | 12871 | import os
scrapy_project_path = '/Users/kingname/book/chapter_12/DeploySpider'
os.chdir(scrapy_project_path)  # switch the working directory to the Scrapy project root before running the command
os.system('scrapyd-deploy')
import json
import time
import requests
start_url = 'http://45.76.110.210:6800/schedule.json'
start_data = {'project': 'DeploySpider',
'spider': 'Example'}
end_url = 'http://172.16.31.10:6800/cancel.json'
end_data = {'project': 'DeploySpider'}
result = requests.post(start_url, data=start_data, auth=('kingname', 'genius')).text
result = requests.post(end_url, data=end_data, auth=('kingname', 'genius')).text
# result_dict = json.loads(result)
# job_id = result_dict['jobid']
# print(f'Started spider, jobid: {job_id}')
#
# time.sleep(5)
# end_data['job'] = job_id
# result = requests.post(end_url, data=end_data).text
# print(result)
|
glacier/glacierexception.py | JeffAlyanak/amazon-glacier-cmd-interface | 166 | 12891 | import traceback
import re
import sys
import logging
"""
**********
Note by wvmarle:
This file contains the complete code from chained_exception.py plus the
error handling code from GlacierWrapper.py, allowing it to be used in other
modules like glaciercorecalls as well.
**********
"""
class GlacierException(Exception):
"""
An extension of the built-in Exception class, this handles
an additional cause keyword argument, adding it as cause
attribute to the exception message.
It logs the error message (amount of information depends on the log
level) and passes it on to a higher level to handle.
Furthermore it allows for the upstream handler to call for a
complete stack trace or just a simple error and cause message.
TODO: describe usage.
"""
ERRORCODE = {'InternalError': 127, # Library internal error.
'UndefinedErrorCode': 126, # Undefined code.
'NoResults': 125, # Operation yielded no results.
'GlacierConnectionError': 1, # Can not connect to Glacier.
'SdbConnectionError': 2, # Can not connect to SimpleDB.
'CommandError': 3, # Command line is invalid.
'VaultNameError': 4, # Invalid vault name.
'DescriptionError': 5, # Invalid archive description.
'IdError': 6, # Invalid upload/archive/job ID given.
'RegionError': 7, # Invalid region given.
'FileError': 8, # Error related to reading/writing a file.
'ResumeError': 9, # Problem resuming a multipart upload.
'NotReady': 10, # Requested download is not ready yet.
'BookkeepingError': 11, # Bookkeeping not available.
'SdbCommunicationError': 12, # Problem reading/writing SimpleDB data.
'ResourceNotFoundException': 13, # Glacier can not find the requested resource.
'InvalidParameterValueException': 14, # Parameter not accepted.
'DownloadError': 15, # Downloading an archive failed.
'SNSConnectionError': 126, # Can not connect to SNS
'SNSConfigurationError': 127, # Problem with configuration file
'SNSParameterError':128, # Problem with arguments passed to SNS
}
def __init__(self, message, code=None, cause=None):
"""
Constructor. Logs the error.
:param message: the error message.
:type message: str
:param code: the error code.
:type code: str
:param cause: explanation on what caused the error.
:type cause: str
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.exitcode = self.ERRORCODE[code] if code in self.ERRORCODE else 254
self.code = code
if cause:
self.logger.error('ERROR: %s'% cause)
self.cause = cause if isinstance(cause, tuple) else (cause,)
self.stack = traceback.format_stack()[:-2]
else:
self.logger.error('An error occurred, exiting.')
self.cause = ()
# Just wrap up a cause-less exception.
# Get the stack trace for this exception.
self.stack = (
traceback.format_stack()[:-2] +
traceback.format_tb(sys.exc_info()[2]))
# ^^^ let's hope the information is still there; caller must take
# care of this.
self.message = message
self.logger.info(self.fetch(message=True))
self.logger.debug(self.fetch(stack=True))
if self.exitcode == 254:
self.logger.debug('Unknown error code: %s.'% code)
# Works as a generator to help get the stack trace and the cause
# written out.
def causeTree(self, indentation=' ', alreadyMentionedTree=[], stack=False, message=False):
"""
Returns a complete stack tree, an error message, or both.
Returns a warning if neither stack or message are True.
"""
if stack:
yield "Traceback (most recent call last):\n"
ellipsed = 0
for i, line in enumerate(self.stack):
if (ellipsed is not False
and i < len(alreadyMentionedTree)
and line == alreadyMentionedTree[i]):
ellipsed += 1
else:
if ellipsed:
yield " ... (%d frame%s repeated)\n" % (
ellipsed,
"" if ellipsed == 1 else "s")
ellipsed = False # marker for "given out"
yield line
if message:
exc = self if self.message is None else self.message
for line in traceback.format_exception_only(exc.__class__, exc):
yield line
if self.cause:
yield ("Caused by: %d exception%s\n" %
(len(self.cause), "" if len(self.cause) == 1 else "s"))
for causePart in self.cause:
if hasattr(causePart,"causeTree"):
for line in causePart.causeTree(indentation, self.stack):
yield re.sub(r'([^\n]*\n)', indentation + r'\1', line)
else:
for line in traceback.format_exception_only(causePart.__class__, causePart):
yield re.sub(r'([^\n]*\n)', indentation + r'\1', line)
if not message and not stack:
yield ('No output. Specify message=True and/or stack=True \
to get output when calling this function.\n')
def write(self, stream=None, indentation=' ', message=False, stack=False):
"""
Writes the error details to sys.stderr or a stream.
"""
stream = sys.stderr if stream is None else stream
for line in self.causeTree(indentation, message=message, stack=stack):
stream.write(line)
def fetch(self, indentation=' ', message=False, stack=False):
"""
Fetches the error details and returns them as string.
"""
out = ''
for line in self.causeTree(indentation, message=message, stack=stack):
out += line
return out
class InputException(GlacierException):
"""
Exception that is raised when there is something wrong with the
user input.
"""
VaultNameError = 1
VaultDescriptionError = 2
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class ConnectionException(GlacierException):
"""
Exception that is raised when there is something wrong with
the connection.
"""
GlacierConnectionError = 1
SdbConnectionError = 2
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class CommunicationException(GlacierException):
"""
Exception that is raised when there is something wrong in
the communication with an external library like boto.
"""
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class ResponseException(GlacierException):
"""
Exception that is raised when there is an http response error.
"""
def __init__(self, message, code=None, cause=None):
GlacierException.__init__(self, message, code=code, cause=cause)
if __name__ == '__main__':
class ChildrenException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ParentException(GlacierException):
def __init__(self, message, cause=None):
if cause:
GlacierException.__init__(self, message, cause=cause)
else:
GlacierException.__init__(self, message)
try:
try:
raise ChildrenException("parent")
        except ChildrenException as e:
raise ParentException("children", cause=e)
    except ParentException as e:
e.write(indentation='|| ')
|
Lib/async/test/test_echoupper.py | pyparallel/pyparallel | 652 | 12897 | <reponame>pyparallel/pyparallel
import async
from async.services import EchoUpperData
server = async.server('10.211.55.3', 20007)
async.register(transport=server, protocol=EchoUpperData)
async.run()
|
lldb/examples/summaries/cocoa/NSException.py | bytesnake/Enzyme | 427 | 12956 | <filename>lldb/examples/summaries/cocoa/NSException.py
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# summary provider for class NSException
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import CFString
import lldb
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
class NSKnownException_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.id):
self.sys_params.types_cache.id = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeObjCID)
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def offset_name(self):
logger = lldb.formatters.Logger.Logger()
return self.sys_params.pointer_size
def offset_reason(self):
logger = lldb.formatters.Logger.Logger()
return 2 * self.sys_params.pointer_size
def description(self):
logger = lldb.formatters.Logger.Logger()
name_ptr = self.valobj.CreateChildAtOffset(
"name", self.offset_name(), self.sys_params.types_cache.id)
reason_ptr = self.valobj.CreateChildAtOffset(
"reason", self.offset_reason(), self.sys_params.types_cache.id)
return 'name:' + CFString.CFString_SummaryProvider(
name_ptr, None) + ' reason:' + CFString.CFString_SummaryProvider(reason_ptr, None)
class NSUnknownException_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def description(self):
logger = lldb.formatters.Logger.Logger()
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
name_vo = self.valobj.CreateValueFromExpression(
"name", "(NSString*)[" + stream.GetData() + " name]")
reason_vo = self.valobj.CreateValueFromExpression(
"reason", "(NSString*)[" + stream.GetData() + " reason]")
if name_vo.IsValid() and reason_vo.IsValid():
return CFString.CFString_SummaryProvider(
name_vo, None) + ' ' + CFString.CFString_SummaryProvider(reason_vo, None)
return '<variable is not NSException>'
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSException':
wrapper = NSKnownException_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
else:
wrapper = NSUnknownException_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
return wrapper
def NSException_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.description()
except:
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSException>'
return str(summary)
return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSException.NSException_SummaryProvider NSException")
|
RecoBTag/PerformanceDB/python/measure/Pool_mistag110118.py | ckamtsikis/cmssw | 852 | 13021 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBCommon_cfi import *
PoolDBESSourceMistag110118 = cms.ESSource("PoolDBESSource",
CondDBCommon,
toGet = cms.VPSet(
#
# working points
#
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGJBPLtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGJBPLtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGJBPLwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGJBPLwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGJBPMtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGJBPMtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGJBPMwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGJBPMwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGJBPTtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGJBPTtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGJBPTwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGJBPTwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGJPLtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGJPLtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGJPLwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGJPLwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGJPMtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGJPMtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGJPMwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGJPMwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGJPTtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGJPTtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGJPTwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGJPTwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGSSVHEMtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGSSVHEMtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGSSVHEMwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGSSVHEMwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGSSVHPTtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGSSVHPTtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGSSVHPTwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGSSVHPTwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGTCHELtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGTCHELtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGTCHELwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGTCHELwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGTCHEMtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGTCHEMtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGTCHEMwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGTCHEMwp_v5_offline')
),
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGTCHPTtable_v5_offline'),
label = cms.untracked.string('BTagMISTAGTCHPTtable_v5_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGTCHPTwp_v5_offline'),
label = cms.untracked.string('BTagMISTAGTCHPTwp_v5_offline')
),
))
PoolDBESSourceMistag110118.connect = 'frontier://FrontierProd/CMS_COND_31X_PHYSICSTOOLS'
|
get_vocab.py | Amir-Mehrpanah/hgraph2graph | 182 | 13037 | <gh_stars>100-1000
import sys
import argparse
from hgraph import *
from rdkit import Chem
from multiprocessing import Pool
def process(data):
vocab = set()
for line in data:
s = line.strip("\r\n ")
hmol = MolGraph(s)
for node,attr in hmol.mol_tree.nodes(data=True):
smiles = attr['smiles']
vocab.add( attr['label'] )
for i,s in attr['inter_label']:
vocab.add( (smiles, s) )
return vocab
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ncpu', type=int, default=1)
args = parser.parse_args()
data = [mol for line in sys.stdin for mol in line.split()[:2]]
data = list(set(data))
batch_size = len(data) // args.ncpu + 1
batches = [data[i : i + batch_size] for i in range(0, len(data), batch_size)]
pool = Pool(args.ncpu)
vocab_list = pool.map(process, batches)
vocab = [(x,y) for vocab in vocab_list for x,y in vocab]
vocab = list(set(vocab))
for x,y in sorted(vocab):
print(x, y)
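# A minimal sketch of how this script is usually run (the file names are
# assumptions, not part of the repository):
#
#     python get_vocab.py --ncpu 8 < data/all_pairs.txt > vocab.txt
#
# Each stdin line is expected to hold one or two whitespace-separated SMILES;
# the script prints the deduplicated fragment vocabulary, one pair per line.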
|
tests/test_sql.py | YPlan/django-perf-rec | 148 | 13048 | <reponame>YPlan/django-perf-rec<gh_stars>100-1000
from __future__ import annotations
from django_perf_rec.sql import sql_fingerprint
def test_empty():
assert sql_fingerprint("") == ""
assert sql_fingerprint("\n\n \n") == ""
def test_select():
assert sql_fingerprint("SELECT `f1`, `f2` FROM `b`") == "SELECT ... FROM `b`"
def test_select_show_columns(settings):
assert (
sql_fingerprint("SELECT `f1`, `f2` FROM `b`", hide_columns=False)
== "SELECT `f1`, `f2` FROM `b`"
)
def test_select_limit(settings):
assert (
sql_fingerprint("SELECT `f1`, `f2` FROM `b` LIMIT 12", hide_columns=False)
== "SELECT `f1`, `f2` FROM `b` LIMIT #"
)
def test_select_coalesce_show_columns(settings):
assert (
sql_fingerprint(
(
"SELECT `table`.`f1`, COALESCE(table.f2->>'a', table.f2->>'b', "
+ "'default') FROM `table`"
),
hide_columns=False,
)
== "SELECT `table`.`f1`, COALESCE(table.f2->>#, table.f2->>#, #) FROM `table`"
)
def test_select_where():
assert (
sql_fingerprint(
"SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = 1"
)
== "SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = #"
)
def test_select_where_show_columns(settings):
assert (
sql_fingerprint(
"SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = 1",
hide_columns=False,
)
== "SELECT DISTINCT `table`.`field` FROM `table` WHERE `table`.`id` = #"
)
def test_select_comment():
assert (
sql_fingerprint("SELECT /* comment */ `f1`, `f2` FROM `b`")
== "SELECT /* comment */ ... FROM `b`"
)
def test_select_comment_show_columns(settings):
assert (
sql_fingerprint("SELECT /* comment */ `f1`, `f2` FROM `b`", hide_columns=False)
== "SELECT /* comment */ `f1`, `f2` FROM `b`"
)
def test_select_join():
assert (
sql_fingerprint(
"SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = 1"
)
== "SELECT ... FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = #"
)
def test_select_join_show_columns(settings):
assert (
sql_fingerprint(
"SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = 1",
hide_columns=False,
)
== "SELECT f1, f2 FROM a INNER JOIN b ON (a.b_id = b.id) WHERE a.f2 = #"
)
def test_select_order_by():
assert (
sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3")
== "SELECT ... FROM a ORDER BY f3"
)
def test_select_order_by_limit():
assert (
sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3 LIMIT 12")
== "SELECT ... FROM a ORDER BY f3 LIMIT #"
)
def test_select_order_by_show_columns(settings):
assert (
sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3", hide_columns=False)
== "SELECT f1, f2 FROM a ORDER BY f3"
)
def test_select_order_by_multiple():
assert (
sql_fingerprint("SELECT f1, f2 FROM a ORDER BY f3, f4")
== "SELECT ... FROM a ORDER BY f3, f4"
)
def test_select_group_by():
assert (
sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1")
== "SELECT ... FROM a GROUP BY f1"
)
def test_select_group_by_show_columns(settings):
assert (
sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1", hide_columns=False)
== "SELECT f1, f2 FROM a GROUP BY f1"
)
def test_select_group_by_multiple():
assert (
sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1, f2")
== "SELECT ... FROM a GROUP BY f1, f2"
)
def test_select_group_by_having():
assert (
sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21")
== "SELECT ... FROM a GROUP BY f1 HAVING f1 > #"
)
def test_select_group_by_having_show_columns(settings):
assert (
sql_fingerprint(
"SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21", hide_columns=False
)
== "SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > #"
)
def test_select_group_by_having_multiple():
assert (
sql_fingerprint("SELECT f1, f2 FROM a GROUP BY f1 HAVING f1 > 21, f2 < 42")
== "SELECT ... FROM a GROUP BY f1 HAVING f1 > #, f2 < #"
)
def test_insert():
assert (
sql_fingerprint("INSERT INTO `table` (`f1`, `f2`) VALUES ('v1', 2)")
== "INSERT INTO `table` (...) VALUES (...)"
)
def test_insert_show_columns(settings):
assert (
sql_fingerprint(
"INSERT INTO `table` (`f1`, `f2`) VALUES ('v1', 2)", hide_columns=False
)
== "INSERT INTO `table` (`f1`, `f2`) VALUES (#, #)"
)
def test_update():
assert (
sql_fingerprint("UPDATE `table` SET `foo` = 'bar' WHERE `table`.`id` = 1")
== "UPDATE `table` SET ... WHERE `table`.`id` = #"
)
def test_update_no_where():
assert (
sql_fingerprint("UPDATE `table` SET `foo` = 'bar'") == "UPDATE `table` SET ..."
)
def test_declare_cursor():
assert (
sql_fingerprint(
'DECLARE "_django_curs_140239496394496_1300" NO SCROLL CURSOR WITHOUT'
)
== 'DECLARE "_django_curs_#" NO SCROLL CURSOR WITHOUT'
)
def test_savepoint():
assert sql_fingerprint("SAVEPOINT `s140323809662784_x54`") == "SAVEPOINT `#`"
def test_rollback_to_savepoint():
assert (
sql_fingerprint("ROLLBACK TO SAVEPOINT `s140323809662784_x54`")
== "ROLLBACK TO SAVEPOINT `#`"
)
def test_release_savepoint():
assert (
sql_fingerprint("RELEASE SAVEPOINT `s140699855320896_x17`")
== "RELEASE SAVEPOINT `#`"
)
def test_null_value():
assert (
sql_fingerprint(
"SELECT `f1`, `f2` FROM `b` WHERE `b`.`name` IS NULL", hide_columns=False
)
== "SELECT `f1`, `f2` FROM `b` WHERE `b`.`name` IS #"
)
def test_strip_duplicate_whitespaces():
assert (
sql_fingerprint(
"SELECT `f1`, `f2` FROM `b` WHERE `b`.`f1` IS NULL LIMIT 12 "
)
== "SELECT ... FROM `b` WHERE `b`.`f1` IS # LIMIT #"
)
def test_strip_duplicate_whitespaces_recursive():
assert (
sql_fingerprint(
"SELECT `f1`, `f2`, ( COALESCE(b.f3->>'en', b.f3->>'fr', '')) "
"FROM `b` WHERE (`b`.`f1` IS NULL OR ( EXISTS COUNT(1) )) LIMIT 12 ",
hide_columns=False,
)
== "SELECT `f1`, `f2`, (COALESCE(b.f3->>#, b.f3->>#, #)) "
"FROM `b` WHERE (`b`.`f1` IS # OR (EXISTS COUNT(#))) LIMIT #"
)
def test_strip_newlines():
assert (
sql_fingerprint("SELECT `f1`, `f2`\nFROM `b`\n LIMIT 12\n\n")
== "SELECT ... FROM `b` LIMIT #"
)
def test_strip_raw_query():
assert (
sql_fingerprint(
"""
SELECT 'f1'
, 'f2'
, 'f3'
FROM "table_a" WHERE "table_a"."f1" = 1 OR (
"table_a"."type" = 'A' AND
EXISTS (
SELECT "table_b"."id"
FROM "table_b"
WHERE "table_b"."id" = 1
) = true)
"""
)
== (
'SELECT ... FROM "table_a" WHERE "table_a"."f1" = # OR '
+ '("table_a"."type" = # AND EXISTS (SELECT "table_b"."id" FROM '
+ '"table_b" WHERE "table_b"."id" = # ) = true)'
)
)
def test_in_single_value():
assert (
sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1)")
== "SELECT ... FROM `b` WHERE `x` IN (...)"
)
def test_in_multiple_values():
assert (
sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3)")
== "SELECT ... FROM `b` WHERE `x` IN (...)"
)
def test_in_multiple_clauses():
assert (
sql_fingerprint(
"SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3) AND `y` IN (4, 5, 6)"
)
== "SELECT ... FROM `b` WHERE `x` IN (...) AND `y` IN (...)"
)
def test_in_multiple_values_and_clause():
assert (
sql_fingerprint(
"SELECT `f1`, `f2` FROM `b` WHERE `x` IN (1, 2, 3) AND (`y` = 1 OR `y` = 2)"
)
== "SELECT ... FROM `b` WHERE `x` IN (...) AND (`y` = # OR `y` = #)"
)
def test_in_subquery():
assert (
sql_fingerprint("SELECT `f1`, `f2` FROM `b` WHERE `x` IN (SELECT 1)")
== "SELECT ... FROM `b` WHERE `x` IN (SELECT #)"
)
|
clearml/backend_interface/setupuploadmixin.py | arielleoren/clearml | 2,097 | 13053 | from abc import abstractproperty
from ..backend_config.bucket_config import S3BucketConfig
from ..storage.helper import StorageHelper
class SetupUploadMixin(object):
log = abstractproperty()
storage_uri = abstractproperty()
def setup_upload(
self, bucket_name, host=None, access_key=None, secret_key=None, region=None, multipart=True, https=True, verify=True):
"""
Setup upload options (currently only S3 is supported)
:param bucket_name: AWS bucket name
:type bucket_name: str
        :param host: Hostname. Only required in case a Non-AWS S3 solution such as a local Minio server is used
:type host: str
:param access_key: AWS access key. If not provided, we'll attempt to obtain the key from the
            configuration file (bucket-specific, then global)
:type access_key: str
:param secret_key: AWS secret key. If not provided, we'll attempt to obtain the secret from the
            configuration file (bucket-specific, then global)
:type secret_key: str
:param multipart: Server supports multipart. Only required when using a Non-AWS S3 solution that doesn't support
multipart.
:type multipart: bool
:param https: Server supports HTTPS. Only required when using a Non-AWS S3 solution that only supports HTTPS.
:type https: bool
:param region: Bucket region. Required if the bucket doesn't reside in the default region (us-east-1)
:type region: str
:param verify: Whether or not to verify SSL certificates. Only required when using a Non-AWS S3 solution that only supports HTTPS with self-signed certificate.
:type verify: bool
"""
self._bucket_config = S3BucketConfig(
bucket=bucket_name,
host=host,
key=access_key,
secret=secret_key,
multipart=multipart,
secure=https,
region=region,
verify=verify
)
self.storage_uri = ('s3://%(host)s/%(bucket_name)s' if host else 's3://%(bucket_name)s') % locals()
StorageHelper.add_configuration(self._bucket_config, log=self.log)
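# A minimal usage sketch, assuming a task-like object that mixes in
# SetupUploadMixin (the bucket name and Minio endpoint below are made up):
#
#     task.setup_upload(
#         bucket_name='experiment-artifacts',
#         host='minio.local:9000',   # non-AWS endpoint, so host is given explicitly
#         access_key='KEY', secret_key='SECRET',
#         multipart=False, https=False)
#     # task.storage_uri is now 's3://minio.local:9000/experiment-artifacts'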
|
var/spack/repos/builtin/packages/autoconf/package.py | LiamBindle/spack | 2,360 | 13076 | <reponame>LiamBindle/spack
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re

from spack import *  # Spack package DSL: AutotoolsPackage, GNUMirrorPackage, version, patch, ...
class Autoconf(AutotoolsPackage, GNUMirrorPackage):
"""Autoconf -- system configuration part of autotools"""
homepage = 'https://www.gnu.org/software/autoconf/'
gnu_mirror_path = 'autoconf/autoconf-2.69.tar.gz'
version('2.71', sha256='431075ad0bf529ef13cb41e9042c542381103e80015686222b8a9d4abef42a1c')
version('2.70', sha256='f05f410fda74323ada4bdc4610db37f8dbd556602ba65bc843edb4d4d4a1b2b7')
version('2.69', sha256='954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969',
preferred=True)
version('2.62', sha256='83aa747e6443def0ebd1882509c53f5a2133f502ddefa21b3de141c433914bdd')
version('2.59', sha256='9cd05c73c5fcb1f5ccae53dd6cac36bb8cb9c7b3e97ffae5a7c05c72594c88d8')
# https://savannah.gnu.org/support/?110396
patch('https://git.savannah.gnu.org/cgit/autoconf.git/patch/?id=05972f49ee632cd98057a3caf82ebfb9574846da',
sha256='eaa3f69d927a853313a0b06e2117c51adab6377a2278549b05abc5df93643e16',
when='@2.70')
# Apply long-time released and already in-use upstream patches to fix test cases:
# tests/foreign.at (Libtool): Be tolerant of 'quote' replacing the older `quote'
patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-fix-libtool-test.patch',
sha256='7793209b33013dc0f81208718c68440c5aae80e7a1c4b8d336e382525af791a7',
when='@2.69')
# Fix bin/autoscan.in for current perl releases (reported already in January 2013)
patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-perl-5.26.patch',
sha256='35c449281546376449766f92d49fc121ca50e330e60fefcfc9be2af3253082c2',
when='@2.62:2.69 ^[email protected]:')
# Fix bin/autoheader.in for current perl relases not having "." in @INC:
patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-perl-5.26-2.patch',
sha256='a49dd5bac3b62daa0ff688ab4d508d71dbd2f4f8d7e2a02321926346161bf3ee',
when='@2.62:2.69 ^[email protected]:')
# Note: m4 is not a pure build-time dependency of autoconf. m4 is
# needed when autoconf runs, not only when autoconf is built.
depends_on('[email protected]:', type=('build', 'run'))
depends_on('perl', type=('build', 'run'))
build_directory = 'spack-build'
tags = ['build-tools']
executables = [
'^autoconf$', '^autoheader$', '^autom4te$', '^autoreconf$',
'^autoscan$', '^autoupdate$', '^ifnames$'
]
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('--version', output=str, error=str)
match = re.search(r'\(GNU Autoconf\)\s+(\S+)', output)
return match.group(1) if match else None
def patch(self):
# The full perl shebang might be too long; we have to fix this here
# because autom4te is called during the build
patched_file = 'bin/autom4te.in'
# We save and restore the modification timestamp of the file to prevent
# regeneration of the respective man page:
with keep_modification_time(patched_file):
filter_file('^#! @PERL@ -w',
'#! /usr/bin/env perl',
patched_file)
if self.version == Version('2.62'):
# skip help2man for patched autoheader.in and autoscan.in
touch('man/autoheader.1')
touch('man/autoscan.1')
# make installcheck would execute the testsuite a 2nd time, skip it
def installcheck(self):
pass
@run_after('install')
def filter_sbang(self):
# We have to do this after install because otherwise the install
# target will try to rebuild the binaries (filter_file updates the
# timestamps)
# Revert sbang, so Spack's sbang hook can fix it up
filter_file('^#! /usr/bin/env perl',
'#! {0} -w'.format(self.spec['perl'].command.path),
self.prefix.bin.autom4te,
backup=False)
def _make_executable(self, name):
return Executable(join_path(self.prefix.bin, name))
def setup_dependent_package(self, module, dependent_spec):
# Autoconf is very likely to be a build dependency,
# so we add the tools it provides to the dependent module
executables = ['autoconf',
'autoheader',
'autom4te',
'autoreconf',
'autoscan',
'autoupdate',
'ifnames']
for name in executables:
setattr(module, name, self._make_executable(name))
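# Sketch of what determine_version() parses, assuming a stock installation:
# `autoconf --version` starts with a line such as
#
#     autoconf (GNU Autoconf) 2.69
#
# so the regex r'\(GNU Autoconf\)\s+(\S+)' captures '2.69'.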
|
seg/segmentor/tools/module_runner.py | Frank-Abagnal/HRFormer | 254 | 13079 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
# Some methods used by main methods.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.parallel.scatter_gather import gather as torch_gather
from lib.extensions.parallel.data_parallel import DataParallelModel
from lib.utils.tools.logger import Logger as Log
from lib.utils.distributed import get_rank, is_distributed
class ModuleRunner(object):
def __init__(self, configer):
self.configer = configer
self._init()
def _init(self):
self.configer.add(['iters'], 0)
self.configer.add(['last_iters'], 0)
self.configer.add(['epoch'], 0)
self.configer.add(['last_epoch'], 0)
self.configer.add(['max_performance'], 0.0)
self.configer.add(['performance'], 0.0)
self.configer.add(['min_val_loss'], 9999.0)
self.configer.add(['val_loss'], 9999.0)
if not self.configer.exists('network', 'bn_type'):
self.configer.add(['network', 'bn_type'], 'torchbn')
# if self.configer.get('phase') == 'train':
# assert len(self.configer.get('gpu')) > 1 or self.configer.get('network', 'bn_type') == 'torchbn'
Log.info('BN Type is {}.'.format(self.configer.get('network', 'bn_type')))
def to_device(self, *params, force_list=False):
if is_distributed():
device = torch.device('cuda:{}'.format(get_rank()))
else:
device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
return_list = list()
for i in range(len(params)):
return_list.append(params[i].to(device))
if force_list:
return return_list
else:
return return_list[0] if len(params) == 1 else return_list
def _make_parallel(self, net):
if is_distributed():
local_rank = get_rank()
return torch.nn.parallel.DistributedDataParallel(
net,
device_ids=[local_rank],
output_device=local_rank,
find_unused_parameters=True
)
if len(self.configer.get('gpu')) == 1:
self.configer.update(['network', 'gathered'], True)
return DataParallelModel(net, gather_=self.configer.get('network', 'gathered'))
def load_net(self, net):
net = self.to_device(net)
net = self._make_parallel(net)
if not is_distributed():
net = net.to(torch.device('cpu' if self.configer.get('gpu') is None else 'cuda'))
net.float()
if self.configer.get('network', 'resume') is not None:
Log.info('Loading checkpoint from {}...'.format(self.configer.get('network', 'resume')))
resume_dict = torch.load(self.configer.get('network', 'resume'))
if 'state_dict' in resume_dict:
checkpoint_dict = resume_dict['state_dict']
elif 'model' in resume_dict:
checkpoint_dict = resume_dict['model']
elif isinstance(resume_dict, OrderedDict):
checkpoint_dict = resume_dict
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(self.configer.get('network', 'resume')))
if list(checkpoint_dict.keys())[0].startswith('module.'):
checkpoint_dict = {k[7:]: v for k, v in checkpoint_dict.items()}
# load state_dict
if hasattr(net, 'module'):
self.load_state_dict(net.module, checkpoint_dict, self.configer.get('network', 'resume_strict'))
else:
self.load_state_dict(net, checkpoint_dict, self.configer.get('network', 'resume_strict'))
if self.configer.get('network', 'resume_continue'):
self.configer.resume(resume_dict['config_dict'])
return net
@staticmethod
def load_state_dict(module, state_dict, strict=False):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
"""
unexpected_keys = []
own_state = module.state_dict()
for name, param in state_dict.items():
if name not in own_state:
unexpected_keys.append(name)
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
try:
own_state[name].copy_(param)
except Exception:
Log.warn('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(),
param.size()))
missing_keys = set(own_state.keys()) - set(state_dict.keys())
err_msg = []
if unexpected_keys:
err_msg.append('unexpected key in source state_dict: {}\n'.format(', '.join(unexpected_keys)))
if missing_keys:
# we comment this to fine-tune the models with some missing keys.
err_msg.append('missing keys in source state_dict: {}\n'.format(', '.join(missing_keys)))
err_msg = '\n'.join(err_msg)
if err_msg:
if strict:
raise RuntimeError(err_msg)
else:
Log.warn(err_msg)
def save_net(self, net, save_mode='iters'):
if is_distributed() and get_rank() != 0:
return
state = {
'config_dict': self.configer.to_dict(),
'state_dict': net.state_dict(),
}
if self.configer.get('checkpoints', 'checkpoints_root') is None:
checkpoints_dir = os.path.join(self.configer.get('project_dir'),
self.configer.get('checkpoints', 'checkpoints_dir'))
else:
checkpoints_dir = os.path.join(self.configer.get('checkpoints', 'checkpoints_root'),
self.configer.get('checkpoints', 'checkpoints_dir'))
if not os.path.exists(checkpoints_dir):
os.makedirs(checkpoints_dir)
latest_name = '{}_latest.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'))
torch.save(state, os.path.join(checkpoints_dir, latest_name))
if save_mode == 'performance':
if self.configer.get('performance') > self.configer.get('max_performance'):
latest_name = '{}_max_performance.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'))
torch.save(state, os.path.join(checkpoints_dir, latest_name))
self.configer.update(['max_performance'], self.configer.get('performance'))
elif save_mode == 'val_loss':
if self.configer.get('val_loss') < self.configer.get('min_val_loss'):
latest_name = '{}_min_loss.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'))
torch.save(state, os.path.join(checkpoints_dir, latest_name))
self.configer.update(['min_val_loss'], self.configer.get('val_loss'))
elif save_mode == 'iters':
if self.configer.get('iters') - self.configer.get('last_iters') >= \
self.configer.get('checkpoints', 'save_iters'):
latest_name = '{}_iters{}.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'),
self.configer.get('iters'))
torch.save(state, os.path.join(checkpoints_dir, latest_name))
self.configer.update(['last_iters'], self.configer.get('iters'))
elif save_mode == 'epoch':
if self.configer.get('epoch') - self.configer.get('last_epoch') >= \
self.configer.get('checkpoints', 'save_epoch'):
latest_name = '{}_epoch{}.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'),
self.configer.get('epoch'))
torch.save(state, os.path.join(checkpoints_dir, latest_name))
self.configer.update(['last_epoch'], self.configer.get('epoch'))
else:
Log.error('Metric: {} is invalid.'.format(save_mode))
exit(1)
def freeze_bn(self, net, syncbn=False):
for m in net.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
m.eval()
if syncbn:
from lib.extensions import BatchNorm2d, BatchNorm1d
if isinstance(m, BatchNorm2d) or isinstance(m, BatchNorm1d):
m.eval()
def clip_grad(self, model, max_grad=10.):
"""Computes a gradient clipping coefficient based on gradient norm."""
total_norm = 0
for p in model.parameters():
if p.requires_grad:
modulenorm = p.grad.data.norm()
total_norm += modulenorm ** 2
total_norm = math.sqrt(total_norm)
norm = max_grad / max(total_norm, max_grad)
for p in model.parameters():
if p.requires_grad:
p.grad.mul_(norm)
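    # Worked example for clip_grad (numbers are illustrative): if the
    # per-parameter gradient norms are 15 and 20, then
    #   total_norm = sqrt(15**2 + 20**2) = 25
    #   norm = max_grad / max(total_norm, max_grad) = 10 / 25 = 0.4
    # and every gradient is scaled by 0.4. When total_norm <= max_grad the
    # factor is exactly 1, so gradients are left untouched.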
def gather(self, outputs, target_device=None, dim=0):
r"""
Gathers tensors from different GPUs on a specified device
(-1 means the CPU).
"""
if not self.configer.get('network', 'gathered'):
if target_device is None:
target_device = list(range(torch.cuda.device_count()))[0]
return torch_gather(outputs, target_device, dim=dim)
else:
return outputs
def get_lr(self, optimizer):
return [param_group['lr'] for param_group in optimizer.param_groups]
def warm_lr(self, iters, scheduler, optimizer, backbone_list=(0, )):
"""Sets the learning rate
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
if not self.configer.exists('lr', 'is_warm') or not self.configer.get('lr', 'is_warm'):
return
warm_iters = self.configer.get('lr', 'warm')['warm_iters']
if iters < warm_iters:
if self.configer.get('lr', 'warm')['freeze_backbone']:
for backbone_index in backbone_list:
optimizer.param_groups[backbone_index]['lr'] = 0.0
else:
lr_ratio = (self.configer.get('iters') + 1) / warm_iters
base_lr_list = scheduler.get_lr()
for backbone_index in backbone_list:
optimizer.param_groups[backbone_index]['lr'] = base_lr_list[backbone_index] * (lr_ratio ** 4)
|
lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-synthval/myIntSynthProvider.py | medismailben/llvm-project | 2,338 | 13092 | <gh_stars>1000+
class myIntSynthProvider(object):
def __init__(self, valobj, dict):
self.valobj = valobj
self.val = self.valobj.GetChildMemberWithName("theValue")
def num_children(self):
return 0
def get_child_at_index(self, index):
return None
def get_child_index(self, name):
return None
def update(self):
return False
def has_children(self):
return False
def get_value(self):
return self.val
class myArraySynthProvider(object):
def __init__(self, valobj, dict):
self.valobj = valobj
self.array = self.valobj.GetChildMemberWithName("array")
def num_children(self, max_count):
if 16 < max_count:
return 16
return max_count
def get_child_at_index(self, index):
return None # Keep it simple when this is not tested here.
def get_child_index(self, name):
return None # Keep it simple when this is not tested here.
def has_children(self):
return True
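# These classes follow the LLDB synthetic-children provider protocol; the test
# harness normally registers them, but a rough equivalent from the LLDB command
# line would be something like (type name inferred from the provider's name):
#
#     (lldb) type synthetic add myInt --python-class myIntSynthProvider.myIntSynthProvider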
|
custom_components/waste_collection_schedule/waste_collection_schedule/wizard/stadtreinigung_hamburg.py | UBS-P/hacs_waste_collection_schedule | 142 | 13112 | #!/usr/bin/env python3
from html.parser import HTMLParser
import inquirer
import requests
# Parser for HTML input
class InputParser(HTMLParser):
def __init__(self, input_name):
super().__init__()
self._input_name = input_name
self._value = None
@property
def value(self):
return self._value
def handle_starttag(self, tag, attrs):
if tag == "input":
for attr in attrs:
if attr[0] == "name" and attr[1] == self._input_name:
for attr2 in attrs:
if attr2[0] == "value":
self._value = attr2[1]
break
break
# Parser for HTML option list
class OptionParser(HTMLParser):
def __init__(self, select_name):
super().__init__()
self._select_name = select_name
self._within_select = False
self._within_option = False
self._option_name = ""
self._option_value = "-1"
self._choices = []
@property
def choices(self):
return self._choices
def handle_starttag(self, tag, attrs):
if tag == "select":
for attr in attrs:
if attr[0] == "name" and attr[1] == self._select_name:
self._within_select = True
break
elif tag == "option" and self._within_select:
self._within_option = True
for attr in attrs:
if attr[0] == "value":
self._option_value = attr[1]
def handle_endtag(self, tag):
if tag == "select":
self._within_select = False
elif tag == "option":
if (
self._within_select
and self._within_option
and len(self._option_name) > 0
and self._option_value != ""
):
self._choices.append((self._option_name, self._option_value))
self._within_option = False
self._option_name = ""
self._option_value = "-1"
def handle_data(self, data):
if self._within_option:
self._option_name += data
def main():
# search for street
questions = [
inquirer.Text("strasse", message="Enter search string for street"),
# inquirer.Text("hausnummer", message="Enter search string for house number"),
]
answers = inquirer.prompt(questions)
answers["hausnummer"] = ""
answers["bestaetigung"] = "true"
answers["mode"] = "search"
r = requests.post(
"https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html",
data=answers,
)
# search for street
input_parser = InputParser(input_name="asId")
input_parser.feed(r.text)
if input_parser.value is not None:
answers["asId"] = input_parser.value
else:
# query returned a list of streets
parser = OptionParser(select_name="asId")
parser.feed(r.text)
questions = [
inquirer.List("asId", choices=parser.choices, message="Select street")
]
answers.update(inquirer.prompt(questions))
# search for building number
r = requests.post(
"https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html",
data=answers,
)
# parser HTML option list
parser = OptionParser(select_name="hnId")
parser.feed(r.text)
if len(parser.choices) == 0:
answers["hnId"] = ""
else:
questions = [
inquirer.List("hnId", choices=parser.choices, message="Select house number")
]
answers.update(inquirer.prompt(questions))
print("Copy the following statements into your configuration.yaml:\n")
print("# waste_collection_schedule source configuration")
print("waste_collection_schedule:")
print(" sources:")
print(" - name: stadtreinigung_hamburg")
print(" args:")
print(f" asId: {answers['asId']}")
print(f" hnId: {answers['hnId']}")
if __name__ == "__main__":
main()
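# Example of what the wizard prints at the end (asId/hnId below are
# placeholders; the real values come from the interactive selection):
#
#     # waste_collection_schedule source configuration
#     waste_collection_schedule:
#       sources:
#         - name: stadtreinigung_hamburg
#           args:
#             asId: <asId from the street selection>
#             hnId: <hnId from the house number selection>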
|
autosa_tests/large/mm_int16/unroll.py | mfkiwl/AutoSA-SystolicArray | 102 | 13118 | import math
# Modify the parameters here
UNROLL_FACTOR = 32
DATA_T = 'unsigned short'
# Generate the code
data_type = DATA_T
level = int(math.log2(UNROLL_FACTOR))
for layer in range(level - 1, -1, -1):
pair = int(math.pow(2, layer))
for i in range(pair):
# data_t tmp_[layer]_[pair] = tmp_[layer+1]_[pair*2]_[pair*2+1]
if layer == level - 1:
print(f'{data_type} mul_{layer}_{i}_0 = local_A[0][{i*2}] * local_B[0][{i*2}];')
print(f'{data_type} add_{layer}_{i} = mul_{layer}_{i}_0 + local_A[0][{i*2+1}] * local_B[0][{i*2+1}];')
else:
print(f'{data_type} add_{layer}_{i} = add_{layer+1}_{i*2} + add_{layer+1}_{i*2+1};')
print('local_C[c7][c6] += add_0_0;')
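# For reference, with UNROLL_FACTOR = 4 (and DATA_T unchanged) the script
# emits the following reduction tree:
#
#     unsigned short mul_1_0_0 = local_A[0][0] * local_B[0][0];
#     unsigned short add_1_0 = mul_1_0_0 + local_A[0][1] * local_B[0][1];
#     unsigned short mul_1_1_0 = local_A[0][2] * local_B[0][2];
#     unsigned short add_1_1 = mul_1_1_0 + local_A[0][3] * local_B[0][3];
#     unsigned short add_0_0 = add_1_0 + add_1_1;
#     local_C[c7][c6] += add_0_0;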
|
skfda/exploratory/__init__.py | jiduque/scikit-fda | 147 | 13120 | from . import depth
from . import outliers
from . import stats
from . import visualization
|
MuonAnalysis/MuonAssociators/test/L1MuonMatcher/test.py | ckamtsikis/cmssw | 852 | 13129 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
# initialize MessageLogger and output report
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.threshold = 'INFO'
process.MessageLogger.cerr.INFO = cms.untracked.PSet(
default = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
PATSummaryTables = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
# source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
#'file:/afs/cern.ch/cms/PRS/top/cmssw-data/relval200-for-pat-testing/TauolaTTbar-Summer08_IDEAL_V9_v1-AODSIM.80.root'
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/1E84F77B-341C-DE11-8A99-0019DB29C5FC.root',
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/34267FD6-1C1C-DE11-A836-001617C3B78C.root',
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/68BF59CF-1C1C-DE11-AFA9-000423D98BC4.root'
)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.globaltag = cms.string('IDEAL_V9::All')
process.GlobalTag.globaltag = cms.string('STARTUP_V9::All')
process.load("Configuration.StandardSequences.MagneticField_cff")
# PAT Layer 0+1
process.load("PhysicsTools.PatAlgos.patSequences_cff")
process.load("MuonAnalysis.MuonAssociators.muonL1Match_cfi")
process.muonL1Match.preselection = cms.string("")
process.allLayer1Muons.trigPrimMatch = cms.VInputTag(
cms.InputTag("muonL1Match"),
cms.InputTag("muonL1Match","propagatedReco"),
)
## Put your EDAnalyzer here
## process.plots = cms.EDFilter("DataPlotter",
## muons = cms.InputTag("cleanLayer1Muons"),
## muonCut = cms.string("")
## )
process.p = cms.Path(
process.muonL1Match *
process.patDefaultSequence
# * process.plots
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("plots.root")
)
|
market_sim/_agents/risk_model.py | quanttrade/rl_trading | 247 | 13148 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implement different methods to hedge positions and measure the risk of a
zero-coupon bond portfolio
REFERENCE: <NAME>; <NAME>.; <NAME>., "Interest Rate Risk
Modeling, the fixed Income Valuation course". Wiley, 2005
@author: ucaiado
Created on 12/22/2016
"""
import numpy as np
import math
import pandas as pd
import pprint
'''
Begin help functions
'''
'''
End help functions
'''
def update_maxmin(f_frice, a):
'''
Update maximum and minimum price observed by the agent while positioned
    :param f_frice: float. the last price observed
:param a: agent object.
'''
if f_frice > a.current_max_price:
a.current_max_price = f_frice
if f_frice < a.current_min_price:
a.current_min_price = f_frice
class RiskModel(object):
'''
A basic risk model representation for a fixed income strategy that measures
the loss potential and the immunization needs
'''
def __init__(self, env, f_portfolio_value=10**6):
'''
Initiate a RiskModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
        :param f_portfolio_value*: float. The total portfolio value
'''
self.env = env
self.l_hedging_instr = env.l_hedge
self.s_main = env.s_main_intrument
self.l_ratios = []
self.d_dv01 = {}
self.na_pu = None
self.na_du = None
self.f_portfolio_value = f_portfolio_value
self.s_risk_model = 'BasicModel'
self.b_stop_trading = False
self.price_stop_buy = None
self.price_stop_sell = None
def reset(self):
'''
reset risk model parameters to use in a new simulation
'''
self.current_price = None
self.b_stop_trading = False
self.price_stop_buy = None
self.price_stop_sell = None
self.l_ratios = []
self.na_pu = None
self.na_du = None
def set_ratios(self):
'''
        Set the DV01 ratios between the main instrument and the other
        instruments available for hedging
'''
# calculate the dv01 for each instrument
d_aux = {}
l_rtn = []
l_du = []
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
f_du = self.env.l_du[self.env.order_matching.idx][idx]/252.
f_price, f_qty = book_obj.best_bid
f_dv01 = (f_du*10.)/(1. + f_price/100.)**(1. + f_du)
d_aux[s_key] = f_dv01
l_du.append(f_du)
        # calculate the ratio in relation to the main instrument
self.d_dv01 = d_aux
for s_instr in self.l_hedging_instr:
l_rtn.append(d_aux[s_instr]/d_aux[self.s_main])
self.l_du = l_du
return l_rtn
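    # Note on the DV01 formula above: with PU = 10**5 / (1 + y/100)**t and t
    # in years (du/252), a 1 bp move in the rate changes the price by roughly
    #   |dPU| = t * 10**5 * 0.0001 / (1 + y/100)**(t + 1) = 10*t / (1 + y/100)**(t + 1)
    # which is exactly f_dv01. For example (illustrative numbers), t = 2 years
    # at y = 10% gives 20 / 1.1**3 ~= 15.03 per contract.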
def portfolio_duration(self, d_position):
'''
Return the duration of a portfolio
:param d_position: dictionary. portfolio to be hedged
'''
l_pu = []
l_pos = []
l_du = []
self.last_pu = {}
self.last_pos = {}
self.last_du = {}
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
f_du = self.env.l_du[self.env.order_matching.idx][idx]
f_price, f_qty = book_obj.best_bid
f_pu = 10.**5/(1. + f_price/100.)**(f_du/252.)
            f_pos = -d_position[s_key]['qBid']  # invert sign: quantity in PU?
f_pos -= -d_position[s_key]['qAsk']
self.last_du[s_key] = f_du
l_du.append(f_du)
self.last_pos[s_key] = f_pos
l_pos.append(f_pos)
self.last_pu[s_key] = f_pu
l_pu.append(f_pu)
return self._get_duration(l_pu, l_du, l_pos)
def _get_duration(self, l_pu, l_du, l_pos):
'''
Calculate the duration for a given position
        :param l_pu: list. PU of each instrument
        :param l_du: list. days to maturity (DU) of each instrument
:param l_pos: list. final position in each instrument traded
'''
na_weight = self._get_weights(l_pu, l_pos)
return sum(np.array(l_du)/252. * na_weight)
def _get_weights(self, l_pu, l_pos):
'''
Return the positions as portfolio weights
:param l_pu: list. the PU of each instrument
:param l_pos: list. final position in each instrument traded (in PU)
'''
na_weight = np.array(l_pu) * np.array(l_pos)
na_weight /= self.f_portfolio_value
return na_weight
def get_instruments_to_hedge(self, agent):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
        :param agent: Agent object. agent that needs to hedge
'''
d_position = agent.position
return self._get_instruments_to_hedge(d_position)
def _get_instruments_to_hedge(self, d_position):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
:param d_position: dictionary. portfolio in qty of contracts
'''
# check the ratios just once
if not self.l_ratios:
self.l_ratios = self.set_ratios()
f_current_duration = self.portfolio_duration(d_position)
        # check where we should hedge and in what quantity
f_main_pos = -d_position[self.s_main]['qBid']
f_main_pos -= -d_position[self.s_main]['qAsk']
l_hedged_position = []
l_pos = [f_main_pos]
l_du = [self.last_du[self.s_main]]
l_pu = [self.last_pu[self.s_main]]
for s_instr, f_ratio in zip(self.l_hedging_instr, self.l_ratios):
if s_instr == self.s_main:
s_action = 'BUY'
if f_main_pos < 0:
s_action = 'SELL'
if f_main_pos == 0:
return []
return [(s_action, s_instr, f_main_pos)]
f_aux_pos = -d_position[s_instr]['qBid']
f_aux_pos -= -d_position[s_instr]['qAsk']
l_hedged_position.append(f_aux_pos*f_ratio)
l_pos.append(f_aux_pos)
l_du.append(self.last_du[s_instr])
l_pu.append(self.last_pu[s_instr])
f_main_position = f_main_pos + sum(np.array(l_hedged_position))
na_to_hedge = np.array([f_main_position] * len(l_hedged_position))
na_to_hedge /= np.array(self.l_ratios)
na_sign = np.sign(na_to_hedge)
na_mult = 5 * na_sign
if sum((abs(na_to_hedge)/5) < 1) != 0:
na_to_hedge = np.ceil(abs(na_to_hedge)/5).astype(int) * na_mult
else:
na_to_hedge = np.round(abs(na_to_hedge)/5).astype(int) * na_mult
l_to_hedge = list(na_to_hedge)
l_rtn = []
for idx, s_instr in enumerate(self.l_hedging_instr):
i_qty = -l_to_hedge[idx]
if i_qty != 0:
l_pos_aux = l_pos[:]
l_pos_aux[idx+1] += i_qty
f_future_duration = self._get_duration(l_pu, l_du, l_pos_aux)
f_abs_dur = abs(f_future_duration)
                # if qty is not enough to decrease the duration, increase it
if f_abs_dur > 1.2 and f_abs_dur < 3.:
i_qty *= 2
elif f_abs_dur >= 3.:
i_qty *= 3
l_pos_aux = l_pos[:]
l_pos_aux[idx+1] += i_qty
f_future_duration = self._get_duration(l_pu, l_du, l_pos_aux)
# recalculate all
if abs(f_future_duration) < abs(f_current_duration):
# change to rate quantity
s_action = 'BUY'
if -i_qty < 0:
s_action = 'SELL'
l_rtn.append((s_action, s_instr, -i_qty))
return l_rtn
class KRDModel(RiskModel):
'''
A key rate duration model representation that uses the KRDs selected to
    decide what instruments should be used in the immunization of a portfolio
'''
def __init__(self, env, l_krd, f_portfolio_value=10**6, s_kind='trava'):
'''
Initiate a KRDModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
:param l_krd: list. maturity of the key rates used, in years
        :param f_portfolio_value*: float. The total portfolio value
'''
super(KRDModel, self).__init__(env, f_portfolio_value)
self.s_risk_model = 'KRDModel_{}'.format(s_kind)
self.l_krd = l_krd
self.df_ratios = None
self.l_cmm_target = ['DI1F19', 'DI1F21', 'DI1F23']
self.s_kind = s_kind
def portfolio_krd(self, d_position):
'''
Return a tuple with the key rate durations of a portfolio and all
information needed to recalculate it
:param d_position: dictionary. portfolio to be hedged
'''
# recover variables
f_facevalue = 10.**5
l_rates = []
l_pos = []
l_maturity = []
l_instrument = []
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
book_obj = self.env.order_matching.l_order_books[idx]
l_instrument.append(book_obj.s_instrument)
f_du = self.env.l_du[self.env.order_matching.idx][idx]
f_price, f_qty = book_obj.best_bid
            f_pos = -d_position[s_key]['qBid']  # invert sign: quantity in PU?
f_pos -= -d_position[s_key]['qAsk']
l_maturity.append(f_du/252.)
l_pos.append(f_pos)
l_rates.append(f_price)
# get the key rate duration matrix
l_exp_pu = [f_facevalue * np.exp(-f_rate/100 * f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
l_pu = [f_facevalue * (1.+f_rate/100)**(-f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
l_dPdYP = [f_facevalue * f_mat * np.exp(-f_rate/100 * f_mat)
for f_rate, f_mat in zip(l_rates, l_maturity)]
df_krd = self.key_rates(l_dPdYP, l_exp_pu)
na_weights = self._get_weights(l_pu, l_pos)
df_exposure = self._get_krd_exposure(df_krd, na_weights)
t_rtn = (df_krd, na_weights, df_exposure, l_maturity, l_pos, l_pu,
l_instrument)
return t_rtn
def _get_krd_exposure(self, df_krd, na_weights):
'''
        Return the exposure in KRDs based on the KRD matrix and weights passed
        :param df_krd: data frame. KRD of the instruments traded
        :param na_weights: numpy array. the weight in portfolio of each KRD
'''
df_exposure = pd.Series(df_krd.T.dot(na_weights))
df_exposure.index = self.l_krd
return df_exposure
def key_rates(self, l_dPdYP, l_pu):
'''
        Return the matrix of key rate durations for the instruments traded
        in the environment
        :param l_dPdYP: list. $-\frac{dP}{dY}$ of each instrument
        :param l_pu: list. PU of each contract
'''
        # add up the linear contributions $s(t, t_i)$ for $i=1, 2, ..., m$ to
# obtain the change in the given zero-coupon rate $\Delta y(t)$
if isinstance(self.df_ratios, type(None)):
self._set_linear_contributions()
df = self.df_ratios
return df.apply(lambda x: x * np.array(l_dPdYP) / np.array(l_pu),
axis=0)
def get_target_krds(self, l_cmm, d_data, df_krd, s_kind='fly'):
'''
        Return the target KRDs pandas Series to match a butterfly.
        :param l_cmm: list. instruments used in the butterfly, ordered by maturity
        :param d_data: dictionary. PU and maturity of each instrument
:param s_kind*: string. the kind of target to return
'''
# calculate positions
if s_kind == 'fly':
f_Qm = 1. # quantity at the middle of the structure
f_alpha = (d_data[l_cmm[2]][1] * 1. - d_data[l_cmm[1]][1])
f_alpha /= (d_data[l_cmm[2]][1] / 1. - d_data[l_cmm[0]][1])
f_Qs = (f_Qm * f_alpha * d_data[l_cmm[1]][0]) / d_data[l_cmm[0]][0]
f_Ql = (f_Qm * (1 - f_alpha) * d_data[l_cmm[1]][0])
f_Ql /= d_data[l_cmm[2]][0]
l_pos = [-f_Qs, f_Qm, -f_Ql]
elif s_kind == 'trava':
l_pu = [d_data[s_key][0] for s_key in l_cmm]
l_mat = [d_data[s_key][1] for s_key in l_cmm]
l_pos = [0., 10, 0.]
na_weights = self._get_weights(l_pu, l_pos)
f_curr_duration = sum(np.array(l_mat) * na_weights)
l_pos_aux = []
for s_key in self.l_hedging_instr:
f_pu = d_data[s_key][0]
f_matr = d_data[s_key][1]
f_dur_aux = 5. * f_pu / self.f_portfolio_value * f_matr
f_unt = -f_curr_duration / f_dur_aux * 5.
l_pos_aux.append(f_unt)
l_pos = [l_pos_aux[0]/20.] + [1.] + [l_pos_aux[1]/20.]
        # calculate target
l_p = [d_data[l_cmm[0]][0], d_data[l_cmm[1]][0], d_data[l_cmm[2]][0]]
na_weights = self._get_weights(l_p, l_pos)
df_target = pd.Series(df_krd.T.dot(na_weights))
df_target.index = self.l_krd
return df_target
def _set_linear_contributions(self):
'''
Define the linear contribution $s(t, t_i)$ made by the change in the
ith key rate, $\Delta y(t_i)$, to the change in a given zero-coupon
rate $\Delta y(t)$, according to Nawalkha, 266
'''
l_maturity = []
l_krd = self.l_krd
# recover data from books
for s_key, idx in self.env.order_matching.d_map_book_list.iteritems():
f_du = self.env.l_du[self.env.order_matching.idx][idx]
l_maturity.append(f_du/252.)
# create the $s(t, t_i)$ matrix, according to Nawalkha, 266
l = []
i_last_idx = len(l_krd) - 1
for i_list, f_mat in enumerate(l_maturity):
l.append([])
for idx in xrange(len(l_krd)):
f_krd = l_krd[idx]
if idx == 0:
f_krd1 = l_krd[idx+1]
if f_mat < f_krd:
l[i_list].append(1.)
elif f_mat > f_krd1:
l[i_list].append(0.)
else:
l[i_list].append((f_krd1 - f_mat)/(f_krd1-f_krd))
elif idx == i_last_idx:
f_krd_1 = l_krd[idx-1]
if f_mat > f_krd:
l[i_list].append(1.)
elif f_mat < f_krd_1:
l[i_list].append(0.)
else:
l[i_list].append((f_mat - f_krd_1)/(f_krd-f_krd_1))
else:
f_krd1 = l_krd[idx+1]
f_krd_1 = l_krd[idx-1]
if (f_mat >= f_krd_1) & (f_mat <= f_krd):
l[i_list].append((f_mat - f_krd_1)/(f_krd-f_krd_1))
elif (f_mat >= f_krd) & (f_mat <= f_krd1):
l[i_list].append((f_krd1 - f_mat)/(f_krd1-f_krd))
elif (f_mat < f_krd_1) | (f_mat > f_krd1):
l[i_list].append(0.)
else:
l[i_list].append(0.)
self.df_ratios = pd.DataFrame(l)
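    # Worked example for the matrix above (illustrative numbers): with key
    # rates at [1, 2, 5] years, an instrument maturing in 1.5 years gets
    #   s(t, t_i) = [(2 - 1.5)/(2 - 1), (1.5 - 1)/(2 - 1), 0] = [0.5, 0.5, 0.0]
    # i.e. its rate change is split linearly between the two neighbouring key
    # rates and the 5y key rate does not contribute.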
def _get_instruments_to_hedge(self, d_position):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio (in rate, not PU)
:param d_position: dictionary. portfolio in qty of contracts
'''
# measure the KRDs of the current portfolios
f_portfolio_value = self.f_portfolio_value
t_rtn = self.portfolio_krd(d_position)
df_krd, na_weights, df_expos, l_mat, l_pos, l_pu, l_instr = t_rtn
d_aux = dict(zip(l_instr, zip(l_pu, l_mat,
np.cumsum(len(l_instr) * [1])-1)))
df_target = self.get_target_krds(self.l_cmm_target, d_aux, df_krd,
s_kind=self.s_kind)
        # NOTE: Why am I inverting the sign? I don't know
# ... maybe something related to positions in PU and rates
df_target *= (l_pos[d_aux[self.l_cmm_target[1]][2]])
# calculate the current duration and distance for the target in
# absolute percentage
f_curr_duration = sum(np.array(l_mat) * na_weights)
f_curr_abs_target = sum(abs((df_expos-df_target)/df_target))
# check which hedge will drive the strategy closer to the target
f_min_abs_target = f_curr_abs_target
l_rtn = []
for idx, s_key in enumerate(self.l_hedging_instr):
f_pu = d_aux[s_key][0]
f_matr = d_aux[s_key][1]
f_dur_aux = 5. * f_pu / f_portfolio_value * f_matr
f_unt = np.round(-f_curr_duration / f_dur_aux)
if abs(f_unt) > 10e-6:
s_debug = '\t{}: {:0.2f}, {:0.2f}'
# limit the number of contracts that can be traded at each time
i_qty = float(f_unt*5)
if f_unt > 3.:
i_qty = 15.
elif f_unt < -3.:
i_qty = -15.
                # simulate how the measures would look after this hedge
# recalculate all
idx = d_aux[s_key][2]
l_pos_aux = l_pos[:]
l_pos_aux[idx] += i_qty
na_weights_aux = self._get_weights(l_pu, l_pos_aux)
f_aux_duration = sum(np.array(l_mat) * na_weights_aux)
df_expos_aux = self._get_krd_exposure(df_krd, na_weights_aux)
f_aux_abs_target = sum(abs((df_expos_aux-df_target)/df_target))
# === DEBUG ===
# print s_debug.format(s_key, f_aux_duration, f_aux_abs_target)
# =============
# check the hedge instrument that will drive down the krd most
if abs(f_aux_duration) < abs(f_curr_duration):
if f_aux_abs_target < f_min_abs_target:
f_min_abs_target = f_aux_abs_target
# the quantity is in PU. So Convert to rate
s_action = 'BUY'
if -i_qty < 0:
s_action = 'SELL'
l_rtn = [(s_action, s_key, -i_qty)]
return l_rtn
class SingleHedgeModel(RiskModel):
'''
    A SingleHedgeModel representation that immunizes the portfolio using just
    one instrument
'''
def __init__(self, env, f_portfolio_value=10**6, s_instrument='DI1F19'):
'''
        Initiate a SingleHedgeModel object. Save all parameters as attributes
        :param env: Environment object. the environment that uses this object
        :param f_portfolio_value*: float. The total portfolio value
        :param s_instrument*: string. the single instrument used to hedge
'''
super(SingleHedgeModel, self).__init__(env, f_portfolio_value)
self.s_risk_model = 'SingleHedgeModel'
self.l_hedging_instr = [s_instrument]
class GreedyHedgeModel(RiskModel):
'''
    A GreedyHedgeModel checks if the market is offering a good deal to
    hedge the agent's position. The immunization is done using a duration
    neutral strategy that uses just one instrument. The 'good deal' notion
    should be implemented as something related to price, time or even
    fair-priceness quant stuff
'''
def __init__(self, env, f_value=10**6, s_instrument='DI1F19',
s_fairness='spread'):
'''
Initiate a GreedyHedgeModel object. Save all parameters as attributes
:param env: Environment object. the environment that uses this object
        :param s_instrument*: string. instrument used to hedge
        :param s_fairness*: string. the fair price notion of the agent
        :param f_value*: float. The total value available
'''
super(GreedyHedgeModel, self).__init__(env, f_value)
self.s_fairness = s_fairness
if s_fairness == 'spread':
self.func_fair_price = self._compare_to_spread
elif s_fairness == 'closeout':
# closeout also should include stoploss?
self.func_fair_price = self._compare_to_closeout
s_instrument = env.s_main_intrument
self.s_risk_model = 'GreedyHedge_{}'.format(s_fairness)
self.l_hedging_instr = [s_instrument]
self.main_hedge = s_instrument
self.f_target = 0.03 # could be smaller when closeout (2 bps?)
self.f_stop = 0.03
self.last_txt = ''
self.current_price = None
self.f_last_gain = None
self.f_last_loss = None
self.price_stop_buy = None
self.price_stop_sell = None
def set_gain_loss(self, f_gain, f_loss):
'''
        Set the gain and loss targets at which the agent stops trading for the session
        :param f_gain: float. gain target
        :param f_loss: float. loss target
'''
self.f_last_gain = f_gain
self.f_last_loss = f_loss
def can_open_position(self, s_side, agent):
'''
        Check the position limits of an agent
        :param s_side: string. Side of the trade to check the limit
        :param agent: Agent object. agent whose position limits are checked
'''
if not self.l_ratios:
self.l_ratios = self.set_ratios()
# recover position limits
s_instr = self.env.s_main_intrument
f_max_pos = agent.max_pos
f_max_disclosed = agent.max_disclosed_pos
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
f_pnlt = 0.
# check if can open position to a specific side
if s_side == 'ASK':
if f_pos <= f_max_pos * -1:
return False
elif f_pos_discl <= f_max_disclosed * -1:
return False
elif s_side == 'BID':
if f_pos >= f_max_pos:
return False
elif f_pos_discl >= f_max_disclosed:
return False
return True
def should_open_at_current_price(self, s_side, agent):
        '''
        Check if the agent should open a position at the current price, given
        recent stops and the prices it has already traded on that side
        :param s_side: string. side to open, BID or ASK
        :param agent: Agent object. agent willing to open a position
        '''
# recover position limits
s_instr = self.env.s_main_intrument
f_pnlt = 0.
if agent.f_pnl < -1500.:
f_pnlt = self.f_stop / 3. * 3.
elif agent.f_pnl < -1000.:
f_pnlt = self.f_stop / 3. * 2
elif agent.f_pnl < -500.:
f_pnlt = self.f_stop / 3. * 1.
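        # NOTE: f_pnlt widens the "do not trade here again" band around the
        # last stop prices and the agent's own fills as the session PnL
        # deteriorates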
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
        # check if there is something weird in the prices
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.04):
# print 'wierd bid-ask spread', f_bidask_spread
return False
# check if can open position based on the last stop
if self.price_stop_sell and s_side == 'ASK':
f_check = self.price_stop_sell
if f_current_ask >= f_check - f_pnlt:
if f_current_ask <= f_check + f_pnlt:
# print 'last time of stop at ask', f_check
return False
if self.price_stop_buy and s_side == 'BID':
f_check = self.price_stop_buy
if f_current_bid >= f_check - f_pnlt:
if f_current_bid <= f_check + f_pnlt:
# print 'last time of stop at bid', f_check
return False
# check if can open positions based on the last price traded
if f_pos < 0 and s_side == 'ASK':
l_agent_prices = [f_p for f_p, f_q, d_tob in
agent.d_trades[s_instr][s_side]]
f_min = min(l_agent_prices) - f_pnlt
f_max = max(l_agent_prices) + f_pnlt
if f_current_ask >= f_min and f_current_ask <= f_max:
# print 'same prices at ask', f_current_ask, f_max, f_min
return False
elif f_pos > 0 and s_side == 'BID':
l_agent_prices = [f_p for f_p, f_q, d_tob in
agent.d_trades[s_instr][s_side]]
f_min = min(l_agent_prices) - f_pnlt
f_max = max(l_agent_prices) + f_pnlt
if f_current_bid >= f_min and f_current_bid <= f_max:
# print 'same prices at bid', f_current_bid, f_max, f_min
return False
elif f_pos_discl > 0 and s_side == 'ASK':
f_agent_price = agent.current_open_price
if abs(f_current_ask - f_agent_price) < 0.005:
# print 'too low at ask', f_current_ask, f_agent_price
return False
elif f_pos_discl < 0 and s_side == 'BID':
f_agent_price = agent.current_open_price
if abs(f_current_bid - f_agent_price) < 0.005:
# print 'too low at bid', f_current_bid, f_agent_price
return False
return True
def should_hedge_open_position(self, agent):
'''
Check if the current open position should be hedged
        :param agent: Agent object. agent that needs to hedge
'''
# recover position limits
s_instr = self.env.s_main_intrument
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
# recover price from hedging instrument
obj_book = self.env.get_order_book(self.main_hedge)
if f_pos_discl < 0:
f_price, f_qty = obj_book.best_ask
elif f_pos_discl > 0:
f_price, f_qty = obj_book.best_bid
        # check if it is fair to mount a spread
if f_pos_discl != 0 and f_pos != 0:
s_side = 'ASK'
if f_pos > 0:
s_side = 'BID'
if not self.func_fair_price(f_price, f_pos_discl, agent, s_side):
return False
print '.',
# close out open positions by the current mid
if s_instr != self.main_hedge:
obj_book = self.env.get_order_book(s_instr)
f_ask, f_qty = obj_book.best_ask
f_bid, f_qty = obj_book.best_bid
f_mid = (f_ask + f_bid)/2.
if f_pos_discl < 0:
f_qty = abs(f_pos_discl)
f_vol = f_qty * f_mid
agent.disclosed_position[s_instr]['qBid'] += f_qty
agent.disclosed_position[s_instr]['Bid'] += f_vol
elif f_pos_discl > 0:
f_qty = abs(f_pos_discl)
f_vol = f_qty * f_mid
agent.disclosed_position[s_instr]['qAsk'] += f_qty
agent.disclosed_position[s_instr]['Ask'] += f_vol
return True
def get_instruments_to_hedge(self, agent):
'''
Return a list of tuples with the instruments and quantities that can be
used to hedge a given portfolio
        :param agent: Agent object. agent that needs to hedge
'''
# TODO: if s_fairness==closeout, should "hedge" on the main instrument
d_position = agent.position
return self._get_instruments_to_hedge(d_position)
def should_stop_disclosed(self, agent):
'''
Return if the agent should stop the current disclosed position or not
        :param agent: Agent object. agent that needs to hedge
'''
s_instr = self.env.s_main_intrument
# calculate the current position
f_pos = agent.position[s_instr]['qBid']
f_pos -= agent.position[s_instr]['qAsk']
f_pos_discl = f_pos + agent.disclosed_position[s_instr]['qBid']
f_pos_discl -= agent.disclosed_position[s_instr]['qAsk']
f_agent_price = agent.current_open_price
if not f_agent_price or f_pos_discl == 0.:
if self.b_stop_trading:
agent.done = True
return False
f_ref_price = f_agent_price
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
# check if there is something weird with the spread
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.03):
return False
# check if should stop to trade
if self.b_stop_trading:
return True
if self.f_last_gain:
f_pnl = agent.f_pnl - 40. # due to MtM
if f_pnl > self.f_last_gain:
self.b_stop_trading = True
return True
elif f_pnl < self.f_last_loss:
self.b_stop_trading = True
return True
# check if should execute the stop gain
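        # the reference price trails the best price reached since the
        # position was opened, so this block behaves as a trailing stop on
        # the disclosed position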
if f_pos_discl > 0:
update_maxmin(f_current_bid, agent)
f_ref_price = max(agent.current_max_price, f_ref_price)
f_loss = f_ref_price - self.f_stop
if f_current_bid < f_loss:
if i_qbid <= 600.:
return True
return f_current_bid < f_loss - self.f_stop/2.
elif f_pos_discl < 0:
update_maxmin(f_current_ask, agent)
f_ref_price = min(agent.current_min_price, f_ref_price)
f_loss = f_ref_price + self.f_stop
if f_current_ask > f_loss:
if i_qask <= 600.:
return True
return f_current_ask > f_loss + self.f_stop/2.
return False
def _compare_to_spread(self, f_current_price, f_open_pos, agent, s_side):
'''
        Check if the current price of the hedging instrument is at least as
        good as the agent's average entry price adjusted by the average
        spread observed at the fills and by the target parameter
        :param f_current_price: float. The current price in the hedging instr
        :param f_open_pos: float. the current disclosed position
        :param agent: Agent object. agent that needs to hedge
        :param s_side: string. side of the disclosed position fills
'''
# short_current_price >= (long_avg_price-avg_spread_price + param)
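        # e.g. (hypothetical numbers): long fills averaging 12.50, an average
        # spread of 0.40 to the hedge and f_target = 0.03 give a fair hedging
        # price of 12.50 - 0.40 + 0.03 = 12.13, so the hedge is only
        # considered fair once the hedging instrument trades at 12.13 or above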
if f_open_pos > 0:
f_param = self.f_target # NOTE: hard coded
elif f_open_pos < 0:
f_param = -self.f_target # NOTE: hard coded
s_instr = self.env.s_main_intrument
s_hedge = self.main_hedge
# s_side = 'ASK'
# if f_open_pos > 0:
# s_side = 'BID'
# implement the prices accountability
idx = int(abs(f_open_pos/agent.order_size))
l_disclosed = agent.d_trades[s_instr][s_side][-idx:]
if len(l_disclosed) == 0:
print 'no disclosed position'
print '--open'
pprint.pprint(agent.d_trades)
print '--position'
pprint.pprint(agent.position)
print '--disclosed'
print agent.disclosed_position
print '--param'
print s_side, f_open_pos
raise NotImplementedError
f_long_avg_price = 0.
f_avg_spread = 0.
f_qtot = 0.
for f_p, f_q, d_tob in l_disclosed:
f_long_avg_price += f_p*f_q
f_qtot += f_q
f_aux = (d_tob[s_instr]['Ask'] + d_tob[s_instr]['Bid'])/2.
f_aux -= (d_tob[s_hedge]['Ask'] + d_tob[s_hedge]['Bid'])/2.
f_avg_spread += f_aux * f_q
f_long_avg_price /= f_qtot
f_avg_spread /= f_qtot
f_fair_price = (f_long_avg_price - f_avg_spread + f_param)
# keep the price into memory of the agent
agent.current_open_price = f_long_avg_price
s_err = 'PRICE: {}, DISCL: {}, AVG SPREAD: {}, MY PRICE: {}'
s_err += ', CURRNT: {}'
s_err = s_err.format(f_fair_price, f_open_pos, f_avg_spread,
f_long_avg_price, f_current_price)
if self.last_txt != s_err:
# print s_err
self.last_txt = s_err
if f_open_pos > 0:
return f_current_price >= f_fair_price
elif f_open_pos < 0:
return f_current_price <= f_fair_price
def _compare_to_closeout(self, f_current_price, f_open_pos, agent, s_side):
        '''
        Check if the agent should close out the disclosed position at the
        current price, using a stop-gain band around the average entry price
        :param f_current_price: float. The current price in the hedging instr
        :param f_open_pos: float. the current disclosed position
        :param agent: Agent object. agent that needs to hedge
        :param s_side: string. side of the disclosed position fills
        '''
# short_current_price >= (long_avg_price-avg_spread_price + param)
s_instr = self.env.s_main_intrument
idx = int(abs(f_open_pos/agent.order_size))
l_disclosed = agent.d_trades[s_instr][s_side][-idx:]
f_long_avg_price = 0.
f_avg_spread = 0.
f_qtot = 0.
for f_p, f_q, d_tob in l_disclosed:
f_long_avg_price += f_p*f_q
f_qtot += f_q
f_long_avg_price /= f_qtot
f_avg_spread /= f_qtot
f_fair_price = (f_long_avg_price + self.f_target)
# keep the price into memory of the agent
agent.current_open_price = f_long_avg_price
s_err = 'POS: {}, MY PRICE: {}, CURRNT: {}, MAX: {}, MIN: {}'
s_err = s_err.format(f_open_pos, f_long_avg_price, f_current_price,
agent.current_max_price, agent.current_min_price)
if self.last_txt != s_err:
# print s_err + '\n'
self.last_txt = s_err
# recover prices
book_obj = agent.env.get_order_book(s_instr)
f_current_bid, i_qbid = book_obj.best_bid
f_current_ask, i_qask = book_obj.best_ask
f_bidask_spread = (f_current_ask - f_current_bid)
        # check if there is something weird in the prices
if (f_bidask_spread <= 0.005) or (f_bidask_spread > 0.04):
return False
# check if should execute the stop gain
if f_open_pos > 0:
f_gain = f_long_avg_price + self.f_target
if f_current_bid >= f_gain:
if i_qbid <= 400.:
return True
return f_current_bid > f_gain + self.f_target/2.
elif f_open_pos < 0:
f_gain = f_long_avg_price - self.f_target
if f_current_ask <= f_gain:
if i_qask <= 400.:
return True
return f_current_ask < f_gain - self.f_target/2.
return False
|
Python-3/basic_examples/strings/python_str_to_datetime.py | ghiloufibelgacem/jornaldev | 1,139 | 13163 | <filename>Python-3/basic_examples/strings/python_str_to_datetime.py<gh_stars>1000+
from datetime import datetime
# string to datetime object
datetime_str = '09/19/18 13:55:26'
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S')
print(type(datetime_object))
print(datetime_object) # printed in default format
# string to date object
date_str = '09-19-2018'
date_object = datetime.strptime(date_str, '%m-%d-%Y').date()
print(type(date_object))
print(date_object) # printed in default formatting
# string to time object
time_str = '13::55::26'
time_object = datetime.strptime(time_str, '%H::%M::%S').time()
print(type(time_object))
print(time_object)
# time module
import time
time_obj = time.strptime(time_str, '%H::%M::%S')
print(type(time_obj))
print(time_obj)
# default formatting - "%a %b %d %H:%M:%S %Y"
print(time.strptime('Wed Sep 19 14:55:02 2018'))
# exception handling example
datetime_str = '09/19/18 13:55:26'
try:
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y')
except ValueError as ve:
print('ValueError Raised:', ve)
time_str = '99::55::26'
try:
time_object = time.strptime(time_str, '%H::%M::%S')
except ValueError as e:
print('ValueError:', e)
# str to datetime with locale
import locale
locale.setlocale(locale.LC_ALL, 'de_DE')
date_str_de_DE = '10-Dezember-2018 Montag' # de_DE locale
datetime_object = datetime.strptime(date_str_de_DE, '%d-%B-%Y %A')
print(datetime_object)
|
Binary Search Tree/235. Lowest Common Ancestor of a Binary Search Tree.py | beckswu/Leetcode | 138 | 13176 | """
235. Lowest Common Ancestor of a Binary Search Tree
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
minn = min(p.val, q.val)
maxx = max(p.val,q.val)
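        # walk down from the root until the current value falls inside
        # [minn, maxx]; in a BST that node is the lowest common ancestor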
while root.val < minn or root.val>maxx:
if root.val < minn: root = root.right
else: root = root.left
return root
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if (p.val-root.val)*(q.val-root.val) <= 0:
return root
elif p.val < root.val:
return self.lowestCommonAncestor(root.left,p,q)
else:
return self.lowestCommonAncestor(root.right,p,q)
|
waliki/acl.py | sckevmit/waliki | 324 | 13177 | from functools import wraps
from collections import Iterable
from django.conf import settings
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six import string_types
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import resolve_url
from waliki.utils import is_authenticated
from .models import ACLRule
from .settings import (WALIKI_ANONYMOUS_USER_PERMISSIONS,
WALIKI_LOGGED_USER_PERMISSIONS,
WALIKI_RENDER_403)
def check_perms(perms, user, slug, raise_exception=False):
"""a helper user to check if a user has the permissions
for a given slug"""
if isinstance(perms, string_types):
perms = {perms}
else:
perms = set(perms)
allowed_users = ACLRule.get_users_for(perms, slug)
if allowed_users:
return user in allowed_users
if perms.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS)):
return True
if is_authenticated(user) and perms.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)):
return True
# First check if the user has the permission (even anon users)
if user.has_perms(['waliki.%s' % p for p in perms]):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
    This is analogous to Django's built-in ``permission_required`` decorator,
    but improved to check per-slug ACLRules and default permissions for
    anonymous and logged-in users.
    If there is a rule affecting a slug, the user needs to be part of the
    rule's allowed users. If there isn't a matching rule, default permissions
    apply.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if check_perms(perms, request.user, kwargs['slug'], raise_exception=raise_exception):
return view_func(request, *args, **kwargs)
if is_authenticated(request.user):
if WALIKI_RENDER_403:
return render(request, 'waliki/403.html', kwargs, status=403)
else:
raise PermissionDenied
path = request.build_absolute_uri()
# urlparse chokes on lazy objects in Python 3, force to str
resolved_login_url = force_str(
resolve_url(login_url or settings.LOGIN_URL))
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
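# Example usage (illustrative sketch only; the view and the permission name
# below are hypothetical and not part of this module):
#
# @permission_required('change_page')
# def edit(request, slug):
#     ...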
|
deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 3,782 | 13183 | """
Generate coulomb matrices for molecules.
See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer
class CoulombMatrix(MolecularFeaturizer):
"""Calculate Coulomb matrices for molecules.
Coulomb matrices provide a representation of the electronic structure of
a molecule. For a molecule with `N` atoms, the Coulomb matrix is a
`N X N` matrix where each element gives the strength of the
electrostatic interaction between two atoms. The method is described
in more detail in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrix(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
upper_tri: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
upper_tri: bool, optional (default False)
Generate only upper triangle part of Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.upper_tri = upper_tri
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate Coulomb matrices for molecules. If extra randomized
matrices are generated, they are treated as if they are features
for additional conformers.
Since Coulomb matrices are symmetric, only the (flattened) upper
triangular portion is returned.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule.
The default shape is `(num_confs, max_atoms, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms, max_atoms)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
features = self.coulomb_matrix(datapoint)
if self.upper_tri:
features = [f[np.triu_indices_from(f)] for f in features]
features = np.asarray(features)
if features.shape[0] == 1:
# `(1, max_atoms, max_atoms)` -> `(max_atoms, max_atoms)`
features = np.squeeze(features, axis=0)
return features
def coulomb_matrix(self, mol: RDKitMol) -> np.ndarray:
"""
Generate Coulomb matrices for each conformer of the given molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
# Check whether num_confs >=1 or not
num_confs = len(mol.GetConformers())
if num_confs == 0:
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol, AllChem.ETKDG())
if self.remove_hydrogens:
mol = Chem.RemoveHs(mol)
n_atoms = mol.GetNumAtoms()
z = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
rval = []
for conf in mol.GetConformers():
d = self.get_interatomic_distances(conf)
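      # off-diagonal entries are the pairwise terms Z_i * Z_j / d_ij, while
      # the diagonal uses the empirical 0.5 * Z_i ** 2.4 self-interaction term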
m = np.outer(z, z) / d
m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4
if self.randomize:
for random_m in self.randomize_coulomb_matrix(m):
random_m = pad_array(random_m, self.max_atoms)
rval.append(random_m)
else:
m = pad_array(m, self.max_atoms)
rval.append(m)
return np.asarray(rval)
def randomize_coulomb_matrix(self, m: np.ndarray) -> List[np.ndarray]:
"""Randomize a Coulomb matrix as decribed in [1]_:
1. Compute row norms for M in a vector row_norms.
2. Sample a zero-mean unit-variance noise vector e with dimension
equal to row_norms.
3. Permute the rows and columns of M with the permutation that
sorts row_norms + e.
Parameters
----------
m: np.ndarray
Coulomb matrix.
Returns
-------
List[np.ndarray]
List of the random coulomb matrix
References
----------
.. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003
"""
rval = []
row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float)
rng = np.random.RandomState(self.seed)
for i in range(self.n_samples):
e = rng.normal(size=row_norms.size)
p = np.argsort(row_norms + e)
new = m[p][:, p] # permute rows first, then columns
rval.append(new)
return rval
@staticmethod
def get_interatomic_distances(conf: Any) -> np.ndarray:
"""
Get interatomic distances for atoms in a molecular conformer.
Parameters
----------
conf: rdkit.Chem.rdchem.Conformer
Molecule conformer.
Returns
-------
np.ndarray
The distances matrix for all atoms in a molecule
"""
n_atoms = conf.GetNumAtoms()
coords = [
# Convert AtomPositions from Angstrom to bohr (atomic units)
conf.GetAtomPosition(i).__idiv__(0.52917721092) for i in range(n_atoms)
]
d = np.zeros((n_atoms, n_atoms), dtype=float)
for i in range(n_atoms):
for j in range(i):
d[i, j] = coords[i].Distance(coords[j])
d[j, i] = d[i, j]
return d
class CoulombMatrixEig(CoulombMatrix):
"""Calculate the eigenvalues of Coulomb matrices for molecules.
This featurizer computes the eigenvalues of the Coulomb matrices for provided
molecules. Coulomb matrices are described in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues
are returned sorted by absolute value in descending order and padded
by max_atoms.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The eigenvalues of Coulomb matrix for molecules.
The default shape is `(num_confs, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms,)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
cmat = self.coulomb_matrix(datapoint)
features_list = []
for f in cmat:
w, v = np.linalg.eig(f)
w_abs = np.abs(w)
sortidx = np.argsort(w_abs)
sortidx = sortidx[::-1]
w = w[sortidx]
f = pad_array(w, self.max_atoms)
features_list.append(f)
features = np.asarray(features_list)
if features.shape[0] == 1:
# `(1, max_atoms)` -> `(max_atoms,)`
features = np.squeeze(features, axis=0)
return features
|
pymagnitude/third_party/allennlp/tests/data/dataset_readers/snli_reader_test.py | tpeng/magnitude | 1,520 | 13200 | # pylint: disable=no-self-use,invalid-name
from __future__ import division
from __future__ import absolute_import
import pytest
from allennlp.data.dataset_readers import SnliReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
class TestSnliReader(object):
@pytest.mark.parametrize(u"lazy", (True, False))
def test_read_from_file(self, lazy):
reader = SnliReader(lazy=lazy)
instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / u'data' / u'snli.jsonl')
instances = ensure_list(instances)
instance1 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"training", u"his", u"horse", u"for", u"a",
u"competition", u"."],
u"label": u"neutral"}
instance2 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"at", u"a", u"diner", u",", u"ordering", u"an",
u"omelette", u"."],
u"label": u"contradiction"}
instance3 = {u"premise": [u"A", u"person", u"on", u"a", u"horse", u"jumps", u"over", u"a", u"broken",
u"down", u"airplane", u"."],
u"hypothesis": [u"A", u"person", u"is", u"outdoors", u",", u"on", u"a", u"horse", u"."],
u"label": u"entailment"}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields[u"premise"].tokens] == instance1[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance1[u"hypothesis"]
assert fields[u"label"].label == instance1[u"label"]
fields = instances[1].fields
assert [t.text for t in fields[u"premise"].tokens] == instance2[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance2[u"hypothesis"]
assert fields[u"label"].label == instance2[u"label"]
fields = instances[2].fields
assert [t.text for t in fields[u"premise"].tokens] == instance3[u"premise"]
assert [t.text for t in fields[u"hypothesis"].tokens] == instance3[u"hypothesis"]
assert fields[u"label"].label == instance3[u"label"]
|
tests/test_swagger_registry.py | niall-byrne/flask-restful-swagger | 667 | 13218 | from flask import Flask
from flask_restful_swagger.swagger import SwaggerRegistry
try:
from unittest.mock import patch
except ImportError:
from mock import patch
@patch("flask_restful_swagger.swagger._get_current_registry")
@patch("flask_restful_swagger.swagger.render_homepage")
def test_get_swagger_registry(homepage, registry):
mock_registry = {
"apiVersion": "mock_version",
"swaggerVersion": "mock_swagger_version",
"basePath": "mock_path",
"spec_endpoint_path": "mock_spec_endpoint_path",
"description": "mock_description",
}
registry.return_value = mock_registry
app = Flask(__name__)
resource = SwaggerRegistry()
bases = [base.__name__ for base in SwaggerRegistry.__mro__]
assert sorted(bases) == [
"MethodView",
"Resource",
"SwaggerRegistry",
"View",
"object",
]
with app.test_request_context(path="/some_path.html"):
_ = resource.get()
assert homepage.called
homepage.assert_called_once_with(
"mock_pathmock_spec_endpoint_path/_/resource_list.json"
)
with app.test_request_context(path="/some_path"):
homepage.reset_mock()
response = resource.get()
assert not homepage.called
assert response == mock_registry
|
components/mpas-seaice/testing_and_setup/testcases/advection/plot_testcase.py | Fa-Li/E3SM | 235 | 13265 | from netCDF4 import Dataset
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.cm as cm
import numpy as np
#-------------------------------------------------------------
def plot_subfigure(axis, array, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, cmin, cmax, cmap):
xMin = 1.0e30
xMax = -1.0e30
yMin = 1.0e30
yMax = -1.0e30
cmap = plt.get_cmap(cmap)
patches = []
colors = []
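    # only cells on the y > 0 hemisphere are drawn, projected onto the
    # x-z plane using the cell vertex coordinates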
for iCell in range(0,nCells):
if (yCell[iCell] > 0.0):
vertices = []
for iVertexOnCell in range(0,nEdgesOnCell[iCell]):
iVertex = verticesOnCell[iCell,iVertexOnCell]
vertices.append((xVertex[iVertex],zVertex[iVertex]))
colors.append(array[iCell])
patches.append(Polygon(vertices))
xMin = min(xMin,xVertex[iVertex])
xMax = max(xMax,xVertex[iVertex])
yMin = min(yMin,zVertex[iVertex])
yMax = max(yMax,zVertex[iVertex])
pc = PatchCollection(patches, cmap=cmap)
pc.set_array(np.array(colors))
pc.set_clim(cmin, cmax)
axis.add_collection(pc)
axis.set_xlim(xMin,xMax)
axis.set_ylim(yMin,yMax)
axis.set_aspect("equal")
axis.ticklabel_format(style='plain')
axis.tick_params(axis='x', \
which='both', \
bottom=False, \
top=False, \
labelbottom=False)
axis.tick_params(axis='y', \
which='both', \
left=False, \
right=False, \
labelleft=False)
#-------------------------------------------------------------
def plot_testcase():
nGrids = [2562,10242,40962,163842]
testTypes = ["cosine_bell","slotted_cylinder"]
methods = ["IR","IR","upwind"]
iTimes = [0,-1,-1]
for nGrid in nGrids:
print("nGrid: ", nGrid)
fig, axes = plt.subplots(3,4)
iTestType = -1
for testType in testTypes:
iTestType += 1
print(" Test type: ", testType)
iMethod = -1
for method, iTime in zip(methods,iTimes):
iMethod += 1
print(" Method: ", method, ", iTime: ", iTime)
filenamein = "./output_%s_%s_%i/output.2000.nc" %(method,testType,nGrid)
filein = Dataset(filenamein,"r")
nCells = len(filein.dimensions["nCells"])
nEdgesOnCell = filein.variables["nEdgesOnCell"][:]
verticesOnCell = filein.variables["verticesOnCell"][:]
xCell = filein.variables["xCell"][:]
yCell = filein.variables["yCell"][:]
zCell = filein.variables["zCell"][:]
xVertex = filein.variables["xVertex"][:]
yVertex = filein.variables["yVertex"][:]
zVertex = filein.variables["zVertex"][:]
verticesOnCell[:] = verticesOnCell[:] - 1
iceAreaCategory = filein.variables["iceAreaCategory"][:]
filein.close()
iceAreaCell = np.sum(iceAreaCategory,axis=(2,3))
plot_subfigure(axes[iMethod,iTestType*2], iceAreaCell[iTime], nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, 0.0, 1.0, "viridis")
iceAreaCellDiff = iceAreaCell[iTime] - iceAreaCell[0]
if (iMethod != 0):
plot_subfigure(axes[iMethod,iTestType*2+1], iceAreaCellDiff, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, -1.0, 1.0, "bwr")
else:
axes[iMethod,iTestType*2+1].axis('off')
plt.savefig("advection_%6.6i.png" %(nGrid),dpi=300)
plt.cla()
plt.close(fig)
#-------------------------------------------------------------------------------
if __name__ == "__main__":
plot_testcase()
|
examples/python/bunny_pieline.py | Willyzw/vdbfusion | 119 | 13272 | <reponame>Willyzw/vdbfusion
#!/usr/bin/env python3
# @file cow_pipeline.py
# @author <NAME> [<EMAIL>]
#
# Copyright (c) 2021 <NAME>, all rights reserved
import argh
from datasets import BunnyGeneratedDataset as Dataset
from vdbfusion_pipeline import VDBFusionPipeline as Pipeline
def main(
data_source: str,
config: str = "config/bunny.yaml",
visualize: bool = False,
):
"""Help here!"""
dataset = Dataset(data_source, apply_pose=True)
pipeline = Pipeline(dataset, config, map_name="bunny")
pipeline.run()
pipeline.visualize() if visualize else None
if __name__ == "__main__":
argh.dispatch_command(main)
|
freezer/storage/fslike.py | kwu83tw/freezer | 141 | 13294 | <reponame>kwu83tw/freezer
# (c) Copyright 2014,2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_serialization import jsonutils as json
from freezer.storage import physical
class FsLikeStorage(physical.PhysicalStorage, metaclass=abc.ABCMeta):
_type = 'fslike'
def __init__(self, storage_path,
max_segment_size, skip_prepare=False):
super(FsLikeStorage, self).__init__(
storage_path=storage_path,
max_segment_size=max_segment_size,
skip_prepare=skip_prepare)
def prepare(self):
self.create_dirs(self.storage_path)
def info(self):
pass
def write_backup(self, rich_queue, backup):
"""
Stores backup in storage
:type rich_queue: freezer.utils.streaming.RichQueue
:type backup: freezer.storage.base.Backup
"""
backup = backup.copy(storage=self)
path = backup.data_path
self.create_dirs(path.rsplit('/', 1)[0])
with self.open(path, mode='wb') as \
b_file:
for message in rich_queue.get_messages():
b_file.write(message)
def backup_blocks(self, backup):
"""
:param backup:
:type backup: freezer.storage.base.Backup
:return:
"""
with self.open(backup.data_path, 'rb') as backup_file:
while True:
chunk = backup_file.read(self.max_segment_size)
if len(chunk):
yield chunk
else:
break
@abc.abstractmethod
def open(self, filename, mode):
"""
:type filename: str
:param filename:
:type mode: str
:param mode:
:return:
"""
pass
def add_stream(self, stream, package_name, headers=None):
"""
:param stream: data
:param package_name: path
:param headers: backup metadata information
:return:
"""
split = package_name.rsplit('/', 1)
# create backup_basedir
backup_basedir = "{0}/{1}".format(self.storage_path,
package_name)
self.create_dirs(backup_basedir)
        # define backup_basepath and backup_metadata
backup_basepath = "{0}/{1}".format(backup_basedir,
split[0])
backup_metadata = "%s/metadata" % backup_basedir
# write backup to backup_basepath
with self.open(backup_basepath, 'wb') as backup_file:
for el in stream:
backup_file.write(el)
        # write the backup metadata to backup_metadata
with self.open(backup_metadata, 'wb') as backup_meta:
backup_meta.write(json.dumps(headers))
|
samples/create_project.py | zuarbase/server-client-python | 470 | 13298 | ####
# This script demonstrates how to use the Tableau Server Client
# to create new projects, both at the root level and how to nest them using
# parent_id.
#
#
# To run the script, you must have installed Python 3.6 or later.
####
import argparse
import logging
import sys
import tableauserverclient as TSC
def create_project(server, project_item):
try:
project_item = server.projects.create(project_item)
print('Created a new project called: %s' % project_item.name)
return project_item
except TSC.ServerResponseError:
print('We have already created this project: %s' % project_item.name)
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='Create new projects.')
# Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
parser.add_argument('--site', '-S', help='site name')
parser.add_argument('--token-name', '-p', required=True,
help='name of the personal access token used to sign into the server')
parser.add_argument('--token-value', '-v', required=True,
help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
# Options specific to this sample
# This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Use highest Server REST API version available
server.use_server_version()
# Without parent_id specified, projects are created at the top level.
top_level_project = TSC.ProjectItem(name='Top Level Project')
top_level_project = create_project(server, top_level_project)
# Specifying parent_id creates a nested projects.
child_project = TSC.ProjectItem(name='Child Project', parent_id=top_level_project.id)
child_project = create_project(server, child_project)
# Projects can be nested at any level.
grand_child_project = TSC.ProjectItem(name='Grand Child Project', parent_id=child_project.id)
grand_child_project = create_project(server, grand_child_project)
if __name__ == '__main__':
main()
|
tests/test_threading.py | nmandery/rasterio | 1,479 | 13299 | <filename>tests/test_threading.py
from threading import Thread
import time
import unittest
import rasterio as rio
from rasterio.env import get_gdal_config
class TestThreading(unittest.TestCase):
def test_multiopen(self):
"""
Open a file from different threads.
Regression test for issue #986
"""
def func(delay):
try:
with rio.open('tests/data/RGB.byte.tif'):
time.sleep(delay)
except Exception as err:
global exceptions
exceptions.append(err)
global exceptions
exceptions = []
t1 = Thread(target=func, args=(0.1,))
t2 = Thread(target=func, args=(0,))
with rio.Env():
t1.start()
t2.start() # potential error if Env manages globals unsafely
t1.join()
t2.join()
assert not exceptions
def test_reliability(self):
"""Allow for nondeterminism of race condition"""
for i in range(3):
self.test_multiopen()
def test_child_thread_inherits_env():
"""A new thread inherit's the main thread's env"""
def func():
with rio.Env(lol='wut'):
assert get_gdal_config('lol') == 'wut'
# The next config option will have been set in the main thread.
assert get_gdal_config('FROM_MAIN') is True
t1 = Thread(target=func)
with rio.Env(FROM_MAIN=True):
t1.start()
assert get_gdal_config('FROM_MAIN') is True
assert get_gdal_config('lol') is None
t1.join()
def test_child_thread_isolation():
"""Child threads have isolated environments"""
def func(key, value, other_key):
env = {key: value}
with rio.Env(**env):
assert get_gdal_config(key) == value
# The other key is one set in another child thread.
assert get_gdal_config(other_key) is None
t1 = Thread(target=func, args=('is_t1', True, 'is_t2'))
t2 = Thread(target=func, args=('is_t2', True, 'is_t1'))
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == '__main__':
unittest.main()
|
src/basset_sick_loss.py | shtoneyan/Basset | 248 | 13318 | <filename>src/basset_sick_loss.py
#!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import os
import random
import subprocess
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import pysam
from scipy.stats import binom
from scipy.stats.mstats import mquantiles
import seaborn as sns
import stats
################################################################################
# basset_sick_loss.py
#
# Shuffle SNPs that overlap DNase sites within their sites and compare the SAD
# distributions.
#
# Todo:
# -Control for GC% changes introduced by mutation shuffles.
# -Control for positional changes within the DHS regions.
# -Properly handle indels.
################################################################################
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <vcf_file> <sample_beds_file> <model_file>'
parser = OptionParser(usage)
parser.add_option('-f', dest='genome_fasta', default='%s/assembly/hg19.fa'%os.environ['HG19'], help='Genome FASTA [Default: %default]')
parser.add_option('-g', dest='gpu', default=False, action='store_true', help='Run on GPU [Default: %default]')
parser.add_option('-l', dest='seq_len', type='int', default=600, help='Sequence length provided to the model [Default: %default]')
parser.add_option('-o', dest='out_dir', default='sad_shuffle', help='Output directory')
parser.add_option('-r', dest='replot', default=False, action='store_true', help='Re-plot only, without re-computing [Default: %default]')
parser.add_option('-s', dest='num_shuffles', default=1, type='int', help='Number of SNP shuffles [Default: %default]')
parser.add_option('-t', dest='sad_table_file', help='Pre-computed SAD scores for the SNPs')
(options,args) = parser.parse_args()
if len(args) != 3:
parser.error('Must provide VCF file, sample BEDs file, and model file')
else:
vcf_file = args[0]
sample_beds_file = args[1]
model_file = args[2]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
# open reference genome
genome = pysam.Fastafile(options.genome_fasta)
# open binomial stats file
binom_out = open('%s/binom.txt' % options.out_dir, 'w')
# open mann-whitney stats file
mw_out = open('%s/mannwhitney.txt' % options.out_dir, 'w')
# plot defaults
sns.set(font_scale=1.5, style='ticks')
si = 0
for line in open(sample_beds_file):
sample, bed_file = line.split()
print(sample)
#########################################
# compute SAD
#########################################
# filter VCF to overlapping SNPs
print(" intersecting SNPs")
sample_vcf_file = '%s/%s.vcf' % (options.out_dir,sample)
if not options.replot:
filter_vcf(vcf_file, bed_file, sample_vcf_file)
# compute SAD scores for this sample's SNPs
print(" computing SAD")
if options.sad_table_file:
true_sad = retrieve_sad(sample_vcf_file, options.sad_table_file, si)
else:
true_sad = compute_sad(sample_vcf_file, model_file, si, '%s/%s_sad'%(options.out_dir,sample), options.seq_len, options.gpu, options.replot)
#########################################
# compute shuffled SAD
#########################################
shuffle_sad = np.zeros((true_sad.shape[0],options.num_shuffles))
for ni in range(options.num_shuffles):
# shuffle the SNPs within their overlapping DHS
print(" shuffle %d" % ni)
sample_vcf_shuf_file = '%s/%s_shuf%d.vcf' % (options.out_dir, sample, ni)
shuffle_snps(sample_vcf_file, sample_vcf_shuf_file, genome)
# compute SAD scores for shuffled SNPs
print(" computing shuffle SAD")
shuffle_sad[:,ni] = compute_sad(sample_vcf_shuf_file, model_file, si, '%s/%s_shuf%d_sad'%(options.out_dir,sample,ni), options.seq_len, options.gpu, options.replot)
#########################################
# simple stats
#########################################
# compute shuffle means
shuffle_sad_mean = shuffle_sad.mean(axis=1)
# print sample table
sample_sad_out = open('%s/%s_table.txt' % (options.out_dir,sample), 'w')
for vi in range(len(true_sad)):
print('%f\t%f' % (true_sad[vi], shuffle_sad_mean[vi]), file=sample_sad_out)
sample_sad_out.close()
# scatter plot
# plt.figure()
# plt.scatter(true_sad, shuffle_sad_mean, color='black', alpha=0.7)
# plt.gca().grid(True, linestyle=':')
# plt.savefig('%s/%s_scatter.pdf' % (options.out_dir,sample))
# plt.close()
# plot CDFs
sns_colors = sns.color_palette('deep')
plt.figure()
plt.hist(true_sad, 1000, normed=1, histtype='step', cumulative=True, color=sns_colors[0], linewidth=1, label='SNPs')
plt.hist(shuffle_sad.flatten(), 1000, normed=1, histtype='step', cumulative=True, color=sns_colors[2], linewidth=1, label='Shuffle')
ax = plt.gca()
ax.grid(True, linestyle=':')
ax.set_xlim(-.2, .2)
plt.legend()
plt.savefig('%s/%s_cdf.pdf' % (options.out_dir,sample))
plt.close()
# plot Q-Q
true_q = mquantiles(true_sad, np.linspace(0,1,min(10000,true_sad.shape[0])))
shuf_q = mquantiles(shuffle_sad_mean, np.linspace(0,1,min(10000,true_sad.shape[0])))
plt.figure()
plt.scatter(true_q, shuf_q, color=sns_colors[0])
pmin = 1.05*min(true_q[0], shuf_q[0])
pmax = 1.05*max(true_q[-1], shuf_q[-1])
plt.plot([pmin,pmax], [pmin,pmax], color='black', linewidth=1)
ax = plt.gca()
ax.set_xlim(pmin,pmax)
ax.set_ylim(pmin,pmax)
ax.set_xlabel('True SAD')
ax.set_ylabel('Shuffled SAD')
ax.grid(True, linestyle=':')
plt.savefig('%s/%s_qq.pdf' % (options.out_dir,sample))
plt.close()
#########################################
# statistical tests
#########################################
# compute matched binomial test
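        # (this is a sign test: under the null hypothesis each SNP's true SAD
        # exceeds its shuffled mean with probability 0.5)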
true_great = sum((true_sad-shuffle_sad_mean) > 0)
true_lo = np.log2(true_great) - np.log2(len(true_sad)-true_great)
if true_lo > 0:
binom_p = 1.0 - binom.cdf(true_great-1, n=len(true_sad), p=0.5)
else:
binom_p = binom.cdf(true_great, n=len(true_sad), p=0.5)
# print significance stats
cols = (sample, len(true_sad), true_great, true_lo, binom_p)
print('%-20s %5d %5d %6.2f %6.1e' % cols, file=binom_out)
# compute Mann-Whitney
mw_z, mw_p = stats.mannwhitneyu(true_sad, shuffle_sad.flatten())
cols = (sample, len(true_sad), true_sad.mean(), shuffle_sad.mean(), mw_z, mw_p)
print('%-20s %5d %6.3f %6.3f %6.2f %6.1e' % cols, file=mw_out)
# update sample index
si += 1
binom_out.close()
mw_out.close()
genome.close()
def compute_sad(sample_vcf_file, model_file, si, out_dir, seq_len, gpu, replot):
''' Run basset_sad.py to compute scores. '''
cuda_str = ''
if gpu:
cuda_str = '--cudnn'
cmd = 'basset_sad.py %s -l %d -o %s %s %s' % (cuda_str, seq_len, out_dir, model_file, sample_vcf_file)
if not replot:
subprocess.call(cmd, shell=True)
sad = []
for line in open('%s/sad_table.txt' % out_dir):
a = line.split()
if a[3] == 't%d'%si:
sad.append(float(a[-1]))
return np.array(sad)
def filter_vcf(vcf_file, bed_file, sample_vcf_file):
''' Filter the VCF file for SNPs that overlap
the BED file, removing indels. '''
# open filtered file
sample_vcf_out = open(sample_vcf_file, 'w')
# intersect
p = subprocess.Popen('bedtools intersect -wo -a %s -b %s' % (vcf_file, bed_file), stdout=subprocess.PIPE, shell=True)
for line in p.stdout:
a = line.split()
if len(a[3]) == len(a[4]) == 1:
print(line, file=sample_vcf_out, end='')
sample_vcf_out.close()
def retrieve_sad(sample_vcf_file, sad_table_file, si):
''' Retrieve SAD scores from a pre-computed table.
Note that I'm assuming here the table has all
SAD scores in one row for each SNP so I can
pull out the score I want as column si+1.
'''
snp_indexes = {}
vi = 0
for line in open(sample_vcf_file):
a = line.split()
snp_indexes[a[2]] = vi
vi += 1
sad = np.zeros(len(snp_indexes))
for line in open(sad_table_file):
a = line.split()
print(a)
if a[0] in snp_indexes:
sad[snp_indexes[a[0]]] = float(a[si+1])
return sad
def shuffle_snps(in_vcf_file, out_vcf_file, genome):
''' Shuffle the SNPs within their overlapping DHS. '''
out_vcf_open = open(out_vcf_file, 'w')
for line in open(in_vcf_file):
a = line.split()
# read SNP info
snp_chrom = a[0]
snp_pos = int(a[1])
snp_nt = a[3]
# determine BED start
bi = 5
while a[bi] != snp_chrom:
bi += 1
# read BED info
bed_chrom = a[bi]
bed_start = int(a[bi+1])
bed_end = int(a[bi+2])
# get sequence
bed_seq = genome.fetch(bed_chrom, bed_start, bed_end)
# determine matching positions
bed_nt_matches = [i for i in range(len(bed_seq)) if bed_seq[i] == snp_nt]
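        # restricting the shuffle to positions that carry the same reference
        # base keeps the ref->alt substitution type of the SNP unchanged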
while len(bed_nt_matches) == 0:
# expand segment by 10 nt
bed_start = max(0, bed_start-10)
bed_end += 10
bed_seq = genome.fetch(bed_chrom, bed_start, bed_end)
# sample new SNP position
shuf_pos = bed_start + 1 + random.choice(bed_nt_matches)
# write into columns
a[1] = str(shuf_pos)
print('\t'.join(a), file=out_vcf_open)
out_vcf_open.close()
def shuffle_snps_old(in_vcf_file, out_vcf_file, genome):
''' Shuffle the SNPs within their overlapping DHS. '''
out_vcf_open = open(out_vcf_file, 'w')
for line in open(in_vcf_file):
a = line.split()
# read SNP info
snp_chrom = a[0]
snp_pos = int(a[1])
# determine BED start
bi = 5
while a[bi] != snp_chrom:
bi += 1
# read BED info
bed_chrom = a[bi]
bed_start = int(a[bi+1])
bed_end = int(a[bi+2])
# sample new SNP position
shuf_pos = random.randint(bed_start, bed_end-1)
while shuf_pos == snp_pos:
shuf_pos = random.randint(bed_start, bed_end-1)
# set reference allele
ref_nt = genome.fetch(snp_chrom, shuf_pos-1, shuf_pos)
# sample alternate allele
alt_nt = random.choice('ACGT')
while alt_nt == ref_nt:
alt_nt = random.choice('ACGT')
# write into columns
a[1] = str(shuf_pos)
a[3] = ref_nt
a[4] = alt_nt
print('\t'.join(a), file=out_vcf_open)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
tests/test_vtable.py | matthewpruett/angr | 6,132 | 13335 | import os
import angr
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_vtable_extraction_x86_64():
p = angr.Project(os.path.join(test_location, "x86_64", "cpp_classes"), auto_load_libs=False)
vtables_sizes = {0x403cb0: 24, 0x403cd8: 16, 0x403cf8: 16, 0x403d18: 16}
vtable_analysis = p.analyses.VtableFinder()
vtables = vtable_analysis.vtables_list
assert len(vtables) == 4
for vtable in vtables:
assert vtable.vaddr in [0x403cb0, 0x403cd8, 0x403cf8, 0x403d18]
assert vtables_sizes[vtable.vaddr] == vtable.size
if __name__ == "__main__":
test_vtable_extraction_x86_64()
|
examples/map.py | jlsajfj/NBT | 241 | 13339 | <filename>examples/map.py
#!/usr/bin/env python
"""
Prints a map of the entire world.
"""
import os, sys
import math
from struct import pack
# local module
try:
import nbt
except ImportError:
# nbt not in search path. Let's see if it can be found in the parent folder
extrasearchpath = os.path.realpath(os.path.join(__file__,os.pardir,os.pardir))
if not os.path.exists(os.path.join(extrasearchpath,'nbt')):
raise
sys.path.append(extrasearchpath)
from nbt.region import RegionFile
from nbt.chunk import Chunk
from nbt.world import WorldFolder,McRegionWorldFolder
# PIL module (not build-in)
try:
from PIL import Image
except ImportError:
# PIL not in search path. Let's see if it can be found in the parent folder
sys.stderr.write("Module PIL/Image not found. Pillow (a PIL fork) can be found at http://python-imaging.github.io/\n")
# Note: it may also be possible that PIL is installed, but JPEG support is disabled or broken
sys.exit(70) # EX_SOFTWARE
def get_heightmap_image(chunk, buffer=False, gmin=False, gmax=False):
points = chunk.blocks.generate_heightmap(buffer, True)
# Normalize the points
hmin = min(points) if (gmin == False) else gmin # Allow setting the min/max explicitly, in case this is part of a bigger map
hmax = max(points) if (gmax == False) else gmax
hdelta = hmax-hmin+0.0
pixels = ""
for y in range(16):
for x in range(16):
# pix X => mc -Z
# pix Y => mc X
offset = (15-x)*16+y
height = int((points[offset]-hmin)/hdelta*255)
if (height < 0): height = 0
if (height > 255): height = 255
pixels += pack(">B", height)
im = Image.fromstring('L', (16,16), pixels)
return im
# List of blocks to ignore
# Uncomment all the lines to show underground structures
# TODO: move this list into a separate config file
block_ignore = [
'air', # At least this one
# 'cave_air', 'water', 'lava', 'snow', 'ice',
# 'grass', 'tall_grass', 'dead_bush',
# 'seagrass', 'tall_seagrass', 'kelp', 'kelp_plant',
# 'dandelion', 'poppy', 'oxeye_daisy', 'white_tulip',
# 'azure_bluet', 'lilac', 'rose_bush', 'peony', 'blue_orchid',
# 'lily_pad', 'sugar_cane', 'vine', 'pumpkin', 'cactus',
# 'wheat', 'potatoes', 'beetroots', 'carrots',
# 'oak_leaves', 'dark_oak_leaves', 'birch_leaves',
# 'acacia_leaves', 'spruce_leaves',
# 'oak_log', 'dark_oak_log', 'birch_log',
# 'acacia_log', 'spruce_log',
# 'brown_mushroom', 'red_mushroom',
# 'brown_mushroom_block', 'red_mushroom_block', 'mushroom_stem',
# 'grass_block', 'grass_path', 'farmland', 'dirt',
# 'stone', 'sand', 'gravel', 'clay',
# 'sandstone', 'diorite', 'andesite', 'granite', 'obsidian',
# 'coal_ore', 'iron_ore', 'gold_ore', 'diamond_ore',
# 'redstone_ore', 'lapis_ore', 'emerald_ore',
# 'cobweb',
]
# Map of block colors from names
# Legacy block numeric identifiers are now hidden by Block class
# and mapped to alpha identifiers in best effort
# TODO: move this map into a separate config file
block_colors = {
'acacia_leaves': {'h':114, 's':64, 'l':22 },
'acacia_log': {'h':35, 's':93, 'l':30 },
'air': {'h':0, 's':0, 'l':0 },
'andesite': {'h':0, 's':0, 'l':32 },
'azure_bluet': {'h':0, 's':0, 'l':100},
'bedrock': {'h':0, 's':0, 'l':10 },
'birch_leaves': {'h':114, 's':64, 'l':22 },
'birch_log': {'h':35, 's':93, 'l':30 },
'blue_orchid': {'h':0, 's':0, 'l':100},
'bookshelf': {'h':0, 's':0, 'l':100},
'brown_mushroom': {'h':0, 's':0, 'l':100},
'brown_mushroom_block': {'h':0, 's':0, 'l':100},
'cactus': {'h':126, 's':61, 'l':20 },
'cave_air': {'h':0, 's':0, 'l':0 },
'chest': {'h':0, 's':100, 'l':50 },
'clay': {'h':7, 's':62, 'l':23 },
'coal_ore': {'h':0, 's':0, 'l':10 },
'cobblestone': {'h':0, 's':0, 'l':25 },
'cobblestone_stairs': {'h':0, 's':0, 'l':25 },
'crafting_table': {'h':0, 's':0, 'l':100},
'dandelion': {'h':60, 's':100, 'l':60 },
'dark_oak_leaves': {'h':114, 's':64, 'l':22 },
'dark_oak_log': {'h':35, 's':93, 'l':30 },
'dark_oak_planks': {'h':35, 's':93, 'l':30 },
'dead_bush': {'h':0, 's':0, 'l':100},
'diorite': {'h':0, 's':0, 'l':32 },
'dirt': {'h':27, 's':51, 'l':15 },
'end_portal_frame': {'h':0, 's':100, 'l':50 },
'farmland': {'h':35, 's':93, 'l':15 },
'fire': {'h':55, 's':100, 'l':50 },
'flowing_lava': {'h':16, 's':100, 'l':48 },
'flowing_water': {'h':228, 's':50, 'l':23 },
'glass_pane': {'h':0, 's':0, 'l':100},
'granite': {'h':0, 's':0, 'l':32 },
'grass': {'h':94, 's':42, 'l':25 },
'grass_block': {'h':94, 's':42, 'l':32 },
'gravel': {'h':21, 's':18, 'l':20 },
'ice': {'h':240, 's':10, 'l':95 },
'infested_stone': {'h':320, 's':100, 'l':50 },
'iron_ore': {'h':22, 's':65, 'l':61 },
'iron_bars': {'h':22, 's':65, 'l':61 },
'ladder': {'h':35, 's':93, 'l':30 },
'lava': {'h':16, 's':100, 'l':48 },
'lilac': {'h':0, 's':0, 'l':100},
'lily_pad': {'h':114, 's':64, 'l':18 },
'lit_pumpkin': {'h':24, 's':100, 'l':45 },
'mossy_cobblestone': {'h':115, 's':30, 'l':50 },
'mushroom_stem': {'h':0, 's':0, 'l':100},
'oak_door': {'h':35, 's':93, 'l':30 },
'oak_fence': {'h':35, 's':93, 'l':30 },
'oak_fence_gate': {'h':35, 's':93, 'l':30 },
'oak_leaves': {'h':114, 's':64, 'l':22 },
'oak_log': {'h':35, 's':93, 'l':30 },
'oak_planks': {'h':35, 's':93, 'l':30 },
'oak_pressure_plate': {'h':35, 's':93, 'l':30 },
'oak_stairs': {'h':114, 's':64, 'l':22 },
'peony': {'h':0, 's':0, 'l':100},
'pink_tulip': {'h':0, 's':0, 'l':0 },
'poppy': {'h':0, 's':100, 'l':50 },
'pumpkin': {'h':24, 's':100, 'l':45 },
'rail': {'h':33, 's':81, 'l':50 },
'red_mushroom': {'h':0, 's':50, 'l':20 },
'red_mushroom_block': {'h':0, 's':50, 'l':20 },
'rose_bush': {'h':0, 's':0, 'l':100},
'sugar_cane': {'h':123, 's':70, 'l':50 },
'sand': {'h':53, 's':22, 'l':58 },
'sandstone': {'h':48, 's':31, 'l':40 },
'seagrass': {'h':94, 's':42, 'l':25 },
'sign': {'h':114, 's':64, 'l':22 },
'spruce_leaves': {'h':114, 's':64, 'l':22 },
'spruce_log': {'h':35, 's':93, 'l':30 },
'stone': {'h':0, 's':0, 'l':32 },
'stone_slab': {'h':0, 's':0, 'l':32 },
'tall_grass': {'h':94, 's':42, 'l':25 },
'tall_seagrass': {'h':94, 's':42, 'l':25 },
'torch': {'h':60, 's':100, 'l':50 },
'snow': {'h':240, 's':10, 'l':85 },
'spawner': {'h':180, 's':100, 'l':50 },
'vine': {'h':114, 's':64, 'l':18 },
'wall_torch': {'h':60, 's':100, 'l':50 },
'water': {'h':228, 's':50, 'l':23 },
'wheat': {'h':123, 's':60, 'l':50 },
'white_wool': {'h':0, 's':0, 'l':100},
}
def get_map(chunk):
# Show an image of the chunk from above
pixels = b""
for z in range(16):
for x in range(16):
# Find the highest block in this column
max_height = chunk.get_max_height()
ground_height = max_height
tints = []
for y in range(max_height,-1,-1):
block_id = chunk.get_block(x, y, z)
if block_id != None:
#block_data = 0 # TODO: use block properties
#if (block_id == 'water' or block_id == 'water'):
#tints.append({'h':228, 's':50, 'l':23}) # Water
#elif (block_id == 'leaves'): # TODO: old id - update
#if (block_data == 1):
#tints.append({'h':114, 's':64, 'l':22}) # Redwood Leaves
#elif (block_data == 2):
#tints.append({'h':93, 's':39, 'l':10}) # Birch Leaves
#else:
#tints.append({'h':114, 's':64, 'l':22}) # Normal Leaves
#elif (block_id == 'ice'):
#tints.append({'h':240, 's':5, 'l':95}) # Ice
#elif (block_id == 'fire'):
#tints.append({'h':55, 's':100, 'l':50}) # Fire
#elif (block_id != 'air' or block_id != 'cave_air' or y == 0):
if (block_id not in block_ignore or y == 0):
# Here is ground level
ground_height = y
break
if block_id != None:
if block_id in block_colors:
color = block_colors[block_id]
else:
color = {'h':0, 's':0, 'l':100}
print("warning: unknown color for block id: %s" % block_id)
print("hint: add that block to the 'block_colors' map")
else:
color = {'h':0, 's':0, 'l':0}
height_shift = 0 #(ground_height-64)*0.25
final_color = {'h':color['h'], 's':color['s'], 'l':color['l'] + height_shift}
if final_color['l'] > 100: final_color['l'] = 100
if final_color['l'] < 0: final_color['l'] = 0
# Apply tints from translucent blocks
for tint in reversed(tints):
final_color = hsl_slide(final_color, tint, 0.4)
rgb = hsl2rgb(final_color['h'], final_color['s'], final_color['l'])
pixels += pack("BBB", rgb[0], rgb[1], rgb[2])
im = Image.frombytes('RGB', (16,16), pixels)
return im
## Color functions for map generation ##
# Hue given in degrees,
# saturation and lightness given either in range 0-1 or 0-100 and returned in kind
def hsl_slide(hsl1, hsl2, ratio):
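    # bring the two hues within 180 degrees of each other before blending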
if (abs(hsl2['h'] - hsl1['h']) > 180):
if (hsl1['h'] > hsl2['h']):
hsl1['h'] -= 360
else:
hsl1['h'] += 360
# Find location of two colors on the H/S color circle
p1x = math.cos(math.radians(hsl1['h']))*hsl1['s']
p1y = math.sin(math.radians(hsl1['h']))*hsl1['s']
p2x = math.cos(math.radians(hsl2['h']))*hsl2['s']
p2y = math.sin(math.radians(hsl2['h']))*hsl2['s']
# Slide part of the way from tint to base color
avg_x = p1x + ratio*(p2x-p1x)
avg_y = p1y + ratio*(p2y-p1y)
    # atan2/hypot keep the hue in the correct quadrant and avoid a division
    # by zero when the blended point lies on the vertical axis (avg_x == 0)
    avg_h = math.atan2(avg_y, avg_x)
    avg_s = math.hypot(avg_x, avg_y)
    avg_l = hsl1['l'] + ratio*(hsl2['l']-hsl1['l'])
    avg_h = math.degrees(avg_h)
#print('tint: %s base: %s avg: %s %s %s' % (tint,final_color,avg_h,avg_s,avg_l))
return {'h':avg_h, 's':avg_s, 'l':avg_l}
# From http://www.easyrgb.com/index.php?X=MATH&H=19#text19
def hsl2rgb(H,S,L):
H = H/360.0
    S = S/100.0 # Convert from a 0-100 percentage to a 0-1 fraction
L = L/100.0
if (S == 0):
return (int(L*255), int(L*255), int(L*255))
var_2 = L * (1+S) if (L < 0.5) else (L+S) - (S*L)
var_1 = 2*L - var_2
def hue2rgb(v1, v2, vH):
if (vH < 0): vH += 1
if (vH > 1): vH -= 1
if ((6*vH)<1): return v1 + (v2-v1)*6*vH
if ((2*vH)<1): return v2
if ((3*vH)<2): return v1 + (v2-v1)*(2/3.0-vH)*6
return v1
R = int(255*hue2rgb(var_1, var_2, H + (1.0/3)))
G = int(255*hue2rgb(var_1, var_2, H))
B = int(255*hue2rgb(var_1, var_2, H - (1.0/3)))
return (R,G,B)
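# Illustrative sketch of how the two color helpers above compose; the values
# below are arbitrary examples added for clarity, not part of the original
# script.
#
#   hsl2rgb(0, 100, 50)     # pure red -> (255, 0, 0)
#   hsl2rgb(120, 100, 25)   # dark green -> (0, 127, 0)
#   # Move a red base 40% of the way towards a green tint, as get_map() does
#   # for translucent blocks:
#   blended = hsl_slide({'h': 0, 's': 100, 'l': 50},
#                       {'h': 120, 's': 100, 'l': 50}, 0.4)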
def main(world_folder, show=True):
world = WorldFolder(world_folder)
bb = world.get_boundingbox()
world_map = Image.new('RGB', (16*bb.lenx(),16*bb.lenz()))
t = world.chunk_count()
try:
i =0.0
for chunk in world.iter_chunks():
if i % 50 ==0:
sys.stdout.write("Rendering image")
elif i % 2 == 0:
sys.stdout.write(".")
sys.stdout.flush()
elif i % 50 == 49:
sys.stdout.write("%5.1f%%\n" % (100*i/t))
i +=1
chunkmap = get_map(chunk)
x,z = chunk.get_coords()
world_map.paste(chunkmap, (16*(x-bb.minx),16*(z-bb.minz)))
print(" done\n")
filename = os.path.basename(world_folder)+".png"
world_map.save(filename,"PNG")
print("Saved map as %s" % filename)
except KeyboardInterrupt:
print(" aborted\n")
filename = os.path.basename(world_folder)+".partial.png"
world_map.save(filename,"PNG")
print("Saved map as %s" % filename)
return 75 # EX_TEMPFAIL
if show:
world_map.show()
return 0 # NOERR
if __name__ == '__main__':
if (len(sys.argv) == 1):
print("No world folder specified!")
sys.exit(64) # EX_USAGE
if sys.argv[1] == '--noshow' and len(sys.argv) > 2:
show = False
world_folder = sys.argv[2]
else:
show = True
world_folder = sys.argv[1]
# clean path name, eliminate trailing slashes. required for os.path.basename()
world_folder = os.path.normpath(world_folder)
if (not os.path.exists(world_folder)):
print("No such folder as "+world_folder)
sys.exit(72) # EX_IOERR
sys.exit(main(world_folder, show))
|
desktop/core/ext-py/josepy-1.1.0/src/josepy/json_util.py | kokosing/hue | 5,079 | 13360 | """JSON (de)serialization framework.
The framework presented here is somewhat based on `Go's "json" package`_
(especially the ``omitempty`` functionality).
.. _`Go's "json" package`: http://golang.org/pkg/encoding/json/
"""
import abc
import binascii
import logging
import OpenSSL
import six
from josepy import b64, errors, interfaces, util
logger = logging.getLogger(__name__)
class Field(object):
"""JSON object field.
:class:`Field` is meant to be used together with
:class:`JSONObjectWithFields`.
``encoder`` (``decoder``) is a callable that accepts a single
parameter, i.e. a value to be encoded (decoded), and returns the
serialized (deserialized) value. In case of errors it should raise
:class:`~josepy.errors.SerializationError`
(:class:`~josepy.errors.DeserializationError`).
    Note that ``decoder`` should perform partial serialization only.
:ivar str json_name: Name of the field when encoded to JSON.
:ivar default: Default value (used when not present in JSON object).
:ivar bool omitempty: If ``True`` and the field value is empty, then
it will not be included in the serialized JSON object, and
``default`` will be used for deserialization. Otherwise, if ``False``,
field is considered as required, value will always be included in the
        serialized JSON object, and it must also be present when
deserializing.
"""
__slots__ = ('json_name', 'default', 'omitempty', 'fdec', 'fenc')
def __init__(self, json_name, default=None, omitempty=False,
decoder=None, encoder=None):
# pylint: disable=too-many-arguments
self.json_name = json_name
self.default = default
self.omitempty = omitempty
self.fdec = self.default_decoder if decoder is None else decoder
self.fenc = self.default_encoder if encoder is None else encoder
@classmethod
def _empty(cls, value):
"""Is the provided value considered "empty" for this field?
This is useful for subclasses that might want to override the
definition of being empty, e.g. for some more exotic data types.
"""
return not isinstance(value, bool) and not value
def omit(self, value):
"""Omit the value in output?"""
return self._empty(value) and self.omitempty
def _update_params(self, **kwargs):
current = dict(json_name=self.json_name, default=self.default,
omitempty=self.omitempty,
decoder=self.fdec, encoder=self.fenc)
current.update(kwargs)
return type(self)(**current) # pylint: disable=star-args
def decoder(self, fdec):
"""Descriptor to change the decoder on JSON object field."""
return self._update_params(decoder=fdec)
def encoder(self, fenc):
"""Descriptor to change the encoder on JSON object field."""
return self._update_params(encoder=fenc)
def decode(self, value):
"""Decode a value, optionally with context JSON object."""
return self.fdec(value)
def encode(self, value):
"""Encode a value, optionally with context JSON object."""
return self.fenc(value)
@classmethod
def default_decoder(cls, value):
"""Default decoder.
Recursively deserialize into immutable types (
:class:`josepy.util.frozendict` instead of
:func:`dict`, :func:`tuple` instead of :func:`list`).
"""
        # base cases for the different types returned by json.loads
if isinstance(value, list):
return tuple(cls.default_decoder(subvalue) for subvalue in value)
elif isinstance(value, dict):
return util.frozendict(
dict((cls.default_decoder(key), cls.default_decoder(value))
for key, value in six.iteritems(value)))
else: # integer or string
return value
@classmethod
def default_encoder(cls, value):
"""Default (passthrough) encoder."""
# field.to_partial_json() is no good as encoder has to do partial
# serialization only
return value
class JSONObjectWithFieldsMeta(abc.ABCMeta):
"""Metaclass for :class:`JSONObjectWithFields` and its subclasses.
It makes sure that, for any class ``cls`` with ``__metaclass__``
set to ``JSONObjectWithFieldsMeta``:
1. All fields (attributes of type :class:`Field`) in the class
definition are moved to the ``cls._fields`` dictionary, where
keys are field attribute names and values are fields themselves.
2. ``cls.__slots__`` is extended by all field attribute names
(i.e. not :attr:`Field.json_name`). Original ``cls.__slots__``
are stored in ``cls._orig_slots``.
    As a consequence, for a field attribute name ``some_field``,
``cls.some_field`` will be a slot descriptor and not an instance
of :class:`Field`. For example::
some_field = Field('someField', default=())
class Foo(object):
__metaclass__ = JSONObjectWithFieldsMeta
__slots__ = ('baz',)
some_field = some_field
assert Foo.__slots__ == ('some_field', 'baz')
assert Foo._orig_slots == ()
assert Foo.some_field is not Field
assert Foo._fields.keys() == ['some_field']
assert Foo._fields['some_field'] is some_field
As an implementation note, this metaclass inherits from
:class:`abc.ABCMeta` (and not the usual :class:`type`) to mitigate
the metaclass conflict (:class:`ImmutableMap` and
:class:`JSONDeSerializable`, parents of :class:`JSONObjectWithFields`,
use :class:`abc.ABCMeta` as its metaclass).
"""
def __new__(mcs, name, bases, dikt):
fields = {}
for base in bases:
fields.update(getattr(base, '_fields', {}))
# Do not reorder, this class might override fields from base classes!
for key, value in tuple(six.iteritems(dikt)):
# not six.iterkeys() (in-place edit!)
if isinstance(value, Field):
fields[key] = dikt.pop(key)
dikt['_orig_slots'] = dikt.get('__slots__', ())
dikt['__slots__'] = tuple(
list(dikt['_orig_slots']) + list(six.iterkeys(fields)))
dikt['_fields'] = fields
return abc.ABCMeta.__new__(mcs, name, bases, dikt)
@six.add_metaclass(JSONObjectWithFieldsMeta)
class JSONObjectWithFields(util.ImmutableMap, interfaces.JSONDeSerializable):
# pylint: disable=too-few-public-methods
"""JSON object with fields.
Example::
class Foo(JSONObjectWithFields):
bar = Field('Bar')
empty = Field('Empty', omitempty=True)
@bar.encoder
def bar(value):
return value + 'bar'
@bar.decoder
def bar(value):
if not value.endswith('bar'):
raise errors.DeserializationError('No bar suffix!')
return value[:-3]
assert Foo(bar='baz').to_partial_json() == {'Bar': 'bazbar'}
assert Foo.from_json({'Bar': 'bazbar'}) == Foo(bar='baz')
assert (Foo.from_json({'Bar': 'bazbar', 'Empty': '!'})
== Foo(bar='baz', empty='!'))
assert Foo(bar='baz').bar == 'baz'
"""
@classmethod
def _defaults(cls):
"""Get default fields values."""
return dict([(slot, field.default) for slot, field
in six.iteritems(cls._fields)])
def __init__(self, **kwargs):
# pylint: disable=star-args
super(JSONObjectWithFields, self).__init__(
**(dict(self._defaults(), **kwargs)))
def encode(self, name):
"""Encode a single field.
:param str name: Name of the field to be encoded.
:raises errors.SerializationError: if field cannot be serialized
:raises errors.Error: if field could not be found
"""
try:
field = self._fields[name]
except KeyError:
raise errors.Error("Field not found: {0}".format(name))
return field.encode(getattr(self, name))
def fields_to_partial_json(self):
"""Serialize fields to JSON."""
jobj = {}
omitted = set()
for slot, field in six.iteritems(self._fields):
value = getattr(self, slot)
if field.omit(value):
omitted.add((slot, value))
else:
try:
jobj[field.json_name] = field.encode(value)
except errors.SerializationError as error:
raise errors.SerializationError(
'Could not encode {0} ({1}): {2}'.format(
slot, value, error))
return jobj
def to_partial_json(self):
return self.fields_to_partial_json()
@classmethod
def _check_required(cls, jobj):
missing = set()
for _, field in six.iteritems(cls._fields):
if not field.omitempty and field.json_name not in jobj:
missing.add(field.json_name)
if missing:
raise errors.DeserializationError(
'The following fields are required: {0}'.format(
','.join(missing)))
@classmethod
def fields_from_json(cls, jobj):
"""Deserialize fields from JSON."""
cls._check_required(jobj)
fields = {}
for slot, field in six.iteritems(cls._fields):
if field.json_name not in jobj and field.omitempty:
fields[slot] = field.default
else:
value = jobj[field.json_name]
try:
fields[slot] = field.decode(value)
except errors.DeserializationError as error:
raise errors.DeserializationError(
'Could not decode {0!r} ({1!r}): {2}'.format(
slot, value, error))
return fields
@classmethod
def from_json(cls, jobj):
return cls(**cls.fields_from_json(jobj))
def encode_b64jose(data):
"""Encode JOSE Base-64 field.
:param bytes data:
:rtype: `unicode`
"""
# b64encode produces ASCII characters only
return b64.b64encode(data).decode('ascii')
def decode_b64jose(data, size=None, minimum=False):
"""Decode JOSE Base-64 field.
:param unicode data:
:param int size: Required length (after decoding).
:param bool minimum: If ``True``, then `size` will be treated as
minimum required length, as opposed to exact equality.
:rtype: bytes
"""
error_cls = TypeError if six.PY2 else binascii.Error
try:
decoded = b64.b64decode(data.encode())
except error_cls as error:
raise errors.DeserializationError(error)
if size is not None and ((not minimum and len(decoded) != size) or
(minimum and len(decoded) < size)):
raise errors.DeserializationError(
"Expected at least or exactly {0} bytes".format(size))
return decoded
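# Illustrative round-trip of the two JOSE Base-64 helpers above (added for
# clarity; the byte string is an arbitrary example):
#
#   encode_b64jose(b'foo')                        # -> 'Zm9v' (unpadded, URL-safe)
#   decode_b64jose('Zm9v')                        # -> b'foo'
#   decode_b64jose('Zm9v', size=3)                # exact-length check passes
#   decode_b64jose('Zm9v', size=8, minimum=True)  # raises DeserializationError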
def encode_hex16(value):
"""Hexlify.
:param bytes value:
:rtype: unicode
"""
return binascii.hexlify(value).decode()
def decode_hex16(value, size=None, minimum=False):
"""Decode hexlified field.
:param unicode value:
:param int size: Required length (after decoding).
:param bool minimum: If ``True``, then `size` will be treated as
minimum required length, as opposed to exact equality.
:rtype: bytes
"""
value = value.encode()
if size is not None and ((not minimum and len(value) != size * 2) or
(minimum and len(value) < size * 2)):
raise errors.DeserializationError()
error_cls = TypeError if six.PY2 else binascii.Error
try:
return binascii.unhexlify(value)
except error_cls as error:
raise errors.DeserializationError(error)
def encode_cert(cert):
"""Encode certificate as JOSE Base-64 DER.
:type cert: `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
:rtype: unicode
"""
return encode_b64jose(OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1, cert.wrapped))
def decode_cert(b64der):
"""Decode JOSE Base-64 DER-encoded certificate.
:param unicode b64der:
:rtype: `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
"""
try:
return util.ComparableX509(OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_ASN1, decode_b64jose(b64der)))
except OpenSSL.crypto.Error as error:
raise errors.DeserializationError(error)
def encode_csr(csr):
"""Encode CSR as JOSE Base-64 DER.
:type csr: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
:rtype: unicode
"""
return encode_b64jose(OpenSSL.crypto.dump_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, csr.wrapped))
def decode_csr(b64der):
"""Decode JOSE Base-64 DER-encoded CSR.
:param unicode b64der:
:rtype: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
"""
try:
return util.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, decode_b64jose(b64der)))
except OpenSSL.crypto.Error as error:
raise errors.DeserializationError(error)
class TypedJSONObjectWithFields(JSONObjectWithFields):
"""JSON object with type."""
typ = NotImplemented
"""Type of the object. Subclasses must override."""
type_field_name = "type"
"""Field name used to distinguish different object types.
Subclasses will probably have to override this.
"""
TYPES = NotImplemented
"""Types registered for JSON deserialization"""
@classmethod
def register(cls, type_cls, typ=None):
"""Register class for JSON deserialization."""
typ = type_cls.typ if typ is None else typ
cls.TYPES[typ] = type_cls
return type_cls
@classmethod
def get_type_cls(cls, jobj):
"""Get the registered class for ``jobj``."""
if cls in six.itervalues(cls.TYPES):
if cls.type_field_name not in jobj:
raise errors.DeserializationError(
"Missing type field ({0})".format(cls.type_field_name))
# cls is already registered type_cls, force to use it
# so that, e.g Revocation.from_json(jobj) fails if
# jobj["type"] != "revocation".
return cls
if not isinstance(jobj, dict):
raise errors.DeserializationError(
"{0} is not a dictionary object".format(jobj))
try:
typ = jobj[cls.type_field_name]
except KeyError:
raise errors.DeserializationError("missing type field")
try:
return cls.TYPES[typ]
except KeyError:
raise errors.UnrecognizedTypeError(typ, jobj)
def to_partial_json(self):
"""Get JSON serializable object.
:returns: Serializable JSON object representing ACME typed object.
:meth:`validate` will almost certainly not work, due to reasons
explained in :class:`josepy.interfaces.IJSONSerializable`.
:rtype: dict
"""
jobj = self.fields_to_partial_json()
jobj[self.type_field_name] = self.typ
return jobj
@classmethod
def from_json(cls, jobj):
"""Deserialize ACME object from valid JSON object.
:raises josepy.errors.UnrecognizedTypeError: if type
of the ACME object has not been registered.
"""
# make sure subclasses don't cause infinite recursive from_json calls
type_cls = cls.get_type_cls(jobj)
return type_cls(**type_cls.fields_from_json(jobj))
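# Illustrative sketch of how the registration machinery above is typically
# used. The class names are made-up placeholders (loosely modelled on ACME
# challenge types) and are not defined in this module.
#
#   class Challenge(TypedJSONObjectWithFields):
#       TYPES = {}
#       type_field_name = 'type'
#
#   @Challenge.register
#   class HTTP01(Challenge):
#       typ = 'http-01'
#       token = Field('token')
#
#   Challenge.from_json({'type': 'http-01', 'token': 'abc'})  # -> HTTP01(token='abc')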
|
src/triage/component/results_schema/alembic/versions/5dd2ba8222b1_add_run_type.py | josephbajor/triage_NN | 160 | 13406 | """add run_type
Revision ID: 5dd2ba8222b1
Revises: 079a74c15e8b
Create Date: 2021-07-22 23:53:04.043651
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '5dd2ba8222b1'
down_revision = '079a74c15e8b'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('experiment_runs', sa.Column('run_type', sa.Text(), nullable=True), schema='triage_metadata')
op.execute("UPDATE triage_metadata.experiment_runs SET run_type='experiment' WHERE run_type IS NULL")
op.alter_column('experiment_runs', 'experiment_hash', nullable=True, new_column_name='run_hash', schema='triage_metadata')
op.drop_constraint('experiment_runs_experiment_hash_fkey', 'experiment_runs', type_='foreignkey', schema='triage_metadata')
op.execute("ALTER TABLE triage_metadata.experiment_runs RENAME TO triage_runs")
op.create_table('retrain',
sa.Column('retrain_hash', sa.Text(), nullable=False),
sa.Column('config', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column('prediction_date', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('retrain_hash'),
schema='triage_metadata',
)
op.alter_column('models', 'built_in_experiment_run', nullable=False, new_column_name='built_in_triage_run', schema='triage_metadata')
op.execute("CREATE TABLE triage_metadata.deprecated_models_built_by_experiment AS SELECT model_id, model_hash, built_by_experiment FROM triage_metadata.models")
op.drop_column('models', 'built_by_experiment', schema='triage_metadata')
op.create_table('retrain_models',
sa.Column('retrain_hash', sa.String(), nullable=False),
sa.Column('model_hash', sa.String(), nullable=False),
sa.ForeignKeyConstraint(['retrain_hash'], ['triage_metadata.retrain.retrain_hash'], ),
sa.PrimaryKeyConstraint('retrain_hash', 'model_hash'),
schema='triage_metadata'
)
def downgrade():
op.execute("ALTER TABLE triage_metadata.triage_runs RENAME TO experiment_runs")
op.drop_column('experiment_runs', 'run_type', schema='triage_metadata')
op.alter_column('experiment_runs', 'run_hash', nullable=True, new_column_name='experiment_hash', schema='triage_metadata')
op.create_foreign_key('experiment_runs_experiment_hash_fkey', 'experiment_runs', 'experiments', ['experiment_hash'], ['experiment_hash'], source_schema='triage_metadata', referent_schema='triage_metadata')
op.drop_table('retrain_models', schema='triage_metadata')
op.drop_table('retrain', schema='triage_metadata')
op.add_column('models', sa.Column('built_by_experiment', sa.Text(), nullable=True), schema='triage_metadata')
op.alter_column('models', 'built_in_triage_run', nullable=False, new_column_name='built_in_experiment_run', schema='triage_metadata')
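# Illustrative usage (assumes the standard Alembic CLI workflow; shown for
# orientation only, not part of the migration itself):
#
#   alembic upgrade 5dd2ba8222b1    # apply this revision
#   alembic downgrade 079a74c15e8b  # revert to the previous revision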
|
projects/PanopticFCN_cityscapes/panopticfcn/__init__.py | fatihyildiz-cs/detectron2 | 166 | 13407 | from .config import add_panopticfcn_config
from .panoptic_seg import PanopticFCN
from .build_solver import build_lr_scheduler
|
txdav/common/datastore/upgrade/test/test_migrate.py | backwardn/ccs-calendarserver | 462 | 13409 | <filename>txdav/common/datastore/upgrade/test/test_migrate.py
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for L{txdav.common.datastore.upgrade.migrate}.
"""
from twext.enterprise.adbapi2 import Pickle
from twext.enterprise.dal.syntax import Delete
from twext.python.filepath import CachingFilePath
from txweb2.http_headers import MimeType
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
from twisted.internet.protocol import Protocol
from twisted.protocols.amp import AMP, Command, String
from twisted.python.modules import getModule
from twisted.python.reflect import qual, namedAny
from twisted.trial.unittest import TestCase
from twistedcaldav import customxml, caldavxml
from twistedcaldav.config import config
from twistedcaldav.ical import Component
from txdav.base.propertystore.base import PropertyName
from txdav.caldav.datastore.test.common import CommonTests
from txdav.carddav.datastore.test.common import CommonTests as ABCommonTests
from txdav.common.datastore.file import CommonDataStore
from txdav.common.datastore.sql_tables import schema
from txdav.common.datastore.test.util import SQLStoreBuilder
from txdav.common.datastore.test.util import (
populateCalendarsFrom, StubNotifierFactory, resetCalendarMD5s,
populateAddressBooksFrom, resetAddressBookMD5s, deriveValue,
withSpecialValue, CommonCommonTests
)
from txdav.common.datastore.upgrade.migrate import UpgradeToDatabaseStep, \
StoreSpawnerService, swapAMP
from txdav.xml import element
import copy
class CreateStore(Command):
"""
Create a store in a subprocess.
"""
arguments = [('delegateTo', String())]
class PickleConfig(Command):
"""
Unpickle some configuration in a subprocess.
"""
arguments = [('delegateTo', String()),
('config', Pickle())]
class StoreCreator(AMP):
"""
Helper protocol.
"""
@CreateStore.responder
def createStore(self, delegateTo):
"""
Create a store and pass it to the named delegate class.
"""
swapAMP(self, namedAny(delegateTo)(SQLStoreBuilder.childStore()))
return {}
@PickleConfig.responder
def pickleConfig(self, config, delegateTo):
# from twistedcaldav.config import config as globalConfig
# globalConfig._data = config._data
swapAMP(self, namedAny(delegateTo)(config))
return {}
class StubSpawner(StoreSpawnerService):
"""
Stub spawner service which populates the store forcibly.
"""
def __init__(self, config=None):
super(StubSpawner, self).__init__()
self.config = config
@inlineCallbacks
def spawnWithStore(self, here, there):
"""
'here' and 'there' are the helper protocols 'there' will expect to be
created with an instance of a store.
"""
master = yield self.spawn(AMP(), StoreCreator)
yield master.callRemote(CreateStore, delegateTo=qual(there))
returnValue(swapAMP(master, here))
@inlineCallbacks
def spawnWithConfig(self, config, here, there):
"""
Similar to spawnWithStore except the child process gets a configuration
object instead.
"""
master = yield self.spawn(AMP(), StoreCreator)
subcfg = copy.deepcopy(self.config)
del subcfg._postUpdateHooks[:]
yield master.callRemote(PickleConfig, config=subcfg,
delegateTo=qual(there))
returnValue(swapAMP(master, here))
class HomeMigrationTests(CommonCommonTests, TestCase):
"""
Tests for L{UpgradeToDatabaseStep}.
"""
av1 = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//calendarserver.org//Zonal//EN
BEGIN:VAVAILABILITY
ORGANIZER:mailto:<EMAIL>
UID:<EMAIL>
DTSTAMP:20061005T133225Z
DTEND:20140101T000000Z
BEGIN:AVAILABLE
UID:<EMAIL>
DTSTAMP:20061005T133225Z
SUMMARY:Monday to Friday from 9:00 to 17:00
DTSTART:20130101T090000Z
DTEND:20130101T170000Z
RRULE:FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
END:AVAILABLE
END:VAVAILABILITY
END:VCALENDAR
""")
@inlineCallbacks
def setUp(self):
"""
Set up two stores to migrate between.
"""
yield super(HomeMigrationTests, self).setUp()
yield self.buildStoreAndDirectory(
extraUids=(
u"home1",
u"home2",
u"home3",
u"home_defaults",
u"home_no_splits",
u"home_splits",
u"home_splits_shared",
)
)
self.sqlStore = self.store
# Add some files to the file store.
self.filesPath = CachingFilePath(self.mktemp())
self.filesPath.createDirectory()
fileStore = self.fileStore = CommonDataStore(
self.filesPath, {"push": StubNotifierFactory()}, self.directory, True, True
)
self.upgrader = UpgradeToDatabaseStep(self.fileStore, self.sqlStore)
requirements = CommonTests.requirements
extras = deriveValue(self, "extraRequirements", lambda t: {})
requirements = self.mergeRequirements(requirements, extras)
yield populateCalendarsFrom(requirements, fileStore)
md5s = CommonTests.md5s
yield resetCalendarMD5s(md5s, fileStore)
self.filesPath.child("calendars").child(
"__uids__").child("ho").child("me").child("home1").child(
".some-extra-data").setContent("some extra data")
requirements = ABCommonTests.requirements
yield populateAddressBooksFrom(requirements, fileStore)
md5s = ABCommonTests.md5s
yield resetAddressBookMD5s(md5s, fileStore)
self.filesPath.child("addressbooks").child(
"__uids__").child("ho").child("me").child("home1").child(
".some-extra-data").setContent("some extra data")
# Add some properties we want to check get migrated over
txn = self.fileStore.newTransaction()
home = yield txn.calendarHomeWithUID("home_defaults")
cal = yield home.calendarWithName("calendar_1")
props = cal.properties()
props[PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet)] = caldavxml.SupportedCalendarComponentSet(
caldavxml.CalendarComponent(name="VEVENT"),
caldavxml.CalendarComponent(name="VTODO"),
)
props[PropertyName.fromElement(element.ResourceType)] = element.ResourceType(
element.Collection(),
caldavxml.Calendar(),
)
props[PropertyName.fromElement(customxml.GETCTag)] = customxml.GETCTag.fromString("foobar")
inbox = yield home.calendarWithName("inbox")
props = inbox.properties()
props[PropertyName.fromElement(customxml.CalendarAvailability)] = customxml.CalendarAvailability.fromString(str(self.av1))
props[PropertyName.fromElement(caldavxml.ScheduleDefaultCalendarURL)] = caldavxml.ScheduleDefaultCalendarURL(
element.HRef.fromString("/calendars/__uids__/home_defaults/calendar_1"),
)
yield txn.commit()
def mergeRequirements(self, a, b):
"""
Merge two requirements dictionaries together, modifying C{a} and
returning it.
@param a: Some requirements, in the format of
L{CommonTests.requirements}.
@type a: C{dict}
@param b: Some additional requirements, to be merged into C{a}.
@type b: C{dict}
@return: C{a}
@rtype: C{dict}
"""
for homeUID in b:
homereq = a.setdefault(homeUID, {})
homeExtras = b[homeUID]
for calendarUID in homeExtras:
calreq = homereq.setdefault(calendarUID, {})
calendarExtras = homeExtras[calendarUID]
calreq.update(calendarExtras)
return a
@withSpecialValue(
"extraRequirements",
{
"home1": {
"calendar_1": {
"bogus.ics": (
getModule("twistedcaldav").filePath.sibling("zoneinfo")
.child("EST.ics").getContent(),
CommonTests.metadata1
)
}
}
}
)
@inlineCallbacks
def test_unknownTypeNotMigrated(self):
"""
The only types of calendar objects that should get migrated are VEVENTs
and VTODOs. Other component types, such as free-standing VTIMEZONEs,
don't have a UID and can't be stored properly in the database, so they
should not be migrated.
"""
yield self.upgrader.stepWithResult(None)
txn = self.sqlStore.newTransaction()
self.addCleanup(txn.commit)
self.assertIdentical(
None,
(yield (yield (yield (
yield txn.calendarHomeWithUID("home1")
).calendarWithName("calendar_1"))
).calendarObjectWithName("bogus.ics"))
)
@inlineCallbacks
def test_upgradeCalendarHomes(self):
"""
L{UpgradeToDatabaseService.startService} will do the upgrade, then
start its dependent service by adding it to its service hierarchy.
"""
# Create a fake directory in the same place as a home, but with a non-existent uid
fake_dir = self.filesPath.child("calendars").child("__uids__").child("ho").child("me").child("foobar")
fake_dir.makedirs()
        # Create a fake file in the same place as a home, with a name that matches the hash uid prefix
fake_file = self.filesPath.child("calendars").child("__uids__").child("ho").child("me").child("home_file")
fake_file.setContent("")
yield self.upgrader.stepWithResult(None)
txn = self.sqlStore.newTransaction()
self.addCleanup(txn.commit)
for uid in CommonTests.requirements:
if CommonTests.requirements[uid] is not None:
self.assertNotIdentical(
None, (yield txn.calendarHomeWithUID(uid))
)
# Successfully migrated calendar homes are deleted
self.assertFalse(self.filesPath.child("calendars").child(
"__uids__").child("ho").child("me").child("home1").exists())
# Want metadata preserved
home = (yield txn.calendarHomeWithUID("home1"))
calendar = (yield home.calendarWithName("calendar_1"))
for name, metadata, md5 in (
("1.ics", CommonTests.metadata1, CommonTests.md5Values[0]),
("2.ics", CommonTests.metadata2, CommonTests.md5Values[1]),
("3.ics", CommonTests.metadata3, CommonTests.md5Values[2]),
):
object = (yield calendar.calendarObjectWithName(name))
self.assertEquals(object.getMetadata(), metadata)
self.assertEquals(object.md5(), md5)
@withSpecialValue(
"extraRequirements",
{
"nonexistent": {
"calendar_1": {
}
}
}
)
@inlineCallbacks
def test_upgradeCalendarHomesMissingDirectoryRecord(self):
"""
Test an upgrade where a directory record is missing for a home;
the original home directory will remain on disk.
"""
yield self.upgrader.stepWithResult(None)
txn = self.sqlStore.newTransaction()
self.addCleanup(txn.commit)
for uid in CommonTests.requirements:
if CommonTests.requirements[uid] is not None:
self.assertNotIdentical(
None, (yield txn.calendarHomeWithUID(uid))
)
self.assertIdentical(None, (yield txn.calendarHomeWithUID(u"nonexistent")))
# Skipped calendar homes are not deleted
self.assertTrue(self.filesPath.child("calendars").child(
"__uids__").child("no").child("ne").child("nonexistent").exists())
@inlineCallbacks
def test_upgradeExistingHome(self):
"""
L{UpgradeToDatabaseService.startService} will skip migrating existing
homes.
"""
startTxn = self.sqlStore.newTransaction("populate empty sample")
yield startTxn.calendarHomeWithUID("home1", create=True)
yield startTxn.commit()
yield self.upgrader.stepWithResult(None)
vrfyTxn = self.sqlStore.newTransaction("verify sample still empty")
self.addCleanup(vrfyTxn.commit)
home = yield vrfyTxn.calendarHomeWithUID("home1")
# The default calendar is still there.
self.assertNotIdentical(None, (yield home.calendarWithName("calendar")))
# The migrated calendar isn't.
self.assertIdentical(None, (yield home.calendarWithName("calendar_1")))
@inlineCallbacks
def test_upgradeAttachments(self):
"""
L{UpgradeToDatabaseService.startService} upgrades calendar attachments
as well.
"""
# Need to tweak config and settings to setup dropbox to work
self.patch(config, "EnableDropBox", True)
self.patch(config, "EnableManagedAttachments", False)
self.sqlStore.enableManagedAttachments = False
txn = self.sqlStore.newTransaction()
cs = schema.CALENDARSERVER
yield Delete(
From=cs,
Where=cs.NAME == "MANAGED-ATTACHMENTS"
).on(txn)
yield txn.commit()
txn = self.fileStore.newTransaction()
committed = []
def maybeCommit():
if not committed:
committed.append(True)
return txn.commit()
self.addCleanup(maybeCommit)
@inlineCallbacks
def getSampleObj():
home = (yield txn.calendarHomeWithUID("home1"))
calendar = (yield home.calendarWithName("calendar_1"))
object = (yield calendar.calendarObjectWithName("1.ics"))
returnValue(object)
inObject = yield getSampleObj()
someAttachmentName = "some-attachment"
someAttachmentType = MimeType.fromString("application/x-custom-type")
attachment = yield inObject.createAttachmentWithName(
someAttachmentName,
)
transport = attachment.store(someAttachmentType)
someAttachmentData = "Here is some data for your attachment, enjoy."
transport.write(someAttachmentData)
yield transport.loseConnection()
yield maybeCommit()
yield self.upgrader.stepWithResult(None)
committed = []
txn = self.sqlStore.newTransaction()
outObject = yield getSampleObj()
outAttachment = yield outObject.attachmentWithName(someAttachmentName)
allDone = Deferred()
class SimpleProto(Protocol):
data = ''
def dataReceived(self, data):
self.data += data
def connectionLost(self, reason):
allDone.callback(self.data)
self.assertEquals(outAttachment.contentType(), someAttachmentType)
outAttachment.retrieve(SimpleProto())
allData = yield allDone
self.assertEquals(allData, someAttachmentData)
@inlineCallbacks
def test_upgradeAddressBookHomes(self):
"""
L{UpgradeToDatabaseService.startService} will do the upgrade, then
start its dependent service by adding it to its service hierarchy.
"""
yield self.upgrader.stepWithResult(None)
txn = self.sqlStore.newTransaction()
self.addCleanup(txn.commit)
for uid in ABCommonTests.requirements:
if ABCommonTests.requirements[uid] is not None:
self.assertNotIdentical(
None, (yield txn.addressbookHomeWithUID(uid))
)
# Successfully migrated addressbook homes are deleted
self.assertFalse(self.filesPath.child("addressbooks").child(
"__uids__").child("ho").child("me").child("home1").exists())
# Want metadata preserved
home = (yield txn.addressbookHomeWithUID("home1"))
adbk = (yield home.addressbookWithName("addressbook"))
for name, md5 in (
("1.vcf", ABCommonTests.md5Values[0]),
("2.vcf", ABCommonTests.md5Values[1]),
("3.vcf", ABCommonTests.md5Values[2]),
):
object = (yield adbk.addressbookObjectWithName(name))
self.assertEquals(object.md5(), md5)
@inlineCallbacks
def test_upgradeProperties(self):
"""
L{UpgradeToDatabaseService.startService} will do the upgrade, then
start its dependent service by adding it to its service hierarchy.
"""
yield self.upgrader.stepWithResult(None)
txn = self.sqlStore.newTransaction()
self.addCleanup(txn.commit)
# Want metadata preserved
home = (yield txn.calendarHomeWithUID("home_defaults"))
cal = (yield home.calendarWithName("calendar_1"))
inbox = (yield home.calendarWithName("inbox"))
# Supported components
self.assertEqual(cal.getSupportedComponents(), "VEVENT")
self.assertTrue(cal.properties().get(PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet)) is None)
# Resource type removed
self.assertTrue(cal.properties().get(PropertyName.fromElement(element.ResourceType)) is None)
# Ctag removed
self.assertTrue(cal.properties().get(PropertyName.fromElement(customxml.GETCTag)) is None)
# Availability
self.assertEquals(str(home.getAvailability()), str(self.av1))
self.assertTrue(inbox.properties().get(PropertyName.fromElement(customxml.CalendarAvailability)) is None)
# Default calendar
self.assertTrue(home.isDefaultCalendar(cal))
self.assertTrue(inbox.properties().get(PropertyName.fromElement(caldavxml.ScheduleDefaultCalendarURL)) is None)
def test_fileStoreFromPath(self):
"""
Verify that fileStoreFromPath() will return a CommonDataStore if
the given path contains either "calendars" or "addressbooks"
sub-directories. Otherwise it returns None
"""
# No child directories
docRootPath = CachingFilePath(self.mktemp())
docRootPath.createDirectory()
step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
self.assertEquals(step, None)
# "calendars" child directory exists
childPath = docRootPath.child("calendars")
childPath.createDirectory()
step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
self.assertTrue(isinstance(step, CommonDataStore))
childPath.remove()
# "addressbooks" child directory exists
childPath = docRootPath.child("addressbooks")
childPath.createDirectory()
step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
self.assertTrue(isinstance(step, CommonDataStore))
childPath.remove()
|
tower_cli/resources/job.py | kedark3/tower-cli | 363 | 13456 | <reponame>kedark3/tower-cli
# Copyright 2015, Ansible, Inc.
# <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
from getpass import getpass
from distutils.version import LooseVersion
import click
from tower_cli import models, get_resource, resources, exceptions as exc
from tower_cli.api import client
from tower_cli.cli import types
from tower_cli.utils import debug, parser
PROMPT_LIST = ['diff_mode', 'limit', 'tags', 'skip_tags', 'job_type', 'verbosity', 'inventory', 'credential']
class Resource(models.ExeResource):
"""A resource for jobs.
This resource has ordinary list and get methods,
but it does not have create or modify.
Instead of being created, a job is launched.
"""
cli_help = 'Launch or monitor jobs.'
endpoint = '/jobs/'
job_template = models.Field(
key='-J',
type=types.Related('job_template'), required=False, display=True
)
job_explanation = models.Field(required=False, display=False, read_only=True)
created = models.Field(required=False, display=True)
status = models.Field(required=False, display=True)
elapsed = models.Field(required=False, display=True, type=float)
@resources.command(
use_fields_as_options=('job_template',)
)
@click.option('--monitor', is_flag=True, default=False,
help='If sent, immediately calls `job monitor` on the newly '
'launched job rather than exiting with a success.')
@click.option('--wait', is_flag=True, default=False,
help='Monitor the status of the job, but do not print '
'while job is in progress.')
@click.option('--timeout', required=False, type=int,
help='If provided with --monitor, this command (not the job)'
' will time out after the given number of seconds. '
'Does nothing if --monitor is not sent.')
@click.option('--no-input', is_flag=True, default=False,
help='Suppress any requests for input.')
@click.option('-e', '--extra-vars', required=False, multiple=True,
help='yaml format text that contains extra variables '
'to pass on. Use @ to get these from a file.')
@click.option('--diff-mode', type=bool, required=False, help='Specify diff mode for job template to run.')
@click.option('--limit', required=False, help='Specify host limit for job template to run.')
@click.option('--tags', required=False, help='Specify tagged actions in the playbook to run.')
@click.option('--skip-tags', required=False, help='Specify tagged actions in the playbook to omit.')
@click.option('--job-type', required=False, type=click.Choice(['run', 'check']),
help='Specify job type for job template to run.')
@click.option('--verbosity', type=int, required=False, help='Specify verbosity of the playbook run.')
@click.option('--inventory', required=False, type=types.Related('inventory'),
help='Specify inventory for job template to run.')
@click.option('--credential', required=False, multiple=True, type=types.Related('credential'),
help='Specify any type of credential(s) for job template to run.')
def launch(self, job_template=None, monitor=False, wait=False,
timeout=None, no_input=True, extra_vars=None, **kwargs):
"""Launch a new job based on a job template.
Creates a new job in Ansible Tower, immediately starts it, and
returns back an ID in order for its status to be monitored.
=====API DOCS=====
Launch a new job based on a job template.
        :param job_template: Primary key or name of the job template to launch a new job from.
:type job_template: str
:param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched job rather
than exiting with a success.
:type monitor: bool
:param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.
:type wait: bool
:param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number
of seconds.
:type timeout: int
:param no_input: Flag that if set, suppress any requests for input.
:type no_input: bool
        :param extra_vars: YAML-formatted strings that contain extra variables to pass on.
:type extra_vars: array of strings
:param diff_mode: Specify diff mode for job template to run.
:type diff_mode: bool
:param limit: Specify host limit for job template to run.
:type limit: str
:param tags: Specify tagged actions in the playbook to run.
:type tags: str
:param skip_tags: Specify tagged actions in the playbook to omit.
:type skip_tags: str
:param job_type: Specify job type for job template to run.
:type job_type: str
:param verbosity: Specify verbosity of the playbook run.
:type verbosity: int
        :param inventory: Specify inventory for job template to run.
:type inventory: str
:param credential: Specify machine credential for job template to run.
:type credential: str
:returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent
``wait`` call if ``wait`` flag is on; Result of subsequent ``status`` call if none of
the two flags are on.
:rtype: dict
=====API DOCS=====
"""
# Get the job template from Ansible Tower.
# This is used as the baseline for starting the job.
jt_resource = get_resource('job_template')
jt = jt_resource.get(job_template)
# Update the job data for special treatment of certain fields
# Special case for job tags, historically just called --tags
tags = kwargs.get('tags', None)
data = {}
if tags:
data['job_tags'] = tags
# Special case for cross-version compatibility with credentials
cred_arg = kwargs.pop('credential', ())
if isinstance(cred_arg, (list, tuple)):
credentials = cred_arg
else:
credentials = [cred_arg]
if credentials:
if 'credentials' in jt['related']:
# Has Tower 3.3 / multi-cred support
# combine user-provided credentials with JT credentials
jt_creds = set(
c['id'] for c in jt['summary_fields']['credentials']
)
kwargs['credentials'] = list(set(credentials) | jt_creds)
else:
if len(credentials) > 1:
raise exc.UsageError(
'Providing multiple credentials on launch can only be '
'done with Tower version 3.3 and higher or recent AWX.'
)
kwargs['credential'] = credentials[0]
# Initialize an extra_vars list that starts with the job template
# preferences first, if they exist
extra_vars_list = []
if 'extra_vars' in data and len(data['extra_vars']) > 0:
# But only do this for versions before 2.3
debug.log('Getting version of Tower.', header='details')
r = client.get('/config/')
if LooseVersion(r.json()['version']) < LooseVersion('2.4'):
extra_vars_list = [data['extra_vars']]
# Add the runtime extra_vars to this list
if extra_vars:
extra_vars_list += list(extra_vars) # accept tuples
# If the job template requires prompting for extra variables,
# do so (unless --no-input is set).
if jt.get('ask_variables_on_launch', False) and not no_input \
and not extra_vars:
# If JT extra_vars are JSON, echo them to user as YAML
initial = parser.process_extra_vars(
[jt['extra_vars']], force_json=False
)
initial = '\n'.join((
'# Specify extra variables (if any) here as YAML.',
'# Lines beginning with "#" denote comments.',
initial,
))
extra_vars = click.edit(initial) or ''
if extra_vars != initial:
extra_vars_list = [extra_vars]
# Data is starting out with JT variables, and we only want to
# include extra_vars that come from the algorithm here.
data.pop('extra_vars', None)
# Replace/populate data fields if prompted.
modified = set()
for resource in PROMPT_LIST:
if jt.pop('ask_' + resource + '_on_launch', False) and not no_input:
resource_object = kwargs.get(resource, None)
if type(resource_object) == types.Related:
resource_class = get_resource(resource)
resource_object = resource_class.get(resource).pop('id', None)
if resource_object is None:
debug.log('{0} is asked at launch but not provided'.
format(resource), header='warning')
elif resource != 'tags':
data[resource] = resource_object
modified.add(resource)
# Dump extra_vars into JSON string for launching job
if len(extra_vars_list) > 0:
data['extra_vars'] = parser.process_extra_vars(
extra_vars_list, force_json=True
)
# Create the new job in Ansible Tower.
start_data = {}
endpoint = '/job_templates/%d/launch/' % jt['id']
if 'extra_vars' in data and len(data['extra_vars']) > 0:
start_data['extra_vars'] = data['extra_vars']
if tags:
start_data['job_tags'] = data['job_tags']
for resource in PROMPT_LIST:
if resource in modified:
start_data[resource] = data[resource]
# There's a non-trivial chance that we are going to need some
# additional information to start the job; in particular, many jobs
# rely on passwords entered at run-time.
#
# If there are any such passwords on this job, ask for them now.
debug.log('Asking for information necessary to start the job.',
header='details')
job_start_info = client.get(endpoint).json()
for password in job_start_info.get('passwords_needed_to_start', []):
start_data[password] = getpass('Password for %s: ' % password)
# Actually start the job.
debug.log('Launching the job.', header='details')
self._pop_none(kwargs)
kwargs.update(start_data)
job_started = client.post(endpoint, data=kwargs)
# Get the job ID from the result.
job_id = job_started.json()['id']
# If returning json indicates any ignored fields, display it in
# verbose mode.
if job_started.text == '':
ignored_fields = {}
else:
ignored_fields = job_started.json().get('ignored_fields', {})
has_ignored_fields = False
for key, value in ignored_fields.items():
if value and value != '{}':
if not has_ignored_fields:
debug.log('List of ignored fields on the server side:',
header='detail')
has_ignored_fields = True
debug.log('{0}: {1}'.format(key, value))
# Get some information about the running job to print
result = self.status(pk=job_id, detail=True)
result['changed'] = True
# If we were told to monitor the job once it started, then call
# monitor from here.
if monitor:
return self.monitor(job_id, timeout=timeout)
elif wait:
return self.wait(job_id, timeout=timeout)
return result
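# Illustrative CLI sketch of the launch command defined above. The template
# name, variables and limit are made-up placeholders.
#
#   tower-cli job launch --job-template "Demo Template" --monitor \
#       -e "region: us-east-1" --limit "webservers"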
|
ares/attack/bim.py | KuanKuanQAQ/ares | 206 | 13464 | <reponame>KuanKuanQAQ/ares
import tensorflow as tf
import numpy as np
from ares.attack.base import BatchAttack
from ares.attack.utils import get_xs_ph, get_ys_ph, maybe_to_array, get_unit
class BIM(BatchAttack):
    ''' Basic Iterative Method (BIM). A white-box iterative constraint-based method. Requires a differentiable loss
function and a ``ares.model.Classifier`` model.
- Supported distance metric: ``l_2``, ``l_inf``.
- Supported goal: ``t``, ``tm``, ``ut``.
- References: https://arxiv.org/abs/1607.02533.
'''
def __init__(self, model, batch_size, loss, goal, distance_metric, session, iteration_callback=None):
''' Initialize BIM.
:param model: The model to attack. A ``ares.model.Classifier`` instance.
:param batch_size: Batch size for the ``batch_attack()`` method.
:param loss: The loss function to optimize. A ``ares.loss.Loss`` instance.
:param goal: Adversarial goals. All supported values are ``'t'``, ``'tm'``, and ``'ut'``.
:param distance_metric: Adversarial distance metric. All supported values are ``'l_2'`` and ``'l_inf'``.
:param session: The ``tf.Session`` to run the attack in. The ``model`` should be loaded into this session.
        :param iteration_callback: A function that accepts a ``xs`` ``tf.Tensor`` (the original examples) and a ``xs_adv``
            ``tf.Tensor`` (the adversarial examples for ``xs``). During ``batch_attack()``, this callback function would
            be run after each iteration, and its return value would be yielded back to the caller. By default,
            ``iteration_callback`` is ``None``.
'''
self.model, self.batch_size, self._session = model, batch_size, session
self.loss, self.goal, self.distance_metric = loss, goal, distance_metric
# placeholder for batch_attack's input
self.xs_ph = get_xs_ph(model, batch_size)
self.ys_ph = get_ys_ph(model, batch_size)
# flatten shape of xs_ph
xs_flatten_shape = (batch_size, np.prod(self.model.x_shape))
# store xs and ys in variables to reduce memory copy between tensorflow and python
# variable for the original example with shape of (batch_size, D)
self.xs_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))
# variable for labels
self.ys_var = tf.Variable(tf.zeros(shape=(batch_size,), dtype=self.model.y_dtype))
# variable for the (hopefully) adversarial example with shape of (batch_size, D)
self.xs_adv_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))
# magnitude
self.eps_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))
self.eps_var = tf.Variable(tf.zeros((self.batch_size,), dtype=self.model.x_dtype))
# step size
self.alpha_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))
self.alpha_var = tf.Variable(tf.zeros((self.batch_size,), dtype=self.model.x_dtype))
# expand dim for easier broadcast operations
eps = tf.expand_dims(self.eps_var, 1)
alpha = tf.expand_dims(self.alpha_var, 1)
# calculate loss' gradient with relate to the adversarial example
# grad.shape == (batch_size, D)
self.xs_adv_model = tf.reshape(self.xs_adv_var, (batch_size, *self.model.x_shape))
self.loss = loss(self.xs_adv_model, self.ys_var)
grad = tf.gradients(self.loss, self.xs_adv_var)[0]
if goal == 't' or goal == 'tm':
grad = -grad
elif goal != 'ut':
raise NotImplementedError
# update the adversarial example
if distance_metric == 'l_2':
grad_unit = get_unit(grad)
xs_adv_delta = self.xs_adv_var - self.xs_var + alpha * grad_unit
# clip by max l_2 magnitude of adversarial noise
xs_adv_next = self.xs_var + tf.clip_by_norm(xs_adv_delta, eps, axes=[1])
elif distance_metric == 'l_inf':
xs_lo, xs_hi = self.xs_var - eps, self.xs_var + eps
grad_sign = tf.sign(grad)
# clip by max l_inf magnitude of adversarial noise
xs_adv_next = tf.clip_by_value(self.xs_adv_var + alpha * grad_sign, xs_lo, xs_hi)
else:
raise NotImplementedError
# clip by (x_min, x_max)
xs_adv_next = tf.clip_by_value(xs_adv_next, self.model.x_min, self.model.x_max)
self.update_xs_adv_step = self.xs_adv_var.assign(xs_adv_next)
self.config_eps_step = self.eps_var.assign(self.eps_ph)
self.config_alpha_step = self.alpha_var.assign(self.alpha_ph)
self.setup_xs = [self.xs_var.assign(tf.reshape(self.xs_ph, xs_flatten_shape)),
self.xs_adv_var.assign(tf.reshape(self.xs_ph, xs_flatten_shape))]
self.setup_ys = self.ys_var.assign(self.ys_ph)
self.iteration = None
self.iteration_callback = None
if iteration_callback is not None:
xs_model = tf.reshape(self.xs_var, (self.batch_size, *self.model.x_shape))
self.iteration_callback = iteration_callback(xs_model, self.xs_adv_model)
def config(self, **kwargs):
''' (Re)config the attack.
:param magnitude: Max distortion, could be either a float number or a numpy float number array with shape of
(batch_size,).
:param alpha: Step size for each iteration, could be either a float number or a numpy float number array with
shape of (batch_size,).
:param iteration: Iteration count. An integer.
'''
if 'magnitude' in kwargs:
eps = maybe_to_array(kwargs['magnitude'], self.batch_size)
self._session.run(self.config_eps_step, feed_dict={self.eps_ph: eps})
if 'alpha' in kwargs:
alpha = maybe_to_array(kwargs['alpha'], self.batch_size)
self._session.run(self.config_alpha_step, feed_dict={self.alpha_ph: alpha})
if 'iteration' in kwargs:
self.iteration = kwargs['iteration']
def _batch_attack_generator(self, xs, ys, ys_target):
''' Attack a batch of examples. It is a generator which yields back ``iteration_callback()``'s return value
after each iteration if the ``iteration_callback`` is not ``None``, and returns the adversarial examples.
'''
labels = ys if self.goal == 'ut' else ys_target
self._session.run(self.setup_xs, feed_dict={self.xs_ph: xs})
self._session.run(self.setup_ys, feed_dict={self.ys_ph: labels})
for _ in range(self.iteration):
self._session.run(self.update_xs_adv_step)
if self.iteration_callback is not None:
yield self._session.run(self.iteration_callback)
return self._session.run(self.xs_adv_model)
def batch_attack(self, xs, ys=None, ys_target=None):
''' Attack a batch of examples.
:return: When the ``iteration_callback`` is ``None``, return the generated adversarial examples. When the
``iteration_callback`` is not ``None``, return a generator, which yields back the callback's return value
after each iteration and returns the generated adversarial examples.
'''
g = self._batch_attack_generator(xs, ys, ys_target)
if self.iteration_callback is None:
try:
next(g)
except StopIteration as exp:
return exp.value
else:
return g
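# Illustrative usage sketch (assumes `model`, `loss`, `session`, `xs` and `ys`
# already exist; they are not constructed here):
#
#   attack = BIM(model, batch_size=32, loss=loss, goal='ut',
#                distance_metric='l_inf', session=session)
#   attack.config(magnitude=8.0 / 255.0, alpha=2.0 / 255.0, iteration=10)
#   xs_adv = attack.batch_attack(xs, ys=ys)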
|
test/test_oneview_hypervisor_cluster_profile_facts.py | nabhajit-ray/oneview-ansible | 108 | 13467 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
import mock
from copy import deepcopy
from hpe_test_utils import OneViewBaseFactsTest
from oneview_module_loader import HypervisorClusterProfileFactsModule
PROFILE_URI = '/rest/hypervisor-cluster-profiles/57d3af2a-b6d2-4446-8645-f38dd808ea4d'
PARAMS_GET_ALL = dict(
config='config.json'
)
PARAMS_GET_BY_NAME = dict(
config='config.json',
name="Test Cluster Profile"
)
PARAMS_GET_BY_URI = dict(
config='config.json',
uri="/rest/test/123"
)
PARAMS_WITH_OPTIONS = dict(
config='config.json',
name="Test Cluster Profile",
options=[
'compliancePreview',
]
)
@pytest.mark.resource(TestHypervisorClusterProfileFactsModule='hypervisor_cluster_profiles')
class TestHypervisorClusterProfileFactsModule(OneViewBaseFactsTest):
"""
FactsParamsTestCase has common tests for the parameters support.
"""
def test_should_get_all_cluster_profiles(self):
cluster_profiles = [
{"name": "Cluster Profile Name 1"},
{"name": "Cluster Profile Name 2"}
]
self.mock_ov_client.hypervisor_cluster_profiles.get_all.return_value = cluster_profiles
self.mock_ansible_module.params = deepcopy(PARAMS_GET_ALL)
HypervisorClusterProfileFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(hypervisor_cluster_profiles=cluster_profiles)
)
def test_should_get_by_name(self):
profile = {"name": "Test Cluster Profile", 'uri': '/rest/test/123'}
obj = mock.Mock()
obj.data = profile
self.mock_ov_client.hypervisor_cluster_profiles.get_by_name.return_value = obj
self.mock_ansible_module.params = deepcopy(PARAMS_GET_BY_NAME)
HypervisorClusterProfileFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(hypervisor_cluster_profiles=[profile])
)
def test_should_get_by_uri(self):
cluster_profile = {"name": "Test Cluster Profile", 'uri': '/rest/test/123'}
obj = mock.Mock()
obj.data = cluster_profile
self.mock_ov_client.hypervisor_cluster_profiles.get_by_uri.return_value = obj
self.mock_ansible_module.params = deepcopy(PARAMS_GET_BY_URI)
HypervisorClusterProfileFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(hypervisor_cluster_profiles=[cluster_profile])
)
def test_should_get_cluster_profile_by_name_with_all_options(self):
mock_option_return = {'subresource': 'value'}
self.mock_ov_client.hypervisor_cluster_profiles.data = {"name": "Test Cluster Profile", "uri": PROFILE_URI}
self.mock_ov_client.hypervisor_cluster_profiles.get_by_name.return_value = \
self.mock_ov_client.hypervisor_cluster_profiles
self.mock_ov_client.hypervisor_cluster_profiles.get_compliance_preview.return_value = mock_option_return
self.mock_ansible_module.params = deepcopy(PARAMS_WITH_OPTIONS)
HypervisorClusterProfileFactsModule().run()
self.mock_ov_client.hypervisor_cluster_profiles.get_compliance_preview.assert_called_once_with()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts={'hypervisor_cluster_profiles': [{'name': 'Test Cluster Profile', 'uri': PROFILE_URI}],
'hypervisor_cluster_profile_compliance_preview': mock_option_return,
}
)
if __name__ == '__main__':
pytest.main([__file__])
|
gammapy/data/tests/test_pointing.py | Rishank2610/gammapy | 155 | 13469 | <gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.time import Time
from gammapy.data import FixedPointingInfo, PointingInfo
from gammapy.utils.testing import assert_time_allclose, requires_data
@requires_data()
class TestFixedPointingInfo:
@classmethod
def setup_class(cls):
filename = "$GAMMAPY_DATA/tests/pointing_table.fits.gz"
cls.fpi = FixedPointingInfo.read(filename)
def test_location(self):
lon, lat, height = self.fpi.location.geodetic
assert_allclose(lon.deg, 16.5002222222222)
assert_allclose(lat.deg, -23.2717777777778)
assert_allclose(height.value, 1834.999999999783)
def test_time_ref(self):
expected = Time(51910.00074287037, format="mjd", scale="tt")
assert_time_allclose(self.fpi.time_ref, expected)
def test_time_start(self):
time = self.fpi.time_start
expected = Time(53025.826414166666, format="mjd", scale="tt")
assert_time_allclose(time, expected)
def test_time_stop(self):
time = self.fpi.time_stop
expected = Time(53025.844770648146, format="mjd", scale="tt")
assert_time_allclose(time, expected)
def test_duration(self):
duration = self.fpi.duration
assert_allclose(duration.sec, 1586.0000000044238)
def test_radec(self):
pos = self.fpi.radec
assert_allclose(pos.ra.deg, 83.633333333333)
assert_allclose(pos.dec.deg, 24.51444444)
assert pos.name == "icrs"
def test_altaz(self):
pos = self.fpi.altaz
assert_allclose(pos.az.deg, 7.48272)
assert_allclose(pos.alt.deg, 41.84191)
assert pos.name == "altaz"
@requires_data()
class TestPointingInfo:
@classmethod
def setup_class(cls):
filename = "$GAMMAPY_DATA/tests/pointing_table.fits.gz"
cls.pointing_info = PointingInfo.read(filename)
def test_str(self):
ss = str(self.pointing_info)
assert "Pointing info" in ss
def test_location(self):
lon, lat, height = self.pointing_info.location.geodetic
assert_allclose(lon.deg, 16.5002222222222)
assert_allclose(lat.deg, -23.2717777777778)
assert_allclose(height.value, 1834.999999999783)
def test_time_ref(self):
expected = Time(51910.00074287037, format="mjd", scale="tt")
assert_time_allclose(self.pointing_info.time_ref, expected)
def test_table(self):
assert len(self.pointing_info.table) == 100
def test_time(self):
time = self.pointing_info.time
assert len(time) == 100
expected = Time(53025.826414166666, format="mjd", scale="tt")
assert_time_allclose(time[0], expected)
def test_duration(self):
duration = self.pointing_info.duration
assert_allclose(duration.sec, 1586.0000000044238)
def test_radec(self):
pos = self.pointing_info.radec[0]
assert_allclose(pos.ra.deg, 83.633333333333)
assert_allclose(pos.dec.deg, 24.51444444)
assert pos.name == "icrs"
def test_altaz(self):
pos = self.pointing_info.altaz[0]
assert_allclose(pos.az.deg, 11.45751357)
assert_allclose(pos.alt.deg, 41.34088901)
assert pos.name == "altaz"
def test_altaz_from_table(self):
pos = self.pointing_info.altaz_from_table[0]
assert_allclose(pos.az.deg, 11.20432353385406)
assert_allclose(pos.alt.deg, 41.37921408774436)
assert pos.name == "altaz"
def test_altaz_interpolate(self):
time = self.pointing_info.time[0]
pos = self.pointing_info.altaz_interpolate(time)
assert_allclose(pos.az.deg, 11.45751357)
assert_allclose(pos.alt.deg, 41.34088901)
assert pos.name == "altaz"
|
kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 435 | 13472 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
delay = (time.time() - self.start_time) / 3600
speed = self.n_docs / delay
return [
f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
]
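# Illustrative usage sketch (hypothetical document dict; in the real pipeline this
# transformer is wired through cc_net's jsonql runner, and the exact tokens depend
# on the installed sacremoses / text_normalizer versions):
#   doc_tok = DocTokenizer(field="raw_content")
#   doc = {"raw_content": "Hello, world!", "language": "en"}
#   doc = doc_tok.do(doc)  # adds doc["tokenized"] with the Moses-tokenized text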
|
cresi/net/augmentations/functional.py | ankshah131/cresi | 117 | 13483 | import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import numpy as np
import math
from functools import wraps
def clip(img, dtype, maxval):
return np.clip(img, 0, maxval).astype(dtype)
def clipped(func):
"""
wrapper to clip results of transform to image dtype value range
"""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
dtype, maxval = img.dtype, np.max(img)
return clip(func(img, *args, **kwargs), dtype, maxval)
return wrapped_function
def fix_shift_values(img, *args):
"""
shift values are normally specified in uint, but if your data is float - you need to remap values
"""
if img.dtype == np.float32:
return list(map(lambda x: x / 255, args))
return args
def vflip(img):
return cv2.flip(img, 0)
def hflip(img):
return cv2.flip(img, 1)
def flip(img, code):
return cv2.flip(img, code)
def transpose(img):
return img.transpose(1, 0, 2) if len(img.shape) > 2 else img.transpose(1, 0)
def rot90(img, times):
img = np.rot90(img, times)
return np.ascontiguousarray(img)
def rotate(img, angle):
"""
rotate image on specified angle
:param angle: angle in degrees
"""
height, width = img.shape[0:2]
mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 1.0)
img = cv2.warpAffine(img, mat, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
def shift_scale_rotate(img, angle, scale, dx, dy):
"""
:param angle: in degrees
:param scale: relative scale
"""
height, width = img.shape[:2]
cc = math.cos(angle/180*math.pi) * scale
ss = math.sin(angle/180*math.pi) * scale
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width/2, height/2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
img = cv2.warpPerspective(img, mat, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
def center_crop(img, height, width):
h, w, c = img.shape
dy = (h-height)//2
dx = (w-width)//2
y1 = dy
y2 = y1 + height
x1 = dx
x2 = x1 + width
img = img[y1:y2, x1:x2, :]
return img
def shift_hsv(img, hue_shift, sat_shift, val_shift):
dtype = img.dtype
maxval = np.max(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.int32)
h, s, v = cv2.split(img)
h = cv2.add(h, hue_shift)
h = np.where(h < 0, maxval - h, h)
h = np.where(h > maxval, h - maxval, h)
h = h.astype(dtype)
s = clip(cv2.add(s, sat_shift), dtype, maxval)
v = clip(cv2.add(v, val_shift), dtype, maxval)
img = cv2.merge((h, s, v)).astype(dtype)
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
return img
def shift_channels(img, r_shift, g_shift, b_shift):
img[...,0] = clip(img[...,0] + r_shift, np.uint8, 255)
img[...,1] = clip(img[...,1] + g_shift, np.uint8, 255)
img[...,2] = clip(img[...,2] + b_shift, np.uint8, 255)
return img
def clahe(img, clipLimit=2.0, tileGridSize=(8,8)):
img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
return img_output
def blur(img, ksize):
return cv2.blur(img, (ksize, ksize))
def invert(img):
return 255 - img
def channel_shuffle(img):
ch_arr = [0, 1, 2]
np.random.shuffle(ch_arr)
img = img[..., ch_arr]
return img
def img_to_tensor(im, verbose=False):
'''AVE edit'''
im_out = np.moveaxis(im / (255. if im.dtype == np.uint8 else 1), -1, 0).astype(np.float32)
if verbose:
print ("augmentations.functiona.py.img_to_tensor(): im_out.shape:", im_out.shape)
print ("im_out.unique:", np.unique(im_out))
return im_out
def mask_to_tensor(mask, num_classes, verbose=False):
'''AVE edit'''
if num_classes > 1:
mask = img_to_tensor(mask)
else:
mask = np.expand_dims(mask / (255. if mask.dtype == np.uint8 else 1), 0).astype(np.float32)
if verbose:
print ("augmentations.functiona.py.img_to_tensor(): mask.shape:", mask.shape)
print ("mask.unique:", np.unique(mask))
return mask
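# Minimal usage sketch built only from the helpers above (image values are arbitrary):
#   img = np.zeros((256, 256, 3), dtype=np.uint8)
#   out = shift_scale_rotate(img, angle=15, scale=1.1, dx=0.05, dy=0.0)
#   tensor = img_to_tensor(out)   # CHW float32 scaled to [0, 1]
#   mask = mask_to_tensor(np.zeros((256, 256), dtype=np.uint8), num_classes=1)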
|
src/django_otp/conf.py | jaap3/django-otp | 318 | 13501 | import django.conf
class Settings:
"""
This is a simple class to take the place of the global settings object. An
instance will contain all of our settings as attributes, with default values
if they are not specified by the configuration.
"""
defaults = {
'OTP_LOGIN_URL': django.conf.settings.LOGIN_URL,
'OTP_ADMIN_HIDE_SENSITIVE_DATA': False,
}
def __getattr__(self, name):
if name in self.defaults:
return getattr(django.conf.settings, name, self.defaults[name])
else:
return getattr(django.conf.settings, name)
settings = Settings()
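# Usage sketch: attribute access proxies to django.conf.settings and falls back to
# the defaults above, e.g.
#   from django_otp.conf import settings
#   settings.OTP_LOGIN_URL  # Django's LOGIN_URL unless OTP_LOGIN_URL is overridden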
|
tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 296 | 13503 | from .test_tensorboard_rest_api import TestTensorboardRestAPI
from .test_tensorboard_server import TestTensorboardServer
from .test_tensorboard_endpoints import TestTensorboardEndpoint |
tests/test_add_option_backtrace.py | ponponon/loguru | 11,391 | 13505 | from loguru import logger
# See "test_catch_exceptions.py" for extended testing
def test_backtrace(writer):
logger.add(writer, format="{message}", backtrace=True)
try:
1 / 0
except Exception:
logger.exception("")
result_with = writer.read().strip()
logger.remove()
writer.clear()
logger.add(writer, format="{message}", backtrace=False)
try:
1 / 0
except Exception:
logger.exception("")
result_without = writer.read().strip()
assert len(result_with.splitlines()) > len(result_without.splitlines())
|
tests/pytorch_pfn_extras_tests/training_tests/extensions_tests/test_print_report_notebook.py | yasuyuky/pytorch-pfn-extras | 243 | 13563 | import io
import pytest
import pytorch_pfn_extras as ppe
from pytorch_pfn_extras.training.extensions import _ipython_module_available
from pytorch_pfn_extras.training.extensions.log_report import _pandas_available
@pytest.mark.skipif(
not _ipython_module_available or not _pandas_available,
reason="print report notebook import failed, "
"maybe ipython is not installed"
)
def test_run_print_report_notebook():
max_epochs = 5
iters_per_epoch = 5
manager = ppe.training.ExtensionsManager(
{}, {}, max_epochs, iters_per_epoch=iters_per_epoch)
out = io.StringIO()
log_report = ppe.training.extensions.LogReport()
manager.extend(log_report)
extension = ppe.training.extensions.PrintReportNotebook(out=out)
manager.extend(extension)
for _ in range(max_epochs):
for _ in range(iters_per_epoch):
with manager.run_iteration():
# Only test it runs without fail
# The value is not tested now...
pass
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
|
angr/storage/memory_mixins/hex_dumper_mixin.py | Kyle-Kyle/angr | 6,132 | 13585 | import string
from ...errors import SimValueError
from . import MemoryMixin
class HexDumperMixin(MemoryMixin):
def hex_dump(self, start, size, word_size=4, words_per_row=4, endianness="Iend_BE",
symbolic_char='?', unprintable_char='.', solve=False, extra_constraints=None,
inspect=False, disable_actions=True):
"""
Returns a hex dump as a string. The solver, if enabled, is called once for every byte
potentially making this function very slow. It is meant to be used mainly as a
"visualization" for debugging.
Warning: May read and display more bytes than `size` due to rounding. Particularly,
if size is less than, or not a multiple of word_size*words_per_line.
:param start: starting address from which to print
:param size: number of bytes to display
:param word_size: number of bytes to group together as one space-delimited unit
:param words_per_row: number of words to display per row of output
:param endianness: endianness to use when displaying each word (ASCII representation is unchanged)
:param symbolic_char: the character to display when a byte is symbolic and has multiple solutions
:param unprintable_char: the character to display when a byte is not printable
:param solve: whether or not to attempt to solve (warning: can be very slow)
:param extra_constraints: extra constraints to pass to the solver is solve is True
:param inspect: whether or not to trigger SimInspect breakpoints for the memory load
:param disable_actions: whether or not to disable SimActions for the memory load
:return: hex dump as a string
"""
if endianness == "Iend_BE":
end = 1
else:
end = -1
if extra_constraints is None:
extra_constraints = []
# round up size so that chop() works
line_size = word_size * words_per_row
size = size if size % line_size == 0 else size + line_size - size % line_size
raw_mem = super().load(start, size=size, inspect=inspect, disable_actions=disable_actions)
i = start
dump_str = ""
for line in raw_mem.chop(line_size * self.state.arch.byte_width):
dump = "%x:" % i
group_str = ""
for word in line.chop(word_size * self.state.arch.byte_width):
word_bytes = ""
word_str = ""
for byte_ in word.chop(self.state.arch.byte_width)[::end]:
byte_value = None
if not self.state.solver.symbolic(byte_) or solve:
try:
byte_value = self.state.solver.eval_one(
byte_,
extra_constraints=extra_constraints
)
except SimValueError:
pass
if byte_value is not None:
word_bytes += "%02x" % byte_value
if chr(byte_value) in string.printable[:-5]:
word_str += chr(byte_value)
else:
word_str += unprintable_char
else:
word_bytes += symbolic_char*2
word_str += symbolic_char
dump += ' ' + word_bytes
group_str += word_str[::end] # always print ASCII representation in little-endian
dump += ' ' + group_str
i += line_size
dump_str += dump + '\n'
return dump_str
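# Illustrative call (assumes an angr SimState whose memory plugin includes this
# mixin; the address and size are arbitrary placeholders):
#   print(state.memory.hex_dump(0x7ffefff0, 64, solve=True))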
|
src/greplin/scales/formats.py | frenzymadness/scales | 273 | 13586 | # Copyright 2011 The scales Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatting methods for stats."""
from greplin import scales
import cgi
import six
import json
import operator
import re
OPERATORS = {
'>=': operator.ge,
'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'=': operator.eq,
'==': operator.eq,
'!=': operator.ne
}
OPERATOR = re.compile('(%s)' % '|'.join(list(OPERATORS.keys())))
def runQuery(statDict, query):
"""Filters for the given query."""
parts = [x.strip() for x in OPERATOR.split(query)]
assert len(parts) in (1, 3)
queryKey = parts[0]
result = {}
for key, value in six.iteritems(statDict):
if key == queryKey:
if len(parts) == 3:
op = OPERATORS[parts[1]]
try:
queryValue = type(value)(parts[2]) if value else parts[2]
except (TypeError, ValueError):
continue
if not op(value, queryValue):
continue
result[key] = value
elif isinstance(value, scales.StatContainer) or isinstance(value, dict):
child = runQuery(value, query)
if child:
result[key] = child
return result
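# Hand-traced examples of the filter syntax accepted above (keys/values illustrative):
#   runQuery({'errors': 5, 'latency': 0.2}, 'errors >= 3')  -> {'errors': 5}
#   runQuery({'errors': 5, 'latency': 0.2}, 'errors < 3')   -> {}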
def htmlHeader(output, path, serverName, query = None):
"""Writes an HTML header."""
if path and path != '/':
output.write('<title>%s - Status: %s</title>' % (serverName, path))
else:
output.write('<title>%s - Status</title>' % serverName)
output.write('''
<style>
body,td { font-family: monospace }
.level div {
padding-bottom: 4px;
}
.level .level {
margin-left: 2em;
padding: 1px 0;
}
span { color: #090; vertical-align: top }
.key { color: black; font-weight: bold }
.int, .float { color: #00c }
</style>
''')
output.write('<h1 style="margin: 0">Stats</h1>')
output.write('<h3 style="margin: 3px 0 18px">%s</h3>' % serverName)
output.write(
'<p><form action="#" method="GET">Filter: <input type="text" name="query" size="20" value="%s"></form></p>' %
(query or ''))
def htmlFormat(output, pathParts = (), statDict = None, query = None):
"""Formats as HTML, writing to the given object."""
statDict = statDict or scales.getStats()
if query:
statDict = runQuery(statDict, query)
_htmlRenderDict(pathParts, statDict, output)
def _htmlRenderDict(pathParts, statDict, output):
"""Render a dictionary as a table - recursing as necessary."""
keys = list(statDict.keys())
keys.sort()
links = []
output.write('<div class="level">')
for key in keys:
keyStr = cgi.escape(_utf8str(key))
value = statDict[key]
if hasattr(value, '__call__'):
value = value()
if hasattr(value, 'keys'):
valuePath = pathParts + (keyStr,)
if isinstance(value, scales.StatContainer) and value.isCollapsed():
link = '/status/' + '/'.join(valuePath)
links.append('<div class="key"><a href="%s">%s</a></div>' % (link, keyStr))
else:
output.write('<div class="key">%s</div>' % keyStr)
_htmlRenderDict(valuePath, value, output)
else:
output.write('<div><span class="key">%s</span> <span class="%s">%s</span></div>' %
(keyStr, type(value).__name__, cgi.escape(_utf8str(value)).replace('\n', '<br/>')))
if links:
for link in links:
output.write(link)
output.write('</div>')
def _utf8str(x):
"""Like str(x), but returns UTF8."""
if six.PY3:
return str(x)
if isinstance(x, six.binary_type):
return x
elif isinstance(x, six.text_type):
return x.encode('utf-8')
else:
return six.binary_type(x)
def jsonFormat(output, statDict = None, query = None, pretty = False):
"""Formats as JSON, writing to the given object."""
statDict = statDict or scales.getStats()
if query:
statDict = runQuery(statDict, query)
indent = 2 if pretty else None
# At first, assume that strings are in UTF-8. If this fails -- if, for example, we have
# crazy binary data -- then in order to get *something* out, we assume ISO-8859-1,
# which maps each byte to a unicode code point.
try:
serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent)
except UnicodeDecodeError:
serialized = json.dumps(statDict, cls=scales.StatContainerEncoder, indent=indent, encoding='iso-8859-1')
output.write(serialized)
output.write('\n')
|
Kerning/Steal Kerning Groups from Font.py | justanotherfoundry/Glyphs-Scripts | 283 | 13590 | #MenuTitle: Steal Kerning Groups from Font
"""Copy kerning groups from one font to another."""
from __future__ import print_function
import vanilla
class GroupsCopy(object):
"""GUI for copying kerning groups from one font to another"""
def __init__(self):
self.w = vanilla.FloatingWindow((400, 70), "Steal kerning groups")
self.w.text_anchor = vanilla.TextBox((15, 12+2, 130, 14), "Copy groups from:", sizeStyle='small')
self.w.from_font = vanilla.PopUpButton((150, 12, 150, 17), self.GetFonts(isSourceFont=True), sizeStyle='small', callback=self.buttonCheck)
self.w.text_value = vanilla.TextBox((15, 12+2+25, 130, 14), "To selected glyphs in:", sizeStyle='small')
self.w.to_font = vanilla.PopUpButton((150, 12+25, 150, 17), self.GetFonts(isSourceFont=False), sizeStyle='small', callback=self.buttonCheck)
self.w.copybutton = vanilla.Button((-80, 12+25, -15, 17), "Copy", sizeStyle='small', callback=self.copyGroups)
self.w.setDefaultButton( self.w.copybutton )
self.w.open()
self.buttonCheck(None)
def GetFonts(self, isSourceFont):
myFontList = [ "%s - %s" % ( x.font.familyName, x.selectedFontMaster().name ) for x in Glyphs.orderedDocuments() ]
if isSourceFont:
myFontList.reverse()
return myFontList
def buttonCheck(self, sender):
fromFont = self.w.from_font.getItems()[ self.w.from_font.get() ]
toFont = self.w.to_font.getItems()[ self.w.to_font.get() ]
if fromFont == toFont:
self.w.copybutton.enable( onOff=False )
else:
self.w.copybutton.enable( onOff=True )
def copyGroups(self, sender):
fromFont = self.w.from_font.getItems()[ self.w.from_font.get() ]
toFont = self.w.to_font.getItems()[ self.w.to_font.get() ]
Doc_source = [ x for x in Glyphs.orderedDocuments() if ("%s - %s" % ( x.font.familyName, x.selectedFontMaster().name )) == fromFont ][0]
Master_source = Doc_source.selectedFontMaster().id
Font_source = Doc_source.font
Font_target = [ x.font for x in Glyphs.orderedDocuments() if ("%s - %s" % ( x.font.familyName, x.selectedFontMaster().name )) == toFont ][0]
Glyphs_selected = [ x.parent for x in Font_target.parent.selectedLayers() ]
print("Syncing kerning groups for", len(Glyphs_selected), "glyphs from", Font_source.familyName, "to", Font_target.familyName, ":")
try:
for thisGlyph in Glyphs_selected:
glyphName = thisGlyph.name
try:
sourceGlyph = Font_source.glyphs[ glyphName ]
oldL = thisGlyph.leftKerningGroup
oldR = thisGlyph.rightKerningGroup
newL = sourceGlyph.leftKerningGroup
newR = sourceGlyph.rightKerningGroup
if oldL != newL or oldR != newR:
thisGlyph.leftKerningGroup = newL
thisGlyph.rightKerningGroup = newR
print(" ", glyphName, ":", newL, "<--->", newR)
# start: temporary fix for 3.0.3 unwrapped vertical kerning
def kerningGetter(kerning):
if kerning is not None and not isinstance(kerning, str):
kerning = kerning()
return kerning
# end: temporary fix for 3.0.3 unwrapped vertical kerning
oldT = kerningGetter(thisGlyph.topKerningGroup)
oldB = kerningGetter(thisGlyph.bottomKerningGroup)
newT = kerningGetter(sourceGlyph.topKerningGroup)
newB = kerningGetter(sourceGlyph.bottomKerningGroup)
if oldT != newT or oldB != newB:
thisGlyph.leftKerningGroup = newL
thisGlyph.setTopKerningGroup_(newT)
thisGlyph.setBottomKerningGroup_(newB)
print(" ", glyphName, ":", newT, "\n ^\n |\n V\n", newB)
pass
except Exception as e:
print(" ", glyphName,": Error")
# print e
except Exception as e:
import traceback
print(traceback.format_exc())
finally:
print("Done.")
self.w.close()
GroupsCopy()
|
scripts/idapython/idapy_detect_exitats.py | felkal/fuzzware | 106 | 13592 | import idaapi
from idaapi import *
infinite_loops = [
b"\x00\xbf\xfd\xe7", # loop: nop; b loop
b"\xfe\xe7", # loop: b loop
]
whitelist = [
"Reset_Handler",
"main"
]
def detect_noret_funcs():
exit_locs_name_pairs = []
for func_addr in Functions():
if get_func_flags(func_addr) & idaapi.FUNC_NORET:
name = get_func_name(func_addr)
if name not in whitelist:
print("noret function: '{}' at 0x{:x}".format(name, func_addr))
exit_locs_name_pairs.append((func_addr, name))
return exit_locs_name_pairs
def detect_exit_ats(add_noret_functions=False):
    # 0. collect (address, name) pairs of exit locations
exit_locs = []
# 1. find noret functions if requested
if add_noret_functions:
exit_locs += detect_noret_funcs()
cnt = 0
# 2. find infinite loops and BKPT instructions
for segea in Segments():
for funcea in Functions(segea, get_segm_end(segea)):
functionName = get_func_name(funcea)
for (startea, endea) in Chunks(funcea):
for head in Heads(startea, endea):
# print(functionName, ":", "0x%08x"%(head), ":", GetDisasm(head))
                    for loop_code in infinite_loops:
if get_bytes(head, len(loop_code)) == loop_code:
print("Found endless loop: 0x{:x} (function {})".format(head, functionName))
exit_locs.append((head, "endless_loop_{:02d}_{}".format(cnt, functionName)))
cnt += 1
if print_insn_mnem(head) == 'BKPT':
print("Found bkpt: 0x{:x} (function {})".format(head, functionName))
exit_locs.append((head, "bkpt_{:02d}_{}".format(cnt, functionName)))
cnt += 1
return exit_locs
def print_exit_ats(add_noret_functions=False):
exit_locs = detect_exit_ats(add_noret_functions=add_noret_functions)
print("exit_at:")
for addr, name in exit_locs:
print(" {}: 0x{:08x}".format(name, addr))
def dump_exit_ats(filename="exit_ats.yml"):
exit_locs = detect_exit_ats()
with open(filename, "w") as f:
f.write("exit_at:\n")
for addr, name in exit_locs:
f.write(" {}: 0x{:08x}\n".format(name, addr))
dump_exit_ats()
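# The generated exit_ats.yml has this shape (addresses and names are illustrative):
# exit_at:
#   bkpt_00_main: 0x08000f2c
#   endless_loop_01_HardFault_Handler: 0x08000a40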
|
jaqs/util/dtutil.py | WestXu/JAQS | 602 | 13594 | # encoding: utf-8
import datetime
import numpy as np
import pandas as pd
def get_next_period_day(current, period, n=1, extra_offset=0):
"""
Get the n'th day in next period from current day.
Parameters
----------
current : int
Current date in format "%Y%m%d".
period : str
Interval between current and next. {'day', 'week', 'month'}
n : int
n times period.
extra_offset : int
n'th business day after next period.
Returns
-------
nxt : int
"""
current_dt = convert_int_to_datetime(current)
if period == 'day':
offset = pd.tseries.offsets.BDay() # move to next business day
# offset = offsets.Day
elif period == 'week':
offset = pd.tseries.offsets.Week(weekday=0) # move to next Monday
elif period == 'month':
offset = pd.tseries.offsets.BMonthBegin() # move to first business day of next month
# offset = offsets.MonthBegin
else:
raise NotImplementedError("Frequency as {} not support".format(period))
offset = offset * n
next_dt = current_dt + offset
if extra_offset:
next_dt = next_dt + extra_offset * pd.tseries.offsets.BDay()
nxt = convert_datetime_to_int(next_dt)
return nxt
def convert_int_to_datetime(dt):
"""Convert int date (%Y%m%d) to datetime.datetime object."""
if isinstance(dt, pd.Series):
dt = dt.astype(str)
elif isinstance(dt, int):
dt = str(dt)
return pd.to_datetime(dt, format="%Y%m%d")
def convert_datetime_to_int(dt):
f = lambda x: x.year * 10000 + x.month * 100 + x.day
if isinstance(dt, (datetime.datetime, datetime.date)):
dt = pd.Timestamp(dt)
res = f(dt)
elif isinstance(dt, np.datetime64):
dt = pd.Timestamp(dt)
res = f(dt)
else:
dt = pd.Series(dt)
res = dt.apply(f)
return res
def shift(date, n_weeks=0):
"""Shift date backward or forward for n weeks.
Parameters
----------
date : int or datetime
The date to be shifted.
n_weeks : int, optional
Positive for increasing date, negative for decreasing date.
Default 0 (no shift).
Returns
-------
res : int or datetime
"""
delta = pd.Timedelta(weeks=n_weeks)
is_int = isinstance(date, (int, np.integer))
if is_int:
dt = convert_int_to_datetime(date)
else:
dt = date
res = dt + delta
if is_int:
res = convert_datetime_to_int(res)
return res
def combine_date_time(date, time):
return np.int64(date) * 1000000 + np.int64(time)
def split_date_time(dt):
date = dt // 1000000
time = dt % 1000000
return date, time
def date_to_month(ser):
# ser = pd.Series(ser)
res = ser % 10000 // 100
MONTH_MAP = {1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec'}
# res = res.replace(MONTH_MAP)
return res
def date_to_year(ser):
return ser // 10000
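if __name__ == "__main__":
    # Small self-check / usage sketch; the dates below are arbitrary examples.
    assert get_next_period_day(20170303, 'week') == 20170306  # Friday -> next Monday
    assert shift(20170306, n_weeks=1) == 20170313
    assert combine_date_time(20170306, 93000) == 20170306093000
    assert split_date_time(20170306093000) == (20170306, 93000)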
|
env/Lib/site-packages/Algorithmia/acl.py | Vivek-Kamboj/Sargam | 142 | 13606 | class Acl(object):
def __init__(self, read_acl):
self.read_acl = read_acl
@staticmethod
def from_acl_response(acl_response):
'''Takes JSON response from API and converts to ACL object'''
if 'read' in acl_response:
read_acl = AclType.from_acl_response(acl_response['read'])
return Acl(read_acl)
else:
raise ValueError('Response does not contain read ACL')
def to_api_param(self):
read_acl_string = self.read_acl.acl_string
if read_acl_string is None:
return {'read':[]}
return {'read':[read_acl_string]}
class AclInner(object):
def __init__(self, pseudonym, acl_string):
self.pseudonym = pseudonym
self.acl_string = acl_string
def __repr__(self):
return 'AclType(pseudonym=%s,acl_string=%s)' % (self.pseudonym, self.acl_string)
class AclType(object):
public = AclInner('public','user://*')
my_algos = AclInner('my_algos','algo://.my/*')
private = AclInner('private',None) # Really is an empty list
default = my_algos
types = (public, my_algos, private)
@staticmethod
def from_acl_response(acl_list):
if len(acl_list) == 0:
return AclType.private
else:
acl_string = acl_list[0]
for t in AclType.types:
if t.acl_string == acl_string:
return t
else:
raise ValueError('Invalid acl string %s' % (acl_list[0]))
class ReadAcl(object):
public = Acl(AclType.public)
private = Acl(AclType.private)
my_algos = Acl(AclType.my_algos)
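# Usage sketch (mirrors the API responses handled above):
#   Acl.from_acl_response({'read': ['user://*']}).read_acl is AclType.public  # True
#   ReadAcl.private.to_api_param()   # -> {'read': []}
#   ReadAcl.my_algos.to_api_param()  # -> {'read': ['algo://.my/*']}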
|
launch/test_motion.launch.py | RoboJackets/robocup-software | 200 | 13622 | import os
from pathlib import Path
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, SetEnvironmentVariable, Shutdown
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
def generate_launch_description():
bringup_dir = Path(get_package_share_directory('rj_robocup'))
launch_dir = bringup_dir / 'launch'
stdout_linebuf_envvar = SetEnvironmentVariable(
'RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')
grsim = Node(package='rj_robocup', executable='grSim', arguments=[])
radio = Node(package='rj_robocup',
executable='sim_radio_node',
output='screen',
on_exit=Shutdown())
control = Node(package='rj_robocup',
executable='control_node',
output='screen',
on_exit=Shutdown())
config_server = Node(package='rj_robocup',
executable='config_server',
output='screen',
on_exit=Shutdown())
vision_receiver_launch_path = str(launch_dir / "vision_receiver.launch.py")
vision_receiver = IncludeLaunchDescription(
PythonLaunchDescriptionSource(vision_receiver_launch_path))
ref_receiver = Node(package='rj_robocup',
executable='internal_referee_node',
output='screen',
on_exit=Shutdown())
vision_filter_launch_path = str(launch_dir / "vision_filter.launch.py")
vision_filter = IncludeLaunchDescription(
PythonLaunchDescriptionSource(vision_filter_launch_path))
return LaunchDescription([
grsim, stdout_linebuf_envvar, config_server, radio, control,
vision_receiver, vision_filter, ref_receiver
])
|
snakewm/apps/games/pong/bat.py | sigmaister/snakeware_os | 1,621 | 13632 | import pygame
from pygame.locals import *
class ControlScheme:
def __init__(self):
self.up = K_UP
self.down = K_DOWN
class Bat:
def __init__(self, start_pos, control_scheme, court_size):
self.control_scheme = control_scheme
self.move_up = False
self.move_down = False
self.move_speed = 450.0
self.court_size = court_size
self.length = 30.0
self.width = 5.0
self.position = [float(start_pos[0]), float(start_pos[1])]
self.rect = pygame.Rect((start_pos[0], start_pos[1]), (self.width, self.length))
self.colour = pygame.Color("#FFFFFF")
def process_event(self, event):
if event.type == KEYDOWN:
if event.key == self.control_scheme.up:
self.move_up = True
if event.key == self.control_scheme.down:
self.move_down = True
if event.type == KEYUP:
if event.key == self.control_scheme.up:
self.move_up = False
if event.key == self.control_scheme.down:
self.move_down = False
def update(self, dt):
if self.move_up:
self.position[1] -= dt * self.move_speed
if self.position[1] < 10.0:
self.position[1] = 10.0
self.rect.y = self.position[1]
if self.move_down:
self.position[1] += dt * self.move_speed
if self.position[1] > self.court_size[1] - self.length - 10:
self.position[1] = self.court_size[1] - self.length - 10
self.rect.y = self.position[1]
def render(self, screen):
pygame.draw.rect(screen, self.colour, self.rect)
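# Minimal usage sketch (assumes a pygame display and an 800x600 court; dt is the
# per-frame delta time in seconds):
#   bat = Bat((30, 300), ControlScheme(), (800, 600))
#   # inside the game loop:
#   #   bat.process_event(event); bat.update(dt); bat.render(screen)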
|
mimic/model/rackspace_image_store.py | ksheedlo/mimic | 141 | 13633 | """
An image store representing Rackspace specific images
"""
from __future__ import absolute_import, division, unicode_literals
import attr
from six import iteritems
from mimic.model.rackspace_images import (RackspaceWindowsImage,
RackspaceCentOSPVImage, RackspaceCentOSPVHMImage,
RackspaceCoreOSImage, RackspaceDebianImage,
RackspaceFedoraImage, RackspaceFreeBSDImage,
RackspaceGentooImage, RackspaceOpenSUSEImage,
RackspaceRedHatPVImage, RackspaceRedHatPVHMImage,
RackspaceUbuntuPVImage, RackspaceUbuntuPVHMImage,
RackspaceVyattaImage, RackspaceScientificImage,
RackspaceOnMetalCentOSImage, RackspaceOnMetalCoreOSImage,
RackspaceOnMetalDebianImage, RackspaceOnMetalFedoraImage,
RackspaceOnMetalUbuntuImage)
from mimic.model.rackspace_images import create_rackspace_images
@attr.s
class RackspaceImageStore(object):
"""
A store for images to share between nova_api and glance_api
:var image_list: list of Rackspace images
"""
image_list = attr.ib(default=attr.Factory(list))
def create_image_store(self, tenant_id):
"""
Generates the data for each image in each image class
"""
image_classes = [RackspaceWindowsImage, RackspaceCentOSPVImage,
RackspaceCentOSPVHMImage, RackspaceCoreOSImage, RackspaceDebianImage,
RackspaceFedoraImage, RackspaceFreeBSDImage, RackspaceGentooImage,
RackspaceOpenSUSEImage, RackspaceRedHatPVImage, RackspaceRedHatPVHMImage,
RackspaceUbuntuPVImage, RackspaceUbuntuPVHMImage, RackspaceVyattaImage,
RackspaceScientificImage, RackspaceOnMetalCentOSImage,
RackspaceOnMetalCoreOSImage, RackspaceOnMetalDebianImage,
RackspaceOnMetalFedoraImage, RackspaceOnMetalUbuntuImage]
if len(self.image_list) < 1:
for image_class in image_classes:
for image, image_spec in iteritems(image_class.images):
image_name = image
image_id = image_spec['id']
minRam = image_spec['minRam']
minDisk = image_spec['minDisk']
image_size = image_spec['OS-EXT-IMG-SIZE:size']
image = image_class(image_id=image_id, tenant_id=tenant_id,
image_size=image_size, name=image_name, minRam=minRam,
minDisk=minDisk)
if 'com.rackspace__1__ui_default_show' in image_spec:
image.set_is_default()
self.image_list.append(image)
self.image_list.extend(create_rackspace_images(tenant_id))
return self.image_list
def get_image_by_id(self, image_id):
"""
Get an image by its id
"""
for image in self.image_list:
if image_id == image.image_id:
return image
def add_image_to_store(self, image):
"""
Add a new image to the list of images
"""
self.image_list.append(image)
|
glue/viewers/table/qt/data_viewer.py | HPLegion/glue | 550 | 13642 | import os
from functools import lru_cache
import numpy as np
from qtpy.QtCore import Qt
from qtpy import QtCore, QtGui, QtWidgets
from matplotlib.colors import ColorConverter
from glue.utils.qt import get_qapp
from glue.config import viewer_tool
from glue.core import BaseData, Data
from glue.utils.qt import load_ui
from glue.viewers.common.qt.data_viewer import DataViewer
from glue.viewers.common.qt.toolbar import BasicToolbar
from glue.viewers.common.tool import CheckableTool
from glue.viewers.common.layer_artist import LayerArtist
from glue.core.subset import ElementSubsetState
from glue.utils.colors import alpha_blend_colors
from glue.utils.qt import mpl_to_qt_color, messagebox_on_error
from glue.core.exceptions import IncompatibleAttribute
from glue.viewers.table.compat import update_table_viewer_state
try:
import dask.array as da
DASK_INSTALLED = True
except ImportError:
DASK_INSTALLED = False
__all__ = ['TableViewer', 'TableLayerArtist']
COLOR_CONVERTER = ColorConverter()
class DataTableModel(QtCore.QAbstractTableModel):
def __init__(self, table_viewer):
super(DataTableModel, self).__init__()
if table_viewer.data.ndim != 1:
raise ValueError("Can only use Table widget for 1D data")
self._table_viewer = table_viewer
self._data = table_viewer.data
self.show_coords = False
self.order = np.arange(self._data.shape[0])
self._update_visible()
def data_changed(self):
top_left = self.index(0, 0)
bottom_right = self.index(self.columnCount(), self.rowCount())
self._update_visible()
self.data_by_row_and_column.cache_clear()
self.dataChanged.emit(top_left, bottom_right)
self.layoutChanged.emit()
@property
def columns(self):
if self.show_coords:
return self._data.components
else:
return self._data.main_components + self._data.derived_components
def columnCount(self, index=None):
return len(self.columns)
def rowCount(self, index=None):
# Qt bug: Crashes on tables bigger than this
return min(self.order_visible.size, 71582788)
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
column_name = self.columns[section].label
units = self._data.get_component(self.columns[section]).units
if units != '':
column_name += "\n{0}".format(units)
return column_name
elif orientation == Qt.Vertical:
return str(self.order_visible[section])
def data(self, index, role):
if not index.isValid():
return None
return self.data_by_row_and_column(index.row(), index.column(), role)
# The data() method gets called many times, often with the same parameters,
# for example if bringing the window to the foreground/background, shifting
# up/down/left/right by one cell, etc. This can be very slow when e.g. dask
# columns are present so we cache the most recent 65536 calls which should
# have a reasonably sensible memory footprint.
@lru_cache(maxsize=65536)
def data_by_row_and_column(self, row, column, role):
if role == Qt.DisplayRole:
c = self.columns[column]
idx = self.order_visible[row]
comp = self._data[c]
value = comp[idx]
if isinstance(value, bytes):
return value.decode('ascii')
else:
if DASK_INSTALLED and isinstance(value, da.Array):
return str(value.compute())
else:
return str(comp[idx])
elif role == Qt.BackgroundRole:
idx = self.order_visible[row]
# Find all subsets that this index is part of
colors = []
for layer_artist in self._table_viewer.layers[::-1]:
if isinstance(layer_artist.layer, BaseData):
continue
if layer_artist.visible:
subset = layer_artist.layer
try:
if subset.to_mask(view=slice(idx, idx + 1))[0]:
colors.append(subset.style.color)
except IncompatibleAttribute as exc:
# Only disable the layer if enabled, as otherwise we
# will recursively call clear and _refresh, causing
# an infinite loop and performance issues.
if layer_artist.enabled:
layer_artist.disable_invalid_attributes(*exc.args)
else:
layer_artist.enabled = True
# Blend the colors using alpha blending
if len(colors) > 0:
color = alpha_blend_colors(colors, additional_alpha=0.5)
color = mpl_to_qt_color(color)
return QtGui.QBrush(color)
def sort(self, column, ascending):
c = self.columns[column]
comp = self._data.get_component(c)
self.order = np.argsort(comp.data)
if ascending == Qt.DescendingOrder:
self.order = self.order[::-1]
self._update_visible()
self.data_by_row_and_column.cache_clear()
self.layoutChanged.emit()
def _update_visible(self):
"""
Given which layers are visible or not, convert order to order_visible.
"""
self.data_by_row_and_column.cache_clear()
# First, if the data layer is visible, show all rows
for layer_artist in self._table_viewer.layers:
if layer_artist.visible and isinstance(layer_artist.layer, BaseData):
self.order_visible = self.order
return
# If not then we need to show only the rows with visible subsets
visible = np.zeros(self.order.shape, dtype=bool)
for layer_artist in self._table_viewer.layers:
if layer_artist.visible:
mask = layer_artist.layer.to_mask()
if DASK_INSTALLED and isinstance(mask, da.Array):
mask = mask.compute()
visible |= mask
self.order_visible = self.order[visible]
class TableLayerArtist(LayerArtist):
def __init__(self, table_viewer, viewer_state, layer_state=None, layer=None):
self._table_viewer = table_viewer
super(TableLayerArtist, self).__init__(viewer_state,
layer_state=layer_state,
layer=layer)
self.redraw()
def _refresh(self):
self._table_viewer.model.data_changed()
def redraw(self):
self._refresh()
def update(self):
self._refresh()
def clear(self):
self._refresh()
@viewer_tool
class RowSelectTool(CheckableTool):
tool_id = 'table:rowselect'
icon = 'glue_row_select'
action_text = 'Select row(s)'
tool_tip = ('Select rows by clicking on rows and pressing enter '
'once the selection is ready to be applied')
status_tip = ('CLICK to select, press ENTER to finalize selection, '
'ALT+CLICK or ALT+UP/DOWN to apply selection immediately')
def __init__(self, viewer):
super(RowSelectTool, self).__init__(viewer)
self.deactivate()
def activate(self):
self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
def deactivate(self):
# Don't do anything if the viewer has already been closed
if self.viewer is None:
return
self.viewer.ui.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.viewer.ui.table.clearSelection()
class TableViewWithSelectionSignal(QtWidgets.QTableView):
selection_changed = QtCore.Signal()
def selectionChanged(self, *args, **kwargs):
self.selection_changed.emit()
super(TableViewWithSelectionSignal, self).selectionChanged(*args, **kwargs)
class TableViewer(DataViewer):
LABEL = "Table Viewer"
_toolbar_cls = BasicToolbar
_data_artist_cls = TableLayerArtist
_subset_artist_cls = TableLayerArtist
inherit_tools = False
tools = ['table:rowselect']
def __init__(self, session, state=None, parent=None, widget=None):
super(TableViewer, self).__init__(session, state=state, parent=parent)
self.ui = load_ui('data_viewer.ui',
directory=os.path.dirname(__file__))
self.setCentralWidget(self.ui)
hdr = self.ui.table.horizontalHeader()
hdr.setStretchLastSection(True)
hdr.setSectionResizeMode(hdr.Interactive)
hdr = self.ui.table.verticalHeader()
hdr.setSectionResizeMode(hdr.Interactive)
self.data = None
self.model = None
self.ui.table.selection_changed.connect(self.selection_changed)
self.state.add_callback('layers', self._on_layers_changed)
self._on_layers_changed()
def selection_changed(self):
app = get_qapp()
if app.queryKeyboardModifiers() == Qt.AltModifier:
self.finalize_selection(clear=False)
def keyPressEvent(self, event):
if self.toolbar.active_tool is self.toolbar.tools['table:rowselect']:
if event.key() in [Qt.Key_Enter, Qt.Key_Return]:
self.finalize_selection()
super(TableViewer, self).keyPressEvent(event)
def finalize_selection(self, clear=True):
model = self.ui.table.selectionModel()
selected_rows = [self.model.order_visible[x.row()] for x in model.selectedRows()]
subset_state = ElementSubsetState(indices=selected_rows, data=self.data)
mode = self.session.edit_subset_mode
mode.update(self._data, subset_state, focus_data=self.data)
if clear:
# We block the signals here to make sure that we don't update
# the subset again once the selection is cleared.
self.ui.table.blockSignals(True)
self.ui.table.clearSelection()
self.ui.table.blockSignals(False)
def _on_layers_changed(self, *args):
for layer_state in self.state.layers:
if isinstance(layer_state.layer, BaseData):
break
else:
return
self.data = layer_state.layer
self.setUpdatesEnabled(False)
self.model = DataTableModel(self)
self.ui.table.setModel(self.model)
self.setUpdatesEnabled(True)
@messagebox_on_error("Failed to add data")
def add_data(self, data):
with self._layer_artist_container.ignore_empty():
self.state.layers[:] = []
return super(TableViewer, self).add_data(data)
@messagebox_on_error("Failed to add subset")
def add_subset(self, subset):
if self.data is None:
self.add_data(subset.data)
self.state.layers[0].visible = False
elif subset.data != self.data:
raise ValueError("subset parent data does not match existing table data")
return super(TableViewer, self).add_subset(subset)
@property
def window_title(self):
if len(self.state.layers) > 0:
return 'Table: ' + self.state.layers[0].layer.label
else:
return 'Table'
def closeEvent(self, event):
"""
On close, Qt seems to scan through the entire model
if the data set is big. To sidestep that,
we swap out with a tiny data set before closing
"""
super(TableViewer, self).closeEvent(event)
if self.model is not None:
self.model._data = Data(x=[0])
event.accept()
def get_layer_artist(self, cls, layer=None, layer_state=None):
return cls(self, self.state, layer=layer, layer_state=layer_state)
@staticmethod
def update_viewer_state(rec, context):
return update_table_viewer_state(rec, context)
|
psq/queue.py | Tomesco/bookshelf-demo-project | 210 | 13644 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from contextlib import contextmanager
import functools
import logging
from uuid import uuid4
import google.cloud.exceptions
from .globals import queue_context
from .storage import Storage
from .task import Task, TaskResult
from .utils import dumps, measure_time, unpickle, UnpickleError
logger = logging.getLogger(__name__)
PUBSUB_OBJECT_PREFIX = 'psq'
class Queue(object):
def __init__(self, publisher_client, subscriber_client, project,
name='default', storage=None, extra_context=None,
asynchronous=True):
self._async = asynchronous
self.name = name
self.project = project
if self._async:
self.publisher_client = publisher_client
self.subscriber_client = subscriber_client
self.topic_path = self._get_or_create_topic()
self.storage = storage or Storage()
self.subscription = None
self.extra_context = extra_context if extra_context else dummy_context
def _get_topic_path(self):
topic_name = '{}-{}'.format(PUBSUB_OBJECT_PREFIX, self.name)
return self.publisher_client.topic_path(self.project, topic_name)
def _get_or_create_topic(self):
topic_path = self._get_topic_path()
try:
self.publisher_client.get_topic(topic_path)
except google.cloud.exceptions.NotFound:
logger.info("Creating topic {}".format(topic_path))
try:
self.publisher_client.create_topic(topic_path)
except google.cloud.exceptions.Conflict:
# Another process created the topic before us, ignore.
pass
return topic_path
def _get_or_create_subscription(self):
"""Workers all share the same subscription so that tasks are
distributed across all workers."""
topic_path = self._get_topic_path()
subscription_name = '{}-{}-shared'.format(
PUBSUB_OBJECT_PREFIX, self.name)
subscription_path = self.subscriber_client.subscription_path(
self.project, subscription_name)
try:
self.subscriber_client.get_subscription(subscription_path)
except google.cloud.exceptions.NotFound:
logger.info("Creating shared subscription {}".format(
subscription_name))
try:
self.subscriber_client.create_subscription(
subscription_path, topic=topic_path)
except google.cloud.exceptions.Conflict:
# Another worker created the subscription before us, ignore.
pass
return subscription_path
def enqueue(self, f, *args, **kwargs):
"""Enqueues a function for the task queue to execute."""
task = Task(uuid4().hex, f, args, kwargs)
self.storage.put_task(task)
return self.enqueue_task(task)
def enqueue_task(self, task):
"""Enqueues a task directly. This is used when a task is retried or if
a task was manually created.
Note that this does not store the task.
"""
data = dumps(task)
if self._async:
self.publisher_client.publish(self.topic_path, data=data)
logger.info('Task {} queued.'.format(task.id))
else:
unpickled_task = unpickle(data)
logger.info(
'Executing task {} synchronously.'.format(unpickled_task.id)
)
with measure_time() as summary, self.queue_context():
unpickled_task.execute(queue=self)
summary(unpickled_task.summary())
return TaskResult(task.id, self)
@staticmethod
def _pubsub_message_callback(task_callback, message):
message.ack()
try:
task = unpickle(message.data)
task_callback(task)
except UnpickleError:
logger.exception('Failed to unpickle task {}.'.format(message))
def listen(self, callback):
if not self.subscription:
self.subscription = self._get_or_create_subscription()
message_callback = functools.partial(
self._pubsub_message_callback, callback)
return self.subscriber_client.subscribe(
self.subscription, callback=message_callback)
def cleanup(self):
"""Does nothing for this queue, but other queues types may use this to
perform clean-up after listening for tasks."""
pass
def queue_context(self):
"""
Returns a context manager that sets this queue as the current_queue
global. Similar to flask's app.app_context. This is used by the workers
to make the global available inside of task functions.
"""
return queue_context(self)
@contextmanager
def dummy_context():
yield
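# Illustrative wiring (client construction follows google-cloud-pubsub; the project
# ID and task function are placeholders):
#   publisher = google.cloud.pubsub_v1.PublisherClient()
#   subscriber = google.cloud.pubsub_v1.SubscriberClient()
#   q = Queue(publisher, subscriber, 'my-project', name='default')
#   result = q.enqueue(some_function, 1, keyword='value')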
|
tests/test_utils_project.py | FingerCrunch/scrapy | 41,267 | 13667 | import unittest
import os
import tempfile
import shutil
import contextlib
from pytest import warns
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.project import data_path, get_project_settings
@contextlib.contextmanager
def inside_a_project():
prev_dir = os.getcwd()
project_dir = tempfile.mkdtemp()
try:
os.chdir(project_dir)
with open('scrapy.cfg', 'w') as f:
# create an empty scrapy.cfg
f.close()
yield project_dir
finally:
os.chdir(prev_dir)
shutil.rmtree(project_dir)
class ProjectUtilsTest(unittest.TestCase):
def test_data_path_outside_project(self):
self.assertEqual(
os.path.join('.scrapy', 'somepath'),
data_path('somepath')
)
abspath = os.path.join(os.path.sep, 'absolute', 'path')
self.assertEqual(abspath, data_path(abspath))
def test_data_path_inside_project(self):
with inside_a_project() as proj_path:
expected = os.path.join(proj_path, '.scrapy', 'somepath')
self.assertEqual(
os.path.realpath(expected),
os.path.realpath(data_path('somepath'))
)
abspath = os.path.join(os.path.sep, 'absolute', 'path')
self.assertEqual(abspath, data_path(abspath))
@contextlib.contextmanager
def set_env(**update):
modified = set(update.keys()) & set(os.environ.keys())
update_after = {k: os.environ[k] for k in modified}
remove_after = frozenset(k for k in update if k not in os.environ)
try:
os.environ.update(update)
yield
finally:
os.environ.update(update_after)
for k in remove_after:
os.environ.pop(k)
class GetProjectSettingsTestCase(unittest.TestCase):
def test_valid_envvar(self):
value = 'tests.test_cmdline.settings'
envvars = {
'SCRAPY_SETTINGS_MODULE': value,
}
with set_env(**envvars), warns(None) as warnings:
settings = get_project_settings()
assert not warnings
assert settings.get('SETTINGS_MODULE') == value
def test_invalid_envvar(self):
envvars = {
'SCRAPY_FOO': 'bar',
}
with set_env(**envvars), warns(None) as warnings:
get_project_settings()
assert len(warnings) == 1
assert warnings[0].category == ScrapyDeprecationWarning
assert str(warnings[0].message).endswith(': FOO')
def test_valid_and_invalid_envvars(self):
value = 'tests.test_cmdline.settings'
envvars = {
'SCRAPY_FOO': 'bar',
'SCRAPY_SETTINGS_MODULE': value,
}
with set_env(**envvars), warns(None) as warnings:
settings = get_project_settings()
assert len(warnings) == 1
assert warnings[0].category == ScrapyDeprecationWarning
assert str(warnings[0].message).endswith(': FOO')
assert settings.get('SETTINGS_MODULE') == value
|
trainer/__init__.py | Greeser/gate-decorator-pruning | 192 | 13668 | from trainer.normal import NormalTrainer
from config import cfg
def get_trainer():
pair = {
'normal': NormalTrainer
}
assert (cfg.train.trainer in pair)
return pair[cfg.train.trainer]()
|
google/datalab/commands/_datalab.py | freyrsae/pydatalab | 198 | 13670 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Platform library - datalab cell magic."""
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import IPython
import IPython.core.display
import IPython.core.magic
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import google.datalab.utils.commands
@IPython.core.magic.register_line_cell_magic
def datalab(line, cell=None):
"""Implements the datalab cell magic for ipython notebooks.
Args:
    line: the contents of the datalab line.
    cell: the contents of the cell, if any.
Returns:
The results of executing the cell.
"""
parser = google.datalab.utils.commands.CommandParser(
prog='%datalab',
description="""
Execute operations that apply to multiple Datalab APIs.
Use "%datalab <command> -h" for help on a specific command.
""")
config_parser = parser.subcommand(
'config', help='List or set API-specific configurations.')
config_sub_commands = config_parser.add_subparsers(dest='command')
# %%datalab config list
config_list_parser = config_sub_commands.add_parser(
'list', help='List configurations')
config_list_parser.set_defaults(func=_config_list_fn)
# %%datalab config set -n <NAME> -v <VALUE>
config_set_parser = config_sub_commands.add_parser(
'set', help='Set configurations')
config_set_parser.add_argument(
'-n', '--name',
help='The name of the configuration value', required=True)
config_set_parser.add_argument(
'-v', '--value', help='The value to set', required=True)
config_set_parser.set_defaults(func=_config_set_fn)
project_parser = parser.subcommand(
'project', help='Get or set the default project ID')
project_sub_commands = project_parser.add_subparsers(dest='command')
# %%datalab project get
project_get_parser = project_sub_commands.add_parser(
'get', help='Get the default project ID')
project_get_parser.set_defaults(func=_project_get_fn)
# %%datalab project set -p <PROJECT_ID>
project_set_parser = project_sub_commands.add_parser(
'set', help='Set the default project ID')
project_set_parser.add_argument(
'-p', '--project', help='The default project ID', required=True)
project_set_parser.set_defaults(func=_project_set_fn)
return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
def _config_list_fn(args, cell):
ctx = google.datalab.Context.default()
return google.datalab.utils.commands.render_dictionary([ctx.config])
def _config_set_fn(args, cell):
name = args['name']
value = args['value']
ctx = google.datalab.Context.default()
ctx.config[name] = value
return google.datalab.utils.commands.render_dictionary([ctx.config])
def _project_get_fn(args, cell):
ctx = google.datalab.Context.default()
return google.datalab.utils.commands.render_text(ctx.project_id)
def _project_set_fn(args, cell):
project = args['project']
ctx = google.datalab.Context.default()
ctx.set_project_id(project)
return
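# Example invocations inside a notebook cell (the name/value/project are placeholders):
#   %datalab config list
#   %datalab config set -n some_setting -v some_value
#   %datalab project set -p my-gcp-project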
|
env/lib/python3.8/site-packages/plotly/validators/waterfall/_connector.py | acrucetta/Chicago_COVI_WebApp | 11,750 | 13724 | import _plotly_utils.basevalidators
class ConnectorValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="connector", parent_name="waterfall", **kwargs):
super(ConnectorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Connector"),
data_docs=kwargs.pop(
"data_docs",
"""
line
:class:`plotly.graph_objects.waterfall.connecto
r.Line` instance or dict with compatible
properties
mode
Sets the shape of connector lines.
visible
Determines if connector lines are drawn.
""",
),
**kwargs
)
|
preprocess/step1.py | wenhuchen/KGPT | 119 | 13738 | import json
import regex
import nltk.data
from nltk.tokenize import word_tokenize
import sys
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
def tokenize(string):
return word_tokenize(string)
def split_paragraphs(text):
"""
remove urls, lowercase all words and separate paragraphs
"""
splits = regex.split(r'\n+', text)
paras = []
for split in splits[1:]: # skip the titles
split = split.strip()
if len(split) == 0:
continue
if 'Section::' in split:
continue
paras.append(split)
paras = " ".join(paras)
return sent_detector.tokenize(paras)
def split_sent(sent):
strings = regex.split('<a |</a>', sent)
new_strings = []
count = 0
for s in strings:
s = s.strip()
if s:
if 'href=' in s:
s = s.lstrip('href="')
href, text = s.split('">')
new_strings.append((text, href))
count += 1
else:
ss = tokenize(s)
new_strings.extend([(_, None) for _ in ss])
return new_strings, count / len(new_strings), count
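# Hand-traced example of the anchor format handled above (sentence is illustrative):
#   split_sent('See <a href="Paris">the city</a> today.')
#   -> ([('See', None), ('the city', 'Paris'), ('today', None), ('.', None)], 0.25, 1)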
fw = open('out-more.json', 'w')
with open('en.json', 'r') as f:
for i, line in enumerate(f):
data = json.loads(line)
entry = {"id": data['id'], "url": data['url'], 'title': data['title']}
outputs = []
if len(data['text']) > 50:
try:
sents = split_paragraphs(data['text'])
for sent in sents:
if len(sent) < 400:
output, ratio, count = split_sent(sent)
if count > 1 and ratio >= 0.10 and len(output) >= 8 and output[0][0][0].isupper():
text = [_[0] for _ in output]
hyperlink = [_[1] for _ in output]
outputs.append((text, hyperlink))
except Exception:
pass
if len(outputs) > 0:
entry['text'] = outputs
fw.write(json.dumps(entry) + '\n')
sys.stdout.write('finished {}/{} \r'.format(i, 5989879))
fw.close()
|
gluon/packages/dal/pydal/adapters/sap.py | GeorgesBrantley/ResistanceGame | 408 | 13747 | import re
from .._compat import integer_types, long
from .base import SQLAdapter
from . import adapters
@adapters.register_for("sapdb")
class SAPDB(SQLAdapter):
dbengine = "sapdb"
drivers = ("sapdb",)
REGEX_URI = (
"^(?P<user>[^:@]+)(:(?P<password>[^@]*))?"
r"@(?P<host>[^:/]+|\[[^\]]+\])/(?P<db>[^?]+)$"
)
def _initialize_(self):
super(SAPDB, self)._initialize_()
ruri = self.uri.split("://", 1)[1]
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = self.credential_decoder(m.group("user"))
password = self.credential_decoder(m.group("password"))
if password is None:
password = ""
host = m.group("host")
db = m.group("db")
self.driver_args.update(user=user, password=password, database=db, host=host)
def connector(self):
        return self.driver.connect(**self.driver_args)
def lastrowid(self, table):
self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
return long(self.cursor.fetchone()[0])
def create_sequence_and_triggers(self, query, table, **args):
self.execute("CREATE SEQUENCE %s;" % table._sequence_name)
self.execute(
"ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');"
% (table._rname, table._id._rname, table._sequence_name)
)
self.execute(query)
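# Illustrative usage sketch (not executed; host, credentials and database name are
# placeholders): once this adapter is registered, a pyDAL URI of the form below is
# parsed by REGEX_URI into the user/password/host/db driver arguments.
#
#     from pydal import DAL
#     db = DAL("sapdb://myuser:mypassword@dbhost/mydb")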
|
apps/molecular_generation/JT_VAE/src/mol_tree.py | agave233/PaddleHelix | 454 | 13756 | <reponame>agave233/PaddleHelix
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MolTree"""
import rdkit
import rdkit.Chem as Chem
from src.chemutils import get_clique_mol, tree_decomp, get_mol, get_smiles, set_atommap, enum_assemble, decode_stereo
from src.vocab import Vocab
class MolTreeNode(object):
"""MolTreeNode"""
def __init__(self, smiles, clique=[]):
self.smiles = smiles
self.mol = get_mol(self.smiles)
self.clique = [x for x in clique]
self.neighbors = []
def add_neighbor(self, nei_node):
"""add a neighbor node """
self.neighbors.append(nei_node)
def recover(self, original_mol):
"""tbd"""
clique = []
clique.extend(self.clique)
if not self.is_leaf:
for cidx in self.clique:
original_mol.GetAtomWithIdx(cidx).SetAtomMapNum(self.nid)
for nei_node in self.neighbors:
clique.extend(nei_node.clique)
if nei_node.is_leaf:
continue
for cidx in nei_node.clique:
if cidx not in self.clique or len(nei_node.clique) == 1:
atom = original_mol.GetAtomWithIdx(cidx)
atom.SetAtomMapNum(nei_node.nid)
clique = list(set(clique))
label_mol = get_clique_mol(original_mol, clique)
self.label = Chem.MolToSmiles(Chem.MolFromSmiles(get_smiles(label_mol)))
for cidx in clique:
original_mol.GetAtomWithIdx(cidx).SetAtomMapNum(0)
return self.label
def assemble(self):
"""get candidate subgraph info"""
neighbors = [nei for nei in self.neighbors if nei.mol.GetNumAtoms() > 1]
neighbors = sorted(neighbors, key=lambda x: x.mol.GetNumAtoms(), reverse=True)
singletons = [nei for nei in self.neighbors if nei.mol.GetNumAtoms() == 1]
neighbors = singletons + neighbors
cands, aroma = enum_assemble(self, neighbors, [], [])
new_cands = [cand for i, cand in enumerate(cands) if aroma[i] >= 0]
if len(new_cands) > 0:
cands = new_cands
if len(cands) > 0:
self.cands, _ = zip(*cands)
self.cands = list(self.cands)
else:
self.cands = []
class MolTree(object):
"""MolTree"""
def __init__(self, smiles):
self.smiles = smiles
self.mol = get_mol(smiles)
cliques, edges = tree_decomp(self.mol)
self.nodes = []
root = 0
for i, c in enumerate(cliques):
cmol = get_clique_mol(self.mol, c)
node = MolTreeNode(get_smiles(cmol), c)
self.nodes.append(node)
if min(c) == 0: root = i
for x, y in edges:
self.nodes[x].add_neighbor(self.nodes[y])
self.nodes[y].add_neighbor(self.nodes[x])
if root > 0:
self.nodes[0], self.nodes[root] = self.nodes[root], self.nodes[0]
for i, node in enumerate(self.nodes):
node.nid = i + 1
if len(node.neighbors) > 1:
set_atommap(node.mol, node.nid)
node.is_leaf = (len(node.neighbors) == 1)
def size(self):
"""return nodes nums"""
return len(self.nodes)
def recover(self):
"""recover nodes"""
for node in self.nodes:
node.recover(self.mol)
def assemble(self):
"""assemble nodes"""
for node in self.nodes:
node.assemble()
def dfs(node, fa_idx):
"""dfs"""
max_depth = 0
for child in node.neighbors:
if child.idx == fa_idx: continue
max_depth = max(max_depth, dfs(child, node.idx))
return max_depth + 1
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--train_path', required=True)
parser.add_argument('--vocab_path', required=True)
args = parser.parse_args()
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
with open(args.train_path, 'r') as f:
data = f.read().splitlines()
cset = set()
for item in data:
smiles = item.split()[0]
mol = MolTree(smiles)
for c in mol.nodes:
cset.add(c.smiles)
with open(args.vocab_path, 'w') as f:
for c in cset:
f.write(c + '\n')
|
alphamind/benchmarks/data/neutralize.py | rongliang-tech/alpha-mind | 186 | 13765 | # -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import datetime as dt
import numpy as np
from sklearn.linear_model import LinearRegression
from alphamind.data.neutralize import neutralize
def benchmark_neutralize(n_samples: int, n_features: int, n_loops: int) -> None:
print("-" * 60)
print("Starting least square fitting benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
calc_res = neutralize(x, y)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
benchmark_model = LinearRegression(fit_intercept=False)
benchmark_model.fit(x, y)
exp_res = y - x @ benchmark_model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
np.testing.assert_array_almost_equal(calc_res, exp_res)
def benchmark_neutralize_with_groups(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting least square fitting with group benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = neutralize(x, y, groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
model = LinearRegression(fit_intercept=False)
for _ in range(n_loops):
for i in range(n_groups):
curr_x = x[groups == i]
curr_y = y[groups == i]
model.fit(curr_x, curr_y)
_ = curr_y - curr_x @ model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_neutralize(3000, 10, 1000)
benchmark_neutralize_with_groups(3000, 10, 1000, 30)
|
grappelli/settings.py | theatlantic/django-grappelli-old | 285 | 13774 | <filename>grappelli/settings.py
# coding: utf-8
# DJANGO IMPORTS
from django.conf import settings
# Admin Site Title
ADMIN_HEADLINE = getattr(settings, "GRAPPELLI_ADMIN_HEADLINE", 'Grappelli')
ADMIN_TITLE = getattr(settings, "GRAPPELLI_ADMIN_TITLE", 'Grappelli')
# Link to your Main Admin Site (no slashes at start and end)
ADMIN_URL = getattr(settings, "GRAPPELLI_ADMIN_URL", '/admin/') |
DockerHubPackages/code/analyzer/analyzers/python_packages.py | halcyondude/datasets | 283 | 13776 | from ..utils import run
import logging
logger = logging.getLogger(__name__)
def process_one_package(path, package, python_version="3"):
"""Get details about one precise python package in the given image.
:param path: path were the docker image filesystem is expanded.
:type path: string
:param package: name of the python package to get info from.
:type package: string
:param python_version: version of python to use. can be "2" or "3". default to "3".
:type python_version: string
:return: list containing package name, version and size
:rtype: list[string, string, int]
"""
command = f"sudo chroot {path} pip{python_version} show {package}"
info = get_ipython().getoutput(command)
for line in info:
if "Name" in line:
name = line.split(" ").pop()
if "Version" in line:
version = line.split(" ").pop()
if "Location" in line:
location = line.split(" ").pop()
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name}").pop()
# If the folder does not exist, try lowercase
if "cannot access" in result:
result = get_ipython().getoutput(
f"du --max-depth=0 {path}{location}/{name.lower()}").pop()
    # If a folder was found by either attempt, take its size from du
if "cannot access" not in result:
size = int(result.split('\t').pop(0))
# List the files by hand
else:
command = f"sudo chroot {path} pip{python_version} show {package} -f"
info = get_ipython().getoutput(command)
flag = False
size = 0
for line in info:
if flag:
command = f"du {path}{location}/{line.strip()}"
size += int(get_ipython().getoutput(command).pop().split('\t').pop(0))
if 'Files' in line:
flag = True
return [name, version, size]
def get_python_packages_info(path, python_version="3"):
"""Get details about all python packages in an image filesystem.
:param path: path were the docker image filesystem is expanded.
:type path: string
:param python_version: version of python to use. can be "2" or "3". default to "3".
:type python_version: string
:return: list containing lists of each package's name, version and size
:rtype: list[list[string, string, int]]
"""
command = f"sudo chroot {path} pip{python_version} list --format freeze --no-cache-dir 2>/dev/null"
packages = [package.split('==')
for package in get_ipython().getoutput(command)]
package_list = []
for package in packages:
try:
package_list.append(process_one_package(path, package[0]))
except Exception as e:
logger.error("Error processing python packages", package[0], e)
pass
return package_list
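# Illustrative usage sketch (not executed): the helpers above assume an IPython session
# (get_ipython) and sudo/chroot access to an unpacked image filesystem. The path below
# is a placeholder.
#
#     packages = get_python_packages_info("/tmp/rootfs", python_version="3")
#     # -> e.g. [["requests", "2.25.1", 1084], ["six", "1.15.0", 120], ...]
#
# Each entry is [name, version, size] where size is the block count reported by `du`
# (KiB with GNU coreutils defaults).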
|
tests/gold_tests/redirect/redirect_actions.test.py | cmcfarlen/trafficserver | 1,351 | 13793 | <gh_stars>1000+
'''
Test redirection behavior to invalid addresses
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import re
import os
import socket
import sys
Test.Summary = '''
Test redirection behavior to invalid addresses
'''
Test.ContinueOnFail = False
Test.Setup.Copy(os.path.join(Test.Variables.AtsTestToolsDir, 'tcp_client.py'))
dns = Test.MakeDNServer('dns')
# This record is used in each test case to get the initial redirect response from the origin that we will handle.
dnsRecords = {'iwillredirect.test': ['127.0.0.1']}
host = socket.gethostname()
ipv4addrs = set()
try:
ipv4addrs = set([ip for
(family, _, _, _, (ip, *_)) in
socket.getaddrinfo(host, port=None) if
socket.AF_INET == family])
except socket.gaierror:
pass
ipv6addrs = set()
try:
ipv6addrs = set(["[{0}]".format(ip.split('%')[0]) for
(family, _, _, _, (ip, *_)) in
socket.getaddrinfo(host, port=None) if
socket.AF_INET6 == family and 'fe80' != ip[0:4]]) # Skip link-local addresses.
except socket.gaierror:
pass
origin = Test.MakeOriginServer('origin', ip='0.0.0.0')
ArbitraryTimestamp = '12345678'
# This is for cases when the content is actually fetched from the invalid address.
request_header = {
'headers': ('GET / HTTP/1.1\r\n'
'Host: *\r\n\r\n'),
'timestamp': ArbitraryTimestamp,
'body': ''}
response_header = {
'headers': ('HTTP/1.1 204 No Content\r\n'
'Connection: close\r\n\r\n'),
'timestamp': ArbitraryTimestamp,
'body': ''}
origin.addResponse('sessionfile.log', request_header, response_header)
# Map scenarios to trafficserver processes.
trafficservers = {}
data_dirname = 'generated_test_data'
data_path = os.path.join(Test.TestDirectory, data_dirname)
os.makedirs(data_path, exist_ok=True)
def normalizeForAutest(value):
'''
autest uses "test run" names to build file and directory names, so we must transform them in case there are incompatible or
annoying characters.
This means we can also use them in URLs.
'''
if not value:
return None
return re.sub(r'[^a-z0-9-]', '_', value, flags=re.I)
def makeTestCase(redirectTarget, expectedAction, scenario):
'''
Helper method that creates a "meta-test" from which autest generates a test case.
:param redirectTarget: The target address of a redirect from origin to be handled.
:param scenario: Defines the ACL to configure and the addresses to test.
'''
config = ','.join(':'.join(t) for t in sorted((addr.name.lower(), action.name.lower()) for (addr, action) in scenario.items()))
normRedirectTarget = normalizeForAutest(redirectTarget)
normConfig = normalizeForAutest(config)
tr = Test.AddTestRun('With_Config_{0}_Redirect_to_{1}'.format(normConfig, normRedirectTarget))
if trafficservers:
tr.StillRunningAfter = origin
tr.StillRunningAfter = dns
else:
tr.Processes.Default.StartBefore(origin)
tr.Processes.Default.StartBefore(dns)
if config not in trafficservers:
trafficservers[config] = Test.MakeATSProcess('ts_{0}'.format(normConfig), enable_cache=False)
trafficservers[config].Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'http|dns|redirect',
'proxy.config.http.number_of_redirections': 1,
'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
'proxy.config.dns.resolv_conf': 'NULL',
'proxy.config.url_remap.remap_required': 0,
'proxy.config.http.redirect.actions': config,
'proxy.config.http.connect_attempts_timeout': 5,
'proxy.config.http.connect_attempts_max_retries': 0,
})
tr.Processes.Default.StartBefore(trafficservers[config])
else:
tr.StillRunningAfter = trafficservers[config]
testDomain = 'testdomain{0}.test'.format(normRedirectTarget)
# The micro DNS server can't tell us whether it has a record of the domain already, so we use a dictionary to avoid duplicates.
# We remove any surrounding brackets that are common to IPv6 addresses.
if redirectTarget:
dnsRecords[testDomain] = [redirectTarget.strip('[]')]
# A GET request parameterized on the config and on the target.
request_header = {
'headers': ('GET /redirect?config={0}&target={1} HTTP/1.1\r\n'
'Host: *\r\n\r\n').
format(normConfig, normRedirectTarget),
'timestamp': ArbitraryTimestamp,
'body': ''}
# Returns a redirect to the test domain for the given target & the port number for the TS of the given config.
response_header = {
'headers': ('HTTP/1.1 307 Temporary Redirect\r\n'
'Location: http://{0}:{1}/\r\n'
'Connection: close\r\n\r\n').
format(testDomain, origin.Variables.Port),
'timestamp': ArbitraryTimestamp,
'body': ''}
origin.addResponse('sessionfile.log', request_header, response_header)
# Generate the request data file.
command_path = os.path.join(data_path, tr.Name)
with open(command_path, 'w') as f:
f.write(('GET /redirect?config={0}&target={1} HTTP/1.1\r\n'
'Host: iwillredirect.test:{2}\r\n\r\n').
format(normConfig, normRedirectTarget, origin.Variables.Port))
# Set the command with the appropriate URL.
port = trafficservers[config].Variables.port
dir_path = os.path.join(data_dirname, tr.Name)
tr.Processes.Default.Command = \
(f"bash -o pipefail -c '{sys.executable} tcp_client.py 127.0.0.1 {port} "
f"{dir_path} | head -n 1'")
tr.Processes.Default.ReturnCode = 0
# Generate and set the 'gold file' to check stdout
goldFilePath = os.path.join(data_path, '{0}.gold'.format(tr.Name))
with open(goldFilePath, 'w') as f:
f.write(expectedAction.value['expectedStatusLine'])
tr.Processes.Default.Streams.stdout = goldFilePath
class AddressE(Enum):
'''
Classes of addresses are mapped to example addresses.
'''
Private = ('10.0.0.1', '[fc00::1]')
Loopback = (['127.1.2.3']) # [::1] is omitted here because it is likely overwritten by Self, and there are no others in IPv6.
Multicast = ('172.16.58.3', '[ff42::]')
Linklocal = ('169.254.0.1', '[fe80::]')
Routable = ('172.16.17.32', '[2001:4998:58:1836::10]') # Do not Follow redirects to these in an automated test.
Self = ipv4addrs | ipv6addrs # Addresses of this host.
Default = None # All addresses apply, nothing in particular to test.
class ActionE(Enum):
# Title case because 'return' is a Python keyword.
Return = {'config': 'return', 'expectedStatusLine': 'HTTP/1.1 307 Temporary Redirect\r\n'}
Reject = {'config': 'reject', 'expectedStatusLine': 'HTTP/1.1 403 Forbidden\r\n'}
Follow = {'config': 'follow', 'expectedStatusLine': 'HTTP/1.1 204 No Content\r\n'}
# Added to test failure modes.
Break = {'expectedStatusLine': 'HTTP/1.1 500 Cannot find server.\r\n'}
scenarios = [
{
# Follow to loopback, but alternately reject/return others.
AddressE.Private: ActionE.Reject,
AddressE.Loopback: ActionE.Follow,
AddressE.Multicast: ActionE.Reject,
AddressE.Linklocal: ActionE.Return,
AddressE.Routable: ActionE.Reject,
AddressE.Self: ActionE.Return,
AddressE.Default: ActionE.Reject,
},
{
# Follow to loopback, but alternately reject/return others, flipped from the previous scenario.
AddressE.Private: ActionE.Return,
AddressE.Loopback: ActionE.Follow,
AddressE.Multicast: ActionE.Return,
AddressE.Linklocal: ActionE.Reject,
AddressE.Routable: ActionE.Return,
AddressE.Self: ActionE.Reject,
AddressE.Default: ActionE.Return,
},
{
# Return loopback, but reject everything else.
AddressE.Loopback: ActionE.Return,
AddressE.Default: ActionE.Reject,
},
{
# Reject loopback, but return everything else.
AddressE.Loopback: ActionE.Reject,
AddressE.Default: ActionE.Return,
},
{
# Return everything.
AddressE.Default: ActionE.Return,
},
]
for scenario in scenarios:
for addressClass in AddressE:
if not addressClass.value:
# Default has no particular addresses to test.
continue
for address in addressClass.value:
expectedAction = scenario[addressClass] if addressClass in scenario else scenario[AddressE.Default]
makeTestCase(redirectTarget=address, expectedAction=expectedAction, scenario=scenario)
# Test redirects to names that cannot be resolved.
makeTestCase(redirectTarget=None, expectedAction=ActionE.Break, scenario=scenario)
dns.addRecords(records=dnsRecords)
# Make sure this runs only after local files have been created.
Test.Setup.Copy(data_path)
|
auth0/v3/test/management/test_stats.py | akmjenkins/auth0-python | 340 | 13797 | <filename>auth0/v3/test/management/test_stats.py
import unittest
import mock
from ...management.stats import Stats
class TestStats(unittest.TestCase):
def test_init_with_optionals(self):
t = Stats(domain='domain', token='<PASSWORD>', telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get('Auth0-Client', None)
self.assertEqual(telemetry_header, None)
@mock.patch('auth0.v3.management.stats.RestClient')
def test_active_users(self, mock_rc):
mock_instance = mock_rc.return_value
s = Stats(domain='domain', token='<PASSWORD>')
s.active_users()
mock_instance.get.assert_called_with(
'https://domain/api/v2/stats/active-users',
)
@mock.patch('auth0.v3.management.stats.RestClient')
def test_daily_stats(self, mock_rc):
mock_instance = mock_rc.return_value
s = Stats(domain='domain', token='<PASSWORD>')
s.daily_stats()
mock_instance.get.assert_called_with(
'https://domain/api/v2/stats/daily',
params={'from': None, 'to': None},
)
s.daily_stats(from_date='12341212', to_date='56785656')
mock_instance.get.assert_called_with(
'https://domain/api/v2/stats/daily',
params={'from': '12341212', 'to': '56785656'},
)
|
cdlib/algorithms/internal/COACH.py | xing-lab-pitt/cdlib | 248 | 13802 | # Author: <NAME> <<EMAIL>>
# A core-attachment based method to detect protein complexes in PPI networks
# <NAME>, Kwoh, Ng (2009)
# http://www.biomedcentral.com/1471-2105/10/169
from collections import defaultdict
from itertools import combinations
import functools
# return average degree and density for a graph
def __graph_stats(graph):
avg_deg = sum(len(n) for n in graph.values()) / float(len(graph))
density = avg_deg / (len(graph) - 1)
return avg_deg, density
# return core nodes, given a graph and its average degree
__get_core_nodes = lambda g, avg: set(v for v, n in g.items() if len(n) >= avg)
# return NA score
__NA_score = lambda a, b: float(len(a & b) ** 2) / (len(a) * len(b))
def __core_removal(graph, density_threshold):
if len(graph) == 1: # need at least two nodes in the graph...
return [graph]
avg_deg, density = __graph_stats(graph)
if density >= density_threshold:
return [graph]
else:
# find and remove core nodes; create connected subcomponents
core_nodes = __get_core_nodes(graph, avg_deg)
result = []
subgraphs = []
for v, n in graph.items():
if v in core_nodes:
continue
n = n - core_nodes # note that we're reassigning n
for s in subgraphs:
if not n.isdisjoint(s):
s |= n
break
else:
subgraphs.append(n | {v})
# connected subcomponent joining
i = 0
while i < len(subgraphs) - 1:
j = i + 1
while j < len(subgraphs):
if not subgraphs[i].isdisjoint(subgraphs[j]):
subgraphs[i] |= subgraphs[j]
subgraphs.pop(j)
else:
j += 1
i += 1
# recursive core removal
for s in subgraphs:
tresults = __core_removal(
dict((v, graph[v] & s) for v in s), density_threshold
)
for tc in tresults:
nodes = set()
for v, n in tc.items():
nodes.add(v)
n |= graph[v] & core_nodes
for c in core_nodes:
tc[c] = graph[c] & (nodes | core_nodes)
result += tresults
return result
def co_ach(g, density_threshold=0.7, affinity_threshold=0.225, closeness_threshold=0.5):
# read protein-protein pairs
data = defaultdict(set)
for a, b in g.edges():
data[a].add(b)
data[b].add(a)
# step 1: find preliminary cores
SC = [] # currently-detected preliminary cores
count = 0
for vertex, neighbors in data.items():
# build neighborhood graph
vertices = {vertex} | neighbors
size1_neighbors = set()
graph = {}
for v in vertices:
n = data[v] & vertices
if len(n) > 1: # ignore size-1 vertices
graph[v] = n
else:
size1_neighbors.add(v)
if len(graph) < 2: # not enough connections in this graph
continue
graph[vertex] -= size1_neighbors
# get core graph
avg_deg, density = __graph_stats(graph)
core_nodes = __get_core_nodes(graph, avg_deg)
vertices = set(graph.keys())
for v in vertices - core_nodes:
del graph[v]
for n in graph.values():
n &= core_nodes
if len(graph) < 2: # not enough connections in this graph
continue
graph_nodes = set(graph)
# inner loop
for sg in __core_removal(graph, density_threshold):
while True:
_, density = __graph_stats(sg)
# if density threshold met, stop; else, remove min degree node
if density >= density_threshold:
break
w = min(sg.items(), key=lambda k: len(k[1]))[0]
del sg[w]
for n in sg.values():
n.discard(w)
sg_nodes = set(sg)
while graph_nodes - sg_nodes:
w = max(graph_nodes - sg_nodes, key=lambda v: len(graph[v] & sg_nodes))
new_sg = sg.copy()
for v, n in new_sg.items():
if w in graph[v]:
n.add(w)
new_sg[w] = graph[w] & sg_nodes
_, density = __graph_stats(new_sg)
if density < density_threshold:
break
sg = new_sg
sg_nodes.add(w)
# redundancy filtering
max_sim = -1
for i in range(len(SC)):
sim = __NA_score(set(SC[i]), sg_nodes)
if sim > max_sim:
max_sim = sim
index = i
if max_sim < affinity_threshold:
SC.append(sg)
else:
_, density_i = __graph_stats(SC[index])
if density * len(sg) > density_i * len(SC[index]):
SC[index] = sg
# step 2: adding peripheral proteins
clusters = set()
for core in SC:
nodes = frozenset(core)
neighbors = (
functools.reduce(lambda x, y: x | y, (data[v] for v in nodes)) - nodes
)
neighbors -= set(
v
for v in neighbors
if float(len(data[v] & nodes)) / len(nodes) <= closeness_threshold
)
clusters.add(nodes | neighbors)
return [list(c) for c in clusters]
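# Illustrative sketch (not invoked; assumes networkx is available, as it is elsewhere in
# cdlib): co_ach only needs an object whose .edges() yields node pairs.
def _demo_co_ach():
    import networkx as nx
    g = nx.karate_club_graph()
    communities = co_ach(g, density_threshold=0.7, affinity_threshold=0.225,
                         closeness_threshold=0.5)
    # Each element is a list of node ids: a dense core plus its attached peripheral
    # nodes, i.e. one predicted complex/community.
    return communities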
|
python/GafferSceneUI/LightToCameraUI.py | ddesmond/gaffer | 561 | 13810 | ##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
import GafferSceneUI
def filmFitMetadata():
# Take the metadata from StandardOptionsUI, except not the layout section
allOptions = GafferSceneUI.StandardOptionsUI.plugsMetadata[ "options.filmFit" ] + GafferSceneUI.StandardOptionsUI.plugsMetadata[ "options.filmFit.value" ]
optionPairs = zip( allOptions[::2], allOptions[1::2] )
return sum( [ [i,j] for i,j in optionPairs if i != "layout:section" ], [] )
Gaffer.Metadata.registerNode(
GafferScene.LightToCamera,
"description",
"""
Converts lights into cameras. Spotlights are converted to a perspective
camera with the field of view matching the cone angle, and distant lights are
converted to an orthographic camera.
""",
plugs = {
"filmFit" : filmFitMetadata(),
"distantAperture" : [
"description",
"""
The orthographic aperture used when converting distant lights
( which are theoretically infinite in extent )
""",
],
"clippingPlanes" : [
"description",
"""
Clipping planes for the created cameras. When creating a perspective camera, a near clip
<= 0 is invalid, and will be replaced with 0.01. Also, certain lights only start casting
light at some distance - if near clip is less than this, it will be increased.
""",
],
"filter" : [
"description",
"""
Specifies which lights to convert.
""",
],
}
)
|
custom_components/blitzortung/geohash_utils.py | Nag94/HomeAssistantConfig | 163 | 13818 | <filename>custom_components/blitzortung/geohash_utils.py
import math
from collections import namedtuple
from . import geohash
Box = namedtuple("Box", ["s", "w", "n", "e"])
def geohash_bbox(gh):
ret = geohash.bbox(gh)
return Box(ret["s"], ret["w"], ret["n"], ret["e"])
def bbox(lat, lon, radius):
lat_delta = radius * 360 / 40000
lon_delta = lat_delta / math.cos(lat * math.pi / 180.0)
return Box(lat - lat_delta, lon - lon_delta, lat + lat_delta, lon + lon_delta)
def overlap(a1, a2, b1, b2):
return a1 < b2 and a2 > b1
def box_overlap(box1: Box, box2: Box):
return overlap(box1.s, box1.n, box2.s, box2.n) and overlap(
box1.w, box1.e, box2.w, box2.e
)
def compute_geohash_tiles(lat, lon, radius, precision):
bounds = bbox(lat, lon, radius)
center = geohash.encode(lat, lon, precision)
stack = set()
checked = set()
stack.add(center)
checked.add(center)
while stack:
current = stack.pop()
for neighbor in geohash.neighbors(current):
if neighbor not in checked and box_overlap(geohash_bbox(neighbor), bounds):
stack.add(neighbor)
checked.add(neighbor)
return checked
def geohash_overlap(lat, lon, radius, max_tiles=9):
result = []
for precision in range(1, 13):
tiles = compute_geohash_tiles(lat, lon, radius, precision)
        if len(tiles) <= max_tiles:  # respect the max_tiles limit from the signature
            result = tiles
else:
break
return result
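# Illustrative sketch (not invoked): the coordinates and radius below are arbitrary
# placeholders. Precision is increased until the tile count would exceed max_tiles.
def _demo_geohash_overlap():
    lat, lon, radius_km = 52.52, 13.40, 50
    tiles = geohash_overlap(lat, lon, radius_km)
    # 'tiles' is a set of geohash prefixes whose bounding boxes overlap a box of
    # roughly radius_km around (lat, lon); at most max_tiles of them are returned.
    return tiles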
|
libweasyl/libweasyl/alembic/versions/eff79a07a88d_use_timestamp_column_for_latest_.py | akash143143/weasyl | 111 | 13880 | """Use TIMESTAMP column for latest submission
Revision ID: eff<PASSWORD>0<PASSWORD>
Revises: <PASSWORD>
Create Date: 2017-01-08 22:20:43.814375
"""
# revision identifiers, used by Alembic.
revision = 'eff<PASSWORD>'
down_revision = '<PASSWORD>'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
import libweasyl
from libweasyl.legacy import UNIXTIME_OFFSET
def upgrade():
op.alter_column(
'profile',
'latest_submission_time',
new_column_name='latest_submission_time_old',
)
op.add_column(
'profile',
sa.Column('latest_submission_time', libweasyl.models.helpers.ArrowColumn(), nullable=False, server_default='epoch'),
)
op.execute(
"UPDATE profile SET latest_submission_time = TIMESTAMP WITHOUT TIME ZONE 'epoch' + "
"(latest_submission_time_old - %d) * INTERVAL '1 second'" % (UNIXTIME_OFFSET,))
op.drop_column('profile', 'latest_submission_time_old')
def downgrade():
op.alter_column(
'profile',
'latest_submission_time',
new_column_name='latest_submission_time_new',
)
op.add_column(
'profile',
sa.Column('latest_submission_time', libweasyl.models.helpers.WeasylTimestampColumn(), nullable=False, server_default='0'),
)
op.execute(
"UPDATE profile SET latest_submission_time = extract(epoch from latest_submission_time_new) + %d" % (UNIXTIME_OFFSET,))
op.drop_column('profile', 'latest_submission_time_new')
|
regression/testplan/firmware_small.py | sld-columbia/nvdla-sw | 407 | 13895 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test_plan
import settings
class Module(test_plan.Testplan):
runScript = settings.KMD_RUNSCRIPT
deviceTargets = ['sim', 'ufpga']
def __init__(self):
super(Module, self).__init__(__name__)
# Convenience globals
kmd = Module.runScript
devices = Module.deviceTargets
ces = ["Core Engine Scheduler"]
nn = ["Neural Network"]
convd = ["CONV HW - Direct"]
convi = ["CONV HW - Image"]
convw = ["CONV HW - Winograd"]
convp = ["CONV HW - Pipeline"]
sdpx1 = ["SDP X1 HW"]
sdpx2 = ["SDP X2 HW"]
sdpy = ["SDP Y HW"]
sdpf = ["SDP HW - Full"]
cdp = ["CDP HW"]
pdp = ["PDP HW"]
def registerNvSmallTests(self, testplan):
testplan.append(
[0, "Written", kmd, "CONV_D_L0_0_small", None, convd, devices, "Convolution test - Sanity test direct convolution",
"Direct convolution, 8x8x128 input cube, 3x3x128 kernel cube and 32 kernels input and weight read from DRAM, no mean and bias data, output written to DRAM through SDP."])
testplan.append(
[0, "Written", kmd, "SDP_X1_L0_0_small", None, sdpx1, devices,
"SDP test - Sanity test for SDP, only X1 enabled with ALU, X2 and Y disable. No DMA used",
"Element wise sum operation in X1, 8x8x32 input cube and 8x8x32 bias cube. Activation function as ReLU"])
testplan.append(
[0, "Written", kmd, "CDP_L0_0_small", None, cdp, devices, "CDP test - Sanity test for CDP",
"Use only linear table with LUT configured with all 1. 8x8x32 input cube and 8x8x32 output cube."])
testplan.append(
[0, "Written", kmd, "PDP_L0_0_small", None, pdp, devices, "PDP test - Sanity test for PDP with max pooling",
"Max pooling, 8x8x32 input cube, 8x8x32 output cube, no padding, 1x1 kernel size. No need to compare data. It is enough if task succeeds to pass this test."])
testplan.append(
[0, "Written", kmd, "NN_L0_1_small", None, nn, devices, "AlexNet", "AlexNet"])
def registerFirmwareSmallTests(self):
testplan = []
registerNvSmallTests(self, testplan)
for item in testplan:
test = test_plan.Test()
test.level = item[0]
test.status = item[1]
test.runscript = item[2]
test.name = item[3]
test.options = item[4]
test.features = item[5]
test.targets = item[6]
test.description = item[7]
test.dependencies = None
self.add_test(test)
def registerTests(self):
registerFirmwareSmallTests(self)
Module.register_tests = registerTests
|
airflow/providers/amazon/aws/example_dags/example_hive_to_dynamodb.py | npodewitz/airflow | 8,092 | 13908 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This DAG will not work unless you create an Amazon EMR cluster running
Apache Hive and copy data into it following steps 1-4 (inclusive) here:
https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/EMRforDynamoDB.Tutorial.html
"""
import os
from datetime import datetime
from airflow import DAG
from airflow.decorators import task
from airflow.models import Connection
from airflow.providers.amazon.aws.hooks.dynamodb import DynamoDBHook
from airflow.providers.amazon.aws.transfers.hive_to_dynamodb import HiveToDynamoDBOperator
from airflow.utils import db
DYNAMODB_TABLE_NAME = 'example_hive_to_dynamodb_table'
HIVE_CONNECTION_ID = os.getenv('HIVE_CONNECTION_ID', 'hive_on_emr')
HIVE_HOSTNAME = os.getenv('HIVE_HOSTNAME', 'ec2-123-45-67-890.compute-1.amazonaws.com')
# These values assume you set up the Hive data source following the link above.
DYNAMODB_TABLE_HASH_KEY = 'feature_id'
HIVE_SQL = 'SELECT feature_id, feature_name, feature_class, state_alpha FROM hive_features'
@task
def create_dynamodb_table():
client = DynamoDBHook(client_type='dynamodb').conn
client.create_table(
TableName=DYNAMODB_TABLE_NAME,
KeySchema=[
{'AttributeName': DYNAMODB_TABLE_HASH_KEY, 'KeyType': 'HASH'},
],
AttributeDefinitions=[
{'AttributeName': DYNAMODB_TABLE_HASH_KEY, 'AttributeType': 'N'},
],
ProvisionedThroughput={'ReadCapacityUnits': 20, 'WriteCapacityUnits': 20},
)
# DynamoDB table creation is nearly, but not quite, instantaneous.
# Wait for the table to be active to avoid race conditions writing to it.
waiter = client.get_waiter('table_exists')
waiter.wait(TableName=DYNAMODB_TABLE_NAME, WaiterConfig={'Delay': 1})
@task
def get_dynamodb_item_count():
"""
A DynamoDB table has an ItemCount value, but it is only updated every six hours.
To verify this DAG worked, we will scan the table and count the items manually.
"""
table = DynamoDBHook(resource_type='dynamodb').conn.Table(DYNAMODB_TABLE_NAME)
response = table.scan(Select='COUNT')
item_count = response['Count']
while 'LastEvaluatedKey' in response:
response = table.scan(Select='COUNT', ExclusiveStartKey=response['LastEvaluatedKey'])
item_count += response['Count']
print(f'DynamoDB table contains {item_count} items.')
# Included for sample purposes only; in production you wouldn't delete
# the table you just backed your data up to. Using 'all_done' so even
# if an intermediate step fails, the DAG will clean up after itself.
@task(trigger_rule='all_done')
def delete_dynamodb_table():
DynamoDBHook(client_type='dynamodb').conn.delete_table(TableName=DYNAMODB_TABLE_NAME)
# Included for sample purposes only; in production this should
# be configured in the environment and not be part of the DAG.
# Note: The 'hiveserver2_default' connection will not work if Hive
# is hosted on EMR. You must set the host name of the connection
# to match your EMR cluster's hostname.
@task
def configure_hive_connection():
db.merge_conn(
Connection(
conn_id=HIVE_CONNECTION_ID,
conn_type='hiveserver2',
host=HIVE_HOSTNAME,
port=10000,
)
)
with DAG(
dag_id='example_hive_to_dynamodb',
schedule_interval=None,
start_date=datetime(2021, 1, 1),
tags=['example'],
catchup=False,
) as dag:
# Add the prerequisites docstring to the DAG in the UI.
dag.doc_md = __doc__
# [START howto_transfer_hive_to_dynamodb]
backup_to_dynamodb = HiveToDynamoDBOperator(
task_id='backup_to_dynamodb',
hiveserver2_conn_id=HIVE_CONNECTION_ID,
sql=HIVE_SQL,
table_name=DYNAMODB_TABLE_NAME,
table_keys=[DYNAMODB_TABLE_HASH_KEY],
)
# [END howto_transfer_hive_to_dynamodb]
(
configure_hive_connection()
>> create_dynamodb_table()
>> backup_to_dynamodb
>> get_dynamodb_item_count()
>> delete_dynamodb_table()
)
|
algocoin/__init__.py | dendisuhubdy/algo-coin | 252 | 13915 | <filename>algocoin/__init__.py
from .main import main as run # noqa: F401
__version__ = '0.0.3'
|
mmrazor/models/architectures/components/backbones/__init__.py | HIT-cwh/mmrazor | 553 | 13916 | <gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
from .darts_backbone import DartsBackbone
from .searchable_mobilenet import SearchableMobileNet
from .searchable_shufflenet_v2 import SearchableShuffleNetV2
__all__ = ['DartsBackbone', 'SearchableShuffleNetV2', 'SearchableMobileNet']
|
serve/api/predict.py | HalleyYoung/musicautobot | 402 | 13952 | import sys
from . import app
sys.path.append(str(app.config['LIB_PATH']))
from musicautobot.music_transformer import *
from musicautobot.config import *
from flask import Response, send_from_directory, send_file, request, jsonify
from .save import to_s3
import torch
import traceback
torch.set_num_threads(4)
data = load_data(app.config['DATA_PATH'], app.config['DATA_SAVE_NAME'], num_workers=1)
learn = music_model_learner(data, pretrained_path=app.config['MUSIC_MODEL_PATH'])
if torch.cuda.is_available(): learn.model.cuda()
# learn.to_fp16(loss_scale=512) # fp16 not supported for cpu - https://github.com/pytorch/pytorch/issues/17699
@app.route('/predict/midi', methods=['POST'])
def predict_midi():
args = request.form.to_dict()
midi = request.files['midi'].read()
print('THE ARGS PASSED:', args)
bpm = float(args['bpm']) # (AS) TODO: get bpm from midi file instead
temperatures = (float(args.get('noteTemp', 1.2)), float(args.get('durationTemp', 0.8)))
n_words = int(args.get('nSteps', 200))
seed_len = int(args.get('seedLen', 12))
# debugging 1 - send exact midi back
# with open('/tmp/test.mid', 'wb') as f:
# f.write(midi)
# return send_from_directory('/tmp', 'test.mid', mimetype='audio/midi')
# debugging 2 - test music21 conversion
# stream = file2stream(midi) # 1.
# debugging 3 - test npenc conversion
# seed_np = midi2npenc(midi) # music21 can handle bytes directly
# stream = npenc2stream(seed_np, bpm=bpm)
# debugging 4 - midi in, convert, midi out
# stream = file2stream(midi) # 1.
# midi_in = Path(stream.write("musicxml"))
# print('Midi in:', midi_in)
# stream_sep = separate_melody_chord(stream)
# midi_out = Path(stream_sep.write("midi"))
# print('Midi out:', midi_out)
# s3_id = to_s3(midi_out, args)
# result = {
# 'result': s3_id
# }
# return jsonify(result)
# Main logic
try:
full = predict_from_midi(learn, midi=midi, n_words=n_words, seed_len=seed_len, temperatures=temperatures)
stream = separate_melody_chord(full.to_stream(bpm=bpm))
midi_out = Path(stream.write("midi"))
print('Wrote to temporary file:', midi_out)
except Exception as e:
traceback.print_exc()
return jsonify({'error': f'Failed to predict: {e}'})
s3_id = to_s3(midi_out, args)
result = {
'result': s3_id
}
return jsonify(result)
# return send_from_directory(midi_out.parent, midi_out.name, mimetype='audio/midi')
# @app.route('/midi/song/<path:sid>')
# def get_song_midi(sid):
# return send_from_directory(file_path/data_dir, htlist[sid]['midi'], mimetype='audio/midi')
@app.route('/midi/convert', methods=['POST'])
def convert_midi():
args = request.form.to_dict()
if 'midi' in request.files:
midi = request.files['midi'].read()
elif 'midi_path'in args:
midi = args['midi_path']
stream = file2stream(midi) # 1.
# stream = file2stream(midi).chordify() # 1.
stream_out = Path(stream.write('musicxml'))
return send_from_directory(stream_out.parent, stream_out.name, mimetype='xml')
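# Illustrative request sketch (not executed; host, port and file name are placeholders,
# the form fields match the args read above):
#
#     curl -X POST http://localhost:5000/predict/midi \
#          -F midi=@seed.mid -F bpm=120 -F nSteps=200 -F seedLen=12 \
#          -F noteTemp=1.2 -F durationTemp=0.8
#
# The response is JSON of the form {"result": "<s3 id>"} referencing the generated MIDI.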
|
tests/test_user.py | meisnate12/trakt.py | 147 | 13953 | from __future__ import absolute_import, division, print_function
from tests.core import mock
from trakt import Trakt
from httmock import HTTMock
import pytest
def test_likes():
with HTTMock(mock.fixtures, mock.unknown):
with Trakt.configuration.auth('mock', 'mock'):
likes = Trakt['users'].likes()
assert likes is not None
likes = list(likes)
assert len(likes) == 3
assert likes[0].keys == [
('trakt', 1519)
]
assert likes[1].keys == [
('trakt', '1238362'),
('slug', 'star-wars-machete')
]
assert likes[2].keys == [
('trakt', '840781'),
('slug', 'star-wars-timeline')
]
def test_likes_invalid_response():
with HTTMock(mock.fixtures, mock.unknown):
likes = Trakt['users'].likes()
assert likes is None
def test_likes_invalid_type():
with HTTMock(mock.fixtures, mock.unknown):
with pytest.raises(ValueError):
likes = Trakt['users'].likes('invalid')
assert likes is not None
likes = list(likes)
|
ott/examples/fairness/models.py | MUCDK/ott | 232 | 13957 | <gh_stars>100-1000
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model for to embed structured features."""
from typing import Any, Tuple
import flax.linen as nn
import jax.numpy as jnp
class FeaturesEncoder(nn.Module):
"""Encodes structured features."""
input_dims: Tuple[int]
embed_dim: int = 32
@nn.compact
def __call__(self, x):
result = []
index = 0
for d in self.input_dims:
arr = x[..., index:index+d]
result.append(arr if d == 1 else nn.Dense(self.embed_dim)(arr))
index += d
return jnp.concatenate(result, axis=-1)
class AdultModel(nn.Module):
"""A model to predict if the income is above 50k (adult dataset)."""
encoder_cls: Any
hidden: Tuple[int] = (64, 64)
@nn.compact
def __call__(self, x, train: bool = True):
x = self.encoder_cls()(x)
for h in self.hidden:
x = nn.Dense(h)(x)
x = nn.relu(x)
x = nn.Dense(1)(x)
x = nn.sigmoid(x)
return x[..., 0]
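# Illustrative sketch (not invoked): wiring the encoder into the model and running one
# forward pass. The input_dims, batch size and data below are made-up placeholders.
def _demo_adult_model():
    import functools
    import jax
    encoder_cls = functools.partial(FeaturesEncoder, input_dims=(1, 8, 1, 4))
    model = AdultModel(encoder_cls=encoder_cls)
    x = jnp.ones((16, 1 + 8 + 1 + 4))  # features concatenated along the last axis
    params = model.init(jax.random.PRNGKey(0), x, train=False)
    probs = model.apply(params, x, train=False)  # shape (16,), P(income > 50k)
    return probs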
|
reikna/core/__init__.py | ringw/reikna | 122 | 13959 | <gh_stars>100-1000
from reikna.core.signature import Type, Annotation, Parameter, Signature
from reikna.core.computation import Computation
from reikna.core.transformation import Transformation, Indices
|
nngen/onnx/shape.py | RyusukeYamano/nngen | 207 | 13961 | <filename>nngen/onnx/shape.py
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def Shape(visitor, node):
input = visitor.visit(node.input[0])
shape = input.shape
if (input.get_layout() is not None and input.get_onnx_layout() is not None and
input.get_layout() != input.get_onnx_layout()):
shape = [shape[input.get_layout().index(l)] for l in input.get_onnx_layout()]
return tuple(shape)
|
Burp/lib/data.py | wisdark/HUNT | 1,628 | 13965 | <reponame>wisdark/HUNT
from __future__ import print_function
import json
import os
class Data():
shared_state = {}
def __init__(self):
self.__dict__ = self.shared_state
self.set_checklist(None)
self.set_issues()
def set_checklist(self, file_name):
is_empty = file_name is None
if is_empty:
file_name = os.getcwd() + os.sep + "conf" + os.sep + "checklist.json"
try:
with open(file_name) as data_file:
data = json.load(data_file)
self.checklist = data["checklist"]
except Exception as e:
print(e)
def get_checklist(self):
return self.checklist
def set_issues(self):
file_name = os.getcwd() + os.sep + "conf" + os.sep + "issues.json"
try:
with open(file_name) as data_file:
self.issues = json.load(data_file)
except Exception as e:
print(e)
def get_issues(self):
return self.issues
def set_bugs(self, functionality_name, test_name, request, response):
bug = {
"request": request,
"response": response
}
self.checklist["Functionality"][functionality_name]["tests"][test_name]["bugs"].append(bug)
def set_notes(self, functionality_name, test_name, notes):
self.checklist["Functionality"][functionality_name]["tests"][test_name]["notes"] = notes
|
Src/StdLib/Lib/test/xmltests.py | cwensley/ironpython2 | 2,293 | 14005 | # Convenience test module to run all of the XML-related tests in the
# standard library.
import sys
import test.test_support
test.test_support.verbose = 0
def runtest(name):
__import__(name)
module = sys.modules[name]
if hasattr(module, "test_main"):
module.test_main()
runtest("test.test_minidom")
runtest("test.test_pyexpat")
runtest("test.test_sax")
runtest("test.test_xml_etree")
runtest("test.test_xml_etree_c")
runtest("test.test_xmllib")
runtest("test.test_xmlrpc")
|
memcnn/experiment/tests/test_factory.py | classner/memcnn | 224 | 14006 | import pytest
import os
import memcnn.experiment.factory
from memcnn.config import Config
def test_get_attr_from_module():
a = memcnn.experiment.factory.get_attr_from_module('memcnn.experiment.factory.get_attr_from_module')
assert a is memcnn.experiment.factory.get_attr_from_module
def test_load_experiment_config():
cfg_fname = os.path.join(Config.get_dir(), 'experiments.json')
memcnn.experiment.factory.load_experiment_config(cfg_fname, ['cifar10', 'resnet110'])
@pytest.mark.skip(reason="Covered more efficiently by test_train.test_run_experiment")
def test_experiment_config_parser(tmp_path):
tmp_data_dir = tmp_path / "tmpdata"
cfg_fname = os.path.join(Config.get_dir(), 'experiments.json')
cfg = memcnn.experiment.factory.load_experiment_config(cfg_fname, ['cifar10', 'resnet110'])
memcnn.experiment.factory.experiment_config_parser(cfg, str(tmp_data_dir), workers=None)
def test_circular_dependency(tmp_path):
p = str(tmp_path / "circular.json")
content = u'{ "circ": { "base": "circ" } }'
with open(p, 'w') as fh:
fh.write(content)
with open(p, 'r') as fh:
assert fh.read() == content
with pytest.raises(RuntimeError):
memcnn.experiment.factory.load_experiment_config(p, ['circ'])
|
pyquil/api/__init__.py | stjordanis/pyquil | 677 | 14015 | <filename>pyquil/api/__init__.py
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Sub-package for facilitating connections to the QVM / QPU.
"""
__all__ = [
"AbstractCompiler",
"BenchmarkConnection",
"EncryptedProgram",
"EngagementManager",
"get_qc",
"list_quantum_computers",
"local_forest_runtime",
"QAM",
"QAMExecutionResult",
"QCSClientConfiguration",
"QCSQuantumProcessor",
"QPU",
"QPUCompiler",
"QuantumComputer",
"QuantumExecutable",
"QVM",
"QVMCompiler",
"WavefunctionSimulator",
]
from qcs_api_client.client import QCSClientConfiguration
from pyquil.api._benchmark import BenchmarkConnection
from pyquil.api._compiler import QVMCompiler, QPUCompiler, QuantumExecutable, EncryptedProgram, AbstractCompiler
from pyquil.api._engagement_manager import EngagementManager
from pyquil.api._qam import QAM, QAMExecutionResult
from pyquil.api._qpu import QPU
from pyquil.api._quantum_computer import (
QuantumComputer,
list_quantum_computers,
get_qc,
local_forest_runtime,
)
from pyquil.api._qvm import QVM
from pyquil.api._wavefunction_simulator import WavefunctionSimulator
from pyquil.quantum_processor import QCSQuantumProcessor
|
notebook/datetime_fromisoformat.py | vhn0912/python-snippets | 174 | 14024 | <gh_stars>100-1000
import datetime
s = '2018-12-31'
d = datetime.date.fromisoformat(s)
print(d)
# 2018-12-31
print(type(d))
# <class 'datetime.date'>
# print(datetime.date.fromisoformat('2018-12'))
# ValueError: Invalid isoformat string: '2018-12'
print(datetime.date.fromisoformat('2018-01-01'))
# 2018-01-01
# print(datetime.date.fromisoformat('2018-1-1'))
# ValueError: Invalid isoformat string: '2018-1-1'
s = '05:00:30.001000'
t = datetime.time.fromisoformat(s)
print(t)
# 05:00:30.001000
print(type(t))
# <class 'datetime.time'>
print(datetime.time.fromisoformat('05'))
# 05:00:00
# print(datetime.time.fromisoformat('5:00:30'))
# ValueError: Invalid isoformat string: '5:00:30'
s = '2018-12-31T05:00:30.001000'
dt = datetime.datetime.fromisoformat(s)
print(dt)
# 2018-12-31 05:00:30.001000
print(type(dt))
# <class 'datetime.datetime'>
print(datetime.datetime.fromisoformat('2018-12-31x05:00:30.001000'))
# 2018-12-31 05:00:30.001000
# print(datetime.datetime.fromisoformat('2018-12-31xx05:00:30.001000'))
# ValueError: Invalid isoformat string: '2018-12-31xx05:00:30.001000'
print(datetime.datetime.fromisoformat('2018-12-31T05'))
# 2018-12-31 05:00:00
print(datetime.datetime.fromisoformat('2018-12-31'))
# 2018-12-31 00:00:00
# print(datetime.datetime.fromisoformat('2018-12-31T5:00'))
# ValueError: Invalid isoformat string: '2018-12-31T5:00'
s = '2018-12-31T05:00:30.001000'
# print(datetime.date.fromisoformat(s))
# ValueError: Invalid isoformat string: '2018-12-31T05:00:30.001000'
# print(datetime.time.fromisoformat(s))
# ValueError: Invalid isoformat string: '2018-12-31T05:00:30.001000'
d = datetime.datetime.fromisoformat(s).date()
print(d)
# 2018-12-31
print(type(d))
# <class 'datetime.date'>
t = datetime.datetime.fromisoformat(s).time()
print(t)
# 05:00:30.001000
print(type(t))
# <class 'datetime.time'>
s = '2018-12-31T05:00:30'
s_basic = s.replace('-', '').replace(':', '')
print(s_basic)
# 20181231T050030
s = '2018-12-31T05:00:30.001000'
s_basic = s.split('.')[0].replace('-', '').replace(':', '')
print(s_basic)
# 20181231T050030
s_ex = datetime.datetime.strptime(s_basic, '%Y%m%dT%H%M%S').isoformat()
print(s_ex)
# 2018-12-31T05:00:30
|
pandas/main.py | monishshah18/python-cp-cheatsheet | 140 | 14032 | <gh_stars>100-1000
"""
Summarize the total cases and total deaths columns.
Sum the country-by-country rows to reproduce the global totals, then report each country's share.
"""
import csv
import pandas
pandas.set_option("display.max_rows", None, "display.max_columns", None)
col_list = ["Total Cases", "Country/ Other", "Total Deaths", "# 9/27/2020"]
df = pandas.read_csv("covidmilliondead.csv", usecols=col_list, thousands=',')
totalCases, totalDeaths = 0,0
for idx, cases,deaths in zip(df["# 9/27/2020"], df["Total Cases"], df["Total Deaths"]):
if idx > 0:
totalCases += cases
if deaths > 0:
totalDeaths += deaths
for idx, country, cases, deaths in zip(df["# 9/27/2020"], df["Country/ Other"], df["Total Cases"], df["Total Deaths"]):
if idx > 0:
print("\n",country)
print("Cases : ", cases, "/", totalCases, " %", "{:.5%}".format(cases/totalCases))
if deaths > 0:
print("Deaths : ", int(deaths), "/", totalDeaths, " %", "{:.5%}".format(deaths/totalDeaths))
print("")
print("Total Cases")
print(totalCases)
print("Total Deaths")
print(totalDeaths) |
Problem009/Python/solution_1.py | drocha87/ProjectEuler | 167 | 14035 | <reponame>drocha87/ProjectEuler
#!/usr/bin/env python
# coding=utf-8
# Python Script
#
# Copyleft © <NAME>
#
#
from __future__ import print_function
"""
Special Pythagorean triplet
Problem 9
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a² + b² = c²
For example, 3² + 4² = 9 + 16 = 25 = 52.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
def decompSum(n):
from itertools import combinations
m = (x for x in range(1, n // 2))
div = [3, 4, 5]
comb = combinations((x for x in m if any(d for d in div if not x % d)), 3)
for a, b, c in comb:
if a + b + c == n and a != b != c:
yield sorted((a, b, c))
def pythagorean(a, b, c):
return (a ** 2 + b ** 2) == c ** 2
def problem9(n):
for a, b, c in decompSum(n):
if pythagorean(a, b, c):
return a * b * c
print(problem9(1000))
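# Expected output: 31875000 (the unique triplet is a=200, b=375, c=425).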
|