max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
hackerrank/all-domains/data-structures/arrays/2d-array-ds/solution.py | arvinsim/coding-site-solutions | 0 | 12795251 | #!/bin/python
#https://www.hackerrank.com/challenges/2d-array
# KEY INSIGHTS
# 1. Variables in list comprehensions are not encapsulated. They could shadow
# local variables if you name them the same
# 2. When looping 2-dimensional arrays, the outer loop should represent the
# y-axis while the inner loop should represent the x-axis
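# Illustrative sketch of insight 1 under Python 2 (which this solution targets):
#   x = 10
#   doubled = [x * 2 for x in range(3)]
#   # x is now 2, because the comprehension variable leaked; Python 3 scopes it away.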
import sys
ARRAY_DIMENSION = 6
HOURGLASS_WIDTH = 3
HOURGLASS_HEIGHT = 3
def get_2d_array_visualization(items):
visualization = ''
for rows in items:
visualization += ' '.join([str(row) for row in rows]) + '\n'
return visualization
def get_array_input():
# return [
# [1, 1, 1, 0, 0, 0],
# [0, 1, 0, 0, 0, 0],
# [1, 1, 1, 0, 0, 0],
# [0, 9, 2, -4, -4, 0],
# [0, 0, 0, -2, 0, 0],
# [0, 0, -1, -2, -4, 0],
# ]
# return [
# [-1, -1, 0, -9, -2, -2, ],
# [-2, -1, -6, -8, -2, -5],
# [-1, -1, -1, -2, -3, -4],
# [-1, -9, -2, -4, -4, -5],
# [-7, -3, -3, -2, -9, -9],
# [-1, -3, -1, -2, -4, -5],
# ]
arr = []
for arr_i in xrange(ARRAY_DIMENSION):
arr_temp = map(int,raw_input().strip().split(' '))
arr.append(arr_temp)
return arr
def is_part_of_hourglass(x, y):
if (x == 1 and y == 0) or (x == 1 and y == 2):
return False
else:
return True
def get_hourglass_sum(hourglass):
s = 0
for row in hourglass:
for item in row:
s += item
return s
def get_hourglass(arr, x, y):
hourglass = [[0 for foo in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)]
for b in xrange(HOURGLASS_HEIGHT):
for a in xrange(HOURGLASS_WIDTH):
if is_part_of_hourglass(a, b):
current_x = a + x
current_y = b + y
hourglass[a][b] = arr[current_x][current_y]
return hourglass
arr = get_array_input()
greatest_sum = None
for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1):
for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1):
hg = get_hourglass(arr, x, y)
shg = get_hourglass_sum(hg)
# print(get_2d_array_visualization(hg))
# print(shg)
# print('==========')
if greatest_sum is None:
greatest_sum = shg
if shg > greatest_sum:
greatest_sum = shg
print(greatest_sum)
| 4.125 | 4 |
psosc.py | AlgoLab/pso-cancer-evolution | 1 | 12795252 | <filename>psosc.py
"""
Particle Swarm Optimization Single Cell inference
Usage:
psosc.py (-i infile) (-c cores) (-k k) (-a alpha) (-b beta)
[-p particles] [-g gamma] [-t iterations] [-d max_deletions] [-e mutfile] [-T tolerance] [-m maxtime] [-I truematrix] [--quiet] [--output output]
psosc.py --help
psosc.py --version
Options:
-i infile Matrix input file
-c cores Number of CPU cores
-k k K value of Dollo(k) model used as phylogeny tree
-a alpha False negative rate in input file or path of the file containing different FN rates for each mutations
-b beta False positive rate
-p particles Number of particles (single or multiple values, separated by commas, for a multiple run); by default it is calculated proportionally to the size of the matrix
-g gamma Loss rate in input file or path of the file containing different GAMMA rates for each mutations [default: 1]
-t iterations Number of iterations (-m argument will be ignored; not used by default)
-d max_deletions Maximum number of total deletions allowed [default: +inf]
-e mutfile Path of the mutation names. If not used, mutations will be named progressively from 1 to the number of mutations (not used by default)
-T tolerance Tolerance, minimum relative improvement (between 0 and 1) in the last iterations in order to keep going, if iterations are not used [default: 0.005]
-m maxtime Maximum time (in seconds) of total PSOSC execution (not used by default)
-I truematrix Actual correct matrix, for algorithm testing (not used by default)
--quiet Doesn't print anything (not used by default)
--output output Limit the output (files created) to: (image | plots | text_file | all) [default: all]
"""
from Helper import Helper
from Particle import Particle
from Tree import Tree
from Data import Data
import os
import sys
import time
from docopt import docopt
from datetime import datetime
import multiprocessing
import threading
import psutil
def main(argv):
arguments = docopt(__doc__, version = "PSOSC-Cancer-Evolution 2.0")
base_dir = "results" + datetime.now().strftime("%Y%m%d%H%M%S")
helper = Helper(arguments)
if helper.multiple_runs:
runs_data = []
for r, n_particles in enumerate(helper.n_particles):
print ("\n\n======= Run number %d =======" % (r+1))
run_dir = base_dir + "/particles%d_run%d" % (n_particles, (r+1))
if not os.path.exists(base_dir):
os.makedirs(base_dir)
data = pso(helper, n_particles)
data.summary(helper, run_dir)
runs_data.append(data)
Data.runs_summary(helper.n_particles, runs_data, base_dir)
else:
data = pso(helper)
data.summary(helper, base_dir)
def pso(helper, n_particles=None):
# assigning process to cores
selected_cores = get_least_used_cores(helper.cores)
assign_to_cores(os.getpid(), selected_cores)
if n_particles is None:
n_particles = helper.n_particles
if not helper.quiet:
print("\n • %d PARTICLES START-UP" % (n_particles))
Tree.set_probabilities(helper.alpha, helper.beta)
data = Data(helper.filename, n_particles, helper.output)
data.pso_start = time.time()
# creating shared memory between processes
manager = multiprocessing.Manager()
assign_to_cores(manager._process.ident, selected_cores)
lock = manager.Lock()
ns = manager.Namespace()
# selecting particles to assign to processes
assigned_numbers = [[] for i in range(helper.cores)]
for i in range(n_particles):
assigned_numbers[i%(helper.cores)].append(i)
# coping data into shared memory
ns.best_swarm = None
ns.swarm_best_likelihoods = []
ns.particle_best_likelihoods = [[] for x in range(n_particles)]  # use the per-run particle count
ns.iterations_performed = data.iterations_performed
ns.stop = False
ns.operations = [2,3]
ns.attach = True
if not helper.quiet:
print("\n • PSO RUNNING...")
print("\t Time\t\t Best likelihood so far")
# creating and starting processes
processes = []
for i in range(helper.cores):
processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores, assigned_numbers[i], data, helper, ns, lock)))
for p in processes:
p.start()
for p in processes:
p.join()
# copying back data from shared memory
data.swarm_best_likelihoods = ns.swarm_best_likelihoods
data.particle_best_likelihoods = ns.particle_best_likelihoods
data.iterations_performed = ns.iterations_performed
data.best = ns.best_swarm.copy()
data.pso_end = time.time()
if not helper.quiet:
print("\n • FINAL RESULTS")
print("\t- time to complete pso with %d particles: %s seconds" % (data.n_particles, str(round(data.get_total_time(), 2))))
print("\t- best likelihood: %s\n" % str(round(data.best.likelihood, 2)))
return data
def start_threads(selected_cores, assigned_numbers, data, helper, ns, lock):
assign_to_cores(os.getpid(), selected_cores)
particles = []
for i in assigned_numbers:
p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i)
p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number)
if ns.best_swarm is None:
ns.best_swarm = p.current_tree.copy()
p.thread = threading.Thread(target = p.run_iterations, args = (helper, ns, lock))
particles.append(p)
for p in particles:
p.thread.start()
for p in particles:
p.thread.join()
def get_least_used_cores(n_cores):
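# Greedily pick the n_cores CPUs with the lowest instantaneous utilisation
# reported by psutil, marking each chosen core as +inf so it is not picked twice.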
cpu_usage = psutil.cpu_percent(percpu=True)
cores = []
for i in range(n_cores):
c = cpu_usage.index(min(cpu_usage))
cores.append(c)
cpu_usage[c] = float("+inf")
return cores
def assign_to_cores(pid, cores):
proc = psutil.Process(pid)
proc.cpu_affinity(cores)
if __name__ == "__main__":
main(sys.argv[1:])
| 2.296875 | 2 |
drf_nested/mixins/base_nestable_mixin.py | promoteinternational/drf-nested | 1 | 12795253 | <reponame>promoteinternational/drf-nested
from rest_framework import serializers
class BaseNestableMixin(serializers.ModelSerializer):
def _get_model_pk(self):
if isinstance(self, serializers.ListSerializer):
model = self.child.Meta.model
else:
model = self.Meta.model
return model._meta.pk.attname
def _set_instance_from_queryset(self, validated_data, queryset):
pk = self._get_model_pk()
self.instance = None
if validated_data and isinstance(validated_data, dict) and pk in validated_data:
try:
instance = queryset.get(pk=validated_data.get(pk))
self.instance = instance
except queryset.model.DoesNotExist:
pass
def _set_instance_from_existing(self, validated_data, instance):
pk = self._get_model_pk()
if validated_data and isinstance(validated_data, dict) and pk in validated_data:
validated_data_pk = validated_data.get(pk)
if self.instance is not None:
model_class = instance.__class__
if self.instance.pk != validated_data_pk:
try:
self.instance = model_class.objects.get(pk=validated_data_pk)
except model_class.DoesNotExist:
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class Meta:
model = None
| 2.015625 | 2 |
eval.py | EkdeepSLubana/OrthoReg | 9 | 12795254 | <reponame>EkdeepSLubana/OrthoReg
# -*- coding: utf-8 -*-
import torch
import torchvision
from torchvision import datasets, models, transforms
import numpy as np
import torch.optim as optim
import os
import shutil
from models import *
from pruner import *
from config import *
from ptflops import get_model_complexity_info
import argparse
######### Parser #########
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", help="architecture model to be analyzed", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56'])
parser.add_argument("--model_path", help="path where the model to be analyzed is stored", default='0')
parser.add_argument("--data_path", help="path to dataset", default='CIFAR100')
parser.add_argument("--pruned", help="is the model to be analyzed a pruned model?", default='False', choices=['True', 'False'])
parser.add_argument("--train_acc", help="evaluate train accuracy", default='False', choices=['True', 'False'])
parser.add_argument("--test_acc", help="evaluate test accuracy", default='False', choices=['True', 'False'])
parser.add_argument("--flops", help="calculate flops in a model", default='False', choices=['True', 'False'])
parser.add_argument("--compression", help="calculate compression ratio for model", default='False', choices=['True', 'False'])
parser.add_argument("--eval_ortho", help="evaluate how orthogonal a model is", default='False', choices=['True', 'False'])
parser.add_argument("--finetune", help="fine-tune a model", default='False', choices=['True', 'False'])
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
criterion = nn.CrossEntropyLoss()
######### Functions to evaluate different properties #########
# Accuracy
def cal_acc(net, use_loader):
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(use_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
return correct / total
# FLOPs
def cal_flops(net):
with torch.cuda.device(0):
flops, params = get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False)
print(' FLOPs: {:<8}'.format(flops))
# Compression Ratio
def cal_compression_ratio(net_path, model):
temp_path = "./temp_models/"
base_model = create_model(name=model, is_pruned=False)
if os.path.exists(temp_path):
shutil.rmtree(temp_path)
os.mkdir(temp_path)
state = {'net': base_model.state_dict()}
torch.save(state, temp_path+'temp_base.pth')
base_size = os.path.getsize(temp_path+'temp_base.pth')
model_size = os.path.getsize(net_path)
print(" Compression ratio: {:.3}".format(base_size / model_size))
shutil.rmtree(temp_path)
# Fine-tune
def finetune(net):
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Orthogonality evaluator
def eval_ortho():
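# Per-layer orthogonality proxy: form the Gram matrix of the flattened filters
# and report ||diag(G)||_1 / ||G||_1; the ratio tends towards 1 as off-diagonal
# correlations between filters vanish.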
if(args.model == 'vgg'):
w_mat = net.module.features[0].weight
w_mat1 = (w_mat.reshape(w_mat.shape[0],-1))
b_mat = net.module.features[0].bias
b_mat1 = (b_mat.reshape(b_mat.shape[0],-1))
params = torch.cat((w_mat1, b_mat1), dim=1)
angle_mat = torch.matmul(torch.t(params), params)
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(" Conv_{ind}: {num:.2}".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item()))
for conv_ind in [3, 7, 10, 14, 17, 21, 24, 28, 31]:
w_mat = net.module.features[conv_ind].weight
w_mat1 = (w_mat.reshape(w_mat.shape[0],-1))
b_mat = net.module.features[conv_ind].bias
b_mat1 = (b_mat.reshape(b_mat.shape[0],-1))
params = torch.cat((w_mat1, b_mat1), dim=1)
angle_mat = torch.matmul(params, torch.t(params))
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(" Conv_{ind}: {num:.2}".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item()))
elif(args.model == 'mobilenet'):
w_mat = net.module.conv1.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
angle_mat = torch.matmul(torch.t(params), params)
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(" Conv_base: {num:.2}".format(num=(L_diag.cpu()/L_angle.cpu()).item()))
for lnum in range(13):
w_mat = net.module.layers[lnum].conv1.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
angle_mat = torch.matmul(params.t(), params)
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(" Conv_{ind} -depthwise: {num:.2}".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item()))
w_mat = net.module.layers[lnum].conv2.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
angle_mat = torch.matmul(params.t(), params)
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(" Conv_{ind} -pointwise: {num:.2}".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item()))
elif(args.model == 'resnet'):
num_blocks = [3,4,6,3]
w_mat = net.module.conv1.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
angle_mat = torch.matmul(torch.t(params), params)
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(" base layer:", (L_diag.cpu()/L_angle.cpu()).item())
mod_id = 0
for module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]:
for b_id in range(num_blocks[mod_id]):
w_mat = module_id[b_id].conv1.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
if(params.shape[1] < params.shape[0]):
params = params.t()
angle_mat = torch.matmul(params, torch.t(params))
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())
w_mat = module_id[b_id].conv2.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
if(params.shape[1] < params.shape[0]):
params = params.t()
angle_mat = torch.matmul(params, torch.t(params))
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())
try:
w_mat = module_id[b_id].shortcut[0].weight
params = (w_mat.reshape(w_mat.shape[0],-1))
if(params.shape[1] < params.shape[0]):
params = params.t()
angle_mat = torch.matmul(params, torch.t(params))
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(' layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())
except:
pass
mod_id += 1
elif(args.model == 'resnet-56'):
num_blocks = [9,9,9]
w_mat = net.module.conv1.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
angle_mat = torch.matmul(torch.t(params), params)
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(" base layer:", (L_diag.cpu()/L_angle.cpu()).item())
mod_id = 0
for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]:
for b_id in range(num_blocks[mod_id]):
w_mat = module_id[b_id].conv1.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
if(params.shape[1] < params.shape[0]):
params = params.t()
angle_mat = torch.matmul(params, torch.t(params))
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())
w_mat = module_id[b_id].conv2.weight
params = (w_mat.reshape(w_mat.shape[0],-1))
if(params.shape[1] < params.shape[0]):
params = params.t()
angle_mat = torch.matmul(params, torch.t(params))
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())
try:
w_mat = module_id[b_id].shortcut[0].weight
params = (w_mat.reshape(w_mat.shape[0],-1))
if(params.shape[1] < params.shape[0]):
params = params.t()
angle_mat = torch.matmul(params, torch.t(params))
L_diag = (angle_mat.diag().norm(1))
L_angle = (angle_mat.norm(1))
print(' layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())
except:
pass
mod_id += 1
# Create model for evaluation
def create_model(name, is_pruned):
if(name == 'vgg'):
if(is_pruned == True):
cfg_p = net_dict['cfg']
net = torch.nn.DataParallel(VGG_p(cfg_p))
else:
net = torch.nn.DataParallel(VGG())
elif(name == 'mobilenet'):
if(is_pruned == True):
cfg_p = net_dict['cfg']
net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:]))
else:
net = torch.nn.DataParallel(MobileNet())
elif(name == 'resnet'):
if(is_pruned == True):
cfg_p = net_dict['cfg']
net = torch.nn.DataParallel(ResPruned(cfg_p))
else:
net = torch.nn.DataParallel(ResNet34())
elif(name == 'resnet-56'):
if(is_pruned == True):
cfg_p = net_dict['cfg']
net = torch.nn.DataParallel(ResPruned_cifar(cfg_p))
else:
net = torch.nn.DataParallel(ResNet56())
return net
######### Print model name #########
print((args.model).upper())
######### Dataloader #########
if(args.train_acc == 'True' or args.test_acc == 'True' or args.finetune == 'True'):
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
transform_test = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
if(args.data_path=='CIFAR100'):
trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
else:
trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
# Testing
def test(net):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
acc = 100. * correct/total
if acc > best_acc:
print("best accuracy:", acc)
best_acc = acc
######### Load network or create new #########
if(args.train_acc == 'True' or args.test_acc == 'True' or args.flops=='True' or args.eval_ortho=='True'):
net_dict = torch.load(args.model_path)
net = create_model(name=args.model, is_pruned=(args.pruned=='True'))
net.load_state_dict(net_dict['net'])
######### FLOPs evaluation #########
if(args.flops == 'True'):
cal_flops(net)
######### Compression ratio evaluation #########
if(args.compression == 'True'):
cal_compression_ratio(net_path=args.model_path, model=args.model)
######### Train accuracy evaluation #########
if(args.train_acc == 'True'):
acc = cal_acc(net, use_loader=trainloader)
print(" Train accuracy: {:.2%}".format(acc))
######### Test accuracy evaluation #########
if(args.test_acc == 'True'):
acc = cal_acc(net, use_loader=testloader)
print(" Test accuracy: {:.2%}".format(acc))
######### Orthogonality evaluation #########
if(args.eval_ortho == 'True'):
eval_ortho()
if(args.finetune == 'True'):
net_dict = torch.load(args.model_path)
net = create_model(name=args.model, is_pruned=(args.pruned=='True'))
net.load_state_dict(net_dict['net'])
base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter
best_acc = 0
lr_ind = 0
epoch = 0
optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd)
while(lr_ind < len(base_sched)):
optimizer.param_groups[0]['lr'] = base_sched[lr_ind]
for n in range(base_epochs[lr_ind]):
print('\nEpoch: {}'.format(epoch))
finetune(net)
test(net)
epoch += 1
lr_ind += 1
| 2.25 | 2 |
ansible_lib/firebrew.py | mrk21/ansible-lib | 3 | 12795255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import generators
from __future__ import division
import pipes
from ansible.module_utils.basic import *
class Firebrew(object):
STATUS_SUCCESS = 0
STATUS_FAILURE = 1
STATUS_NOT_CHANGED = 2
def __init__(self, AnsibleModule = AnsibleModule):
self.module = AnsibleModule(
argument_spec = dict(
state = dict(type='str', default='present', choices=['present', 'absent']),
name = dict(type='str', required=True),
base_dir = dict(type='str'),
profile = dict(type='str'),
firefox = dict(type='str')
)
)
def build_command(self):
params = self.module.params
command = [
self.module.get_bin_path('firebrew'),
{'present': 'install', 'absent': 'uninstall'}[params['state']],
pipes.quote(params['name'])
]
for opt in ['base_dir','profile','firefox']:
if opt in params and params[opt] != None and params[opt].strip() != '':
command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt])))
return ' '.join(command)
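# For example, params {'state': 'present', 'name': 'Vimperator', 'profile': 'default'}
# (hypothetical values) would yield roughly:
#   /path/to/firebrew install Vimperator --profile=default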
def execute(self):
(rc,out,err) = self.module.run_command(self.build_command())
if rc == self.STATUS_SUCCESS:
self.module.exit_json(changed=True)
elif rc == self.STATUS_NOT_CHANGED:
self.module.exit_json(changed=False)
else:
self.module.fail_json(msg = err)
if __name__ == '__main__':
Firebrew().execute()
| 2.0625 | 2 |
main/consecutive-array-elements/consecutive-array-elements.py | EliahKagan/old-practice-snapshot | 0 | 12795256 | <filename>main/consecutive-array-elements/consecutive-array-elements.py
#!/usr/bin/env python3
def read_record():
return list(map(int, input().split()))
def is_consecutive(a):
return max(a) - min(a) + 1 == len(a) == len(frozenset(a))
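# e.g. [4, 2, 3, 5]: max-min+1 == 4 == len == number of distinct values -> True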
for _ in range(int(input())):
input() # don't need n
print('Yes' if is_consecutive(read_record()) else 'No')
| 4 | 4 |
examples/util/lookups.py | OptionMetrics/petl | 495 | 12795257 | from __future__ import division, print_function, absolute_import
# lookup()
##########
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
lkp = etl.lookup(table1, 'foo', 'bar')
lkp['a']
lkp['b']
# if no valuespec argument is given, defaults to the whole
# row (as a tuple)
lkp = etl.lookup(table1, 'foo')
lkp['a']
lkp['b']
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.lookup(table2, ('foo', 'bar'), 'baz')
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.lookup(table1, 'foo', 'bar', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# lookupone()
#############
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
# if the specified key is not unique and strict=False (default),
# the first value wins
lkp = etl.lookupone(table1, 'foo', 'bar')
lkp['a']
lkp['b']
# if the specified key is not unique and strict=True, will raise
# DuplicateKeyError
try:
lkp = etl.lookupone(table1, 'foo', strict=True)
except etl.errors.DuplicateKeyError as e:
print(e)
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz')
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.lookupone(table1, 'foo', 'bar', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# dictlookup()
##############
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
lkp = etl.dictlookup(table1, 'foo')
lkp['a']
lkp['b']
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.dictlookup(table2, ('foo', 'bar'))
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.dictlookup(table1, 'foo', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# dictlookupone()
#################
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
# if the specified key is not unique and strict=False (default),
# the first value wins
lkp = etl.dictlookupone(table1, 'foo')
lkp['a']
lkp['b']
# if the specified key is not unique and strict=True, will raise
# DuplicateKeyError
try:
lkp = etl.dictlookupone(table1, 'foo', strict=True)
except etl.errors.DuplicateKeyError as e:
print(e)
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.dictlookupone(table2, ('foo', 'bar'))
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.dictlookupone(table1, 'foo', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
| 2.46875 | 2 |
OptWind/WindFarm/flow_field.py | ArjunRameshV/OptWind | 1 | 12795258 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import numpy as np
class FlowField(object):
def __init__(self, wf_design, getAkf, height_ref, alpha=0.04,
ws_binned=np.linspace(1, 30, 30),
wd_binned=np.linspace(0, 330, 12),
z0=0.01
):
self.complete_layout = wf_design.complete_layout
self.getAkf = getAkf
self.height_ref = height_ref
self.alpha = alpha
self.ws_binned = ws_binned
self.wd_binned = wd_binned
self.z0=z0
self.wt_types = wf_design.wt_types
self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2],
height_ref)
self.num_wt = wf_design.num_wt
self.num_ws_bin = len(ws_binned)
self.num_wd_bin = len(wd_binned)
self.ws_bin_size = ws_binned[1] - ws_binned[0]
self.wd_bin_size = wd_binned[1] - wd_binned[0]
self.R_list = wf_design.D_list/2
self.Ar_list = wf_design.Ar_list
self.type_list = np.array([int(t) for t in self.complete_layout[:, 3]],
dtype='int')
self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))
self.v_ikl_ideal = np.zeros(
(self.num_wt, self.num_ws_bin, self.num_wd_bin))
self.v_ikl_real = np.zeros(
(self.num_wt, self.num_ws_bin, self.num_wd_bin))
self.pdf_ikl = np.zeros(
(self.num_wt, self.num_ws_bin, self.num_wd_bin))
def wind_shear_log(self, H, H_ref):
return np.log(H/self.z0)/np.log(H_ref/self.z0)
def change_layout(self, complete_layout_new):
""" Assume only locations of turbines changed, and number, hub-height
and types of turbines remained the same."""
self.complete_layout = complete_layout_new
def cal_flow_field(self):
######################################################################
# 1. calculate ideal wind speed
v_ik = np.expand_dims(
np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1),
np.expand_dims(self.ws_binned, axis=0)), axis=-1)
self.v_ikl_ideal = np.concatenate([v_ik
for l_wd in range(self.num_wd_bin)],
axis=-1)
######################################################################
# 2. calculate pdf of local ideal wind speed
x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1)
for l_wd in range(self.num_wd_bin)], axis=-1)
y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1)
for l_wd in range(self.num_wd_bin)], axis=-1)
wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0)
for l_wt in range(self.num_wt)], axis=0)
A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il)
for k_ws in range(self.num_ws_bin):
self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull(
self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il
dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))
dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))
R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))
A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))
#######################################################################
# 3. calculate real wind speed
# calculate M_ijl matrix
for l_wd in range(self.num_wd_bin):
rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0
cos_rotate = np.cos(rotate_angle)
sin_rotate = np.sin(rotate_angle)
x_rotated = (self.complete_layout[:, 0]*cos_rotate +
self.complete_layout[:, 1]*sin_rotate)
y_rotated = (self.complete_layout[:, 1]*cos_rotate -
self.complete_layout[:, 0]*sin_rotate)
downwind_order = np.argsort(x_rotated)
for i_up in range(self.num_wt-1):
index_up = downwind_order[i_up]
index_down = downwind_order[i_up+1:]
dist_down[index_down, index_up, l_wd] = (
x_rotated[index_down] - x_rotated[index_up])
dist_cross[index_down, index_up, l_wd] = np.sqrt(
(y_rotated[index_down] - y_rotated[index_up])**2 +
(self.complete_layout[index_down, 2] -
self.complete_layout[index_up, 2])**2)
R_wake[index_down, index_up, l_wd] = (
self.alpha*dist_down[index_down, index_up, l_wd] +
self.R_list[index_up])
R = np.concatenate([
np.expand_dims(self.R_list[i_wt]*np.ones(
(self.num_wt, self.num_wd_bin)), axis=1)
for i_wt in range(self.num_wt)], axis=1)
R1 = np.concatenate([
np.expand_dims(self.R_list[i_wt]*np.ones(
(self.num_wt, self.num_wd_bin)), axis=0)
for i_wt in range(self.num_wt)], axis=0)
A_ol = self.cal_overlapping_area(R_wake,
R,
dist_cross)
self.M_ijl = np.where(dist_down>0,
(A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4,
0)
# calculate N_jk matrix
v_jk = self.v_ikl_ideal[:, :, 0]
N_jk = np.zeros_like(v_jk)
for m_type in set(self.type_list):
index_cal = self.type_list == m_type
N_jk[index_cal, :] = self.wt_types[m_type].get_Ct(
v_jk[index_cal, :])
N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2
self.v_ikl_real = self.v_ikl_ideal - np.sqrt(
np.einsum('jk, ijl->ikl', N_jk, self.M_ijl))
def cal_flow_field_naive(self):
######################################################################
# 1. calculate ideal wind speed
v_ik = np.expand_dims(
np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1),
np.expand_dims(self.ws_binned, axis=0)), axis=-1)
self.v_ikl_ideal = np.concatenate([v_ik
for l_wd in range(self.num_wd_bin)],
axis=-1)
######################################################################
# 2. calculate pdf of local ideal wind speed
x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1)
for l_wd in range(self.num_wd_bin)], axis=-1)
y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1)
for l_wd in range(self.num_wd_bin)], axis=-1)
wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0)
for l_wt in range(self.num_wt)], axis=0)
A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il)
for k_ws in range(self.num_ws_bin):
self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull(
self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il
#######################################################################
# 3. calculate real wind speed
# calculate M_ijl matrix
for l_wd in range(self.num_wd_bin):
rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0
cos_rotate = np.cos(rotate_angle)
sin_rotate = np.sin(rotate_angle)
x_rotated = (self.complete_layout[:, 0]*cos_rotate +
self.complete_layout[:, 1]*sin_rotate)
y_rotated = (self.complete_layout[:, 1]*cos_rotate -
self.complete_layout[:, 0]*sin_rotate)
downwind_order = np.argsort(x_rotated)
for i_up in range(self.num_wt-1):
index_up = downwind_order[i_up]
index_down = downwind_order[i_up+1:]
dist_down = x_rotated[index_down] - x_rotated[index_up]
dist_cross = np.sqrt(
(y_rotated[index_down] - y_rotated[index_up])**2 +
(self.complete_layout[index_down, 2] -
self.complete_layout[index_up, 2])**2)
R_wake = self.alpha*dist_down + self.R_list[index_up]
A_ol = self.cal_overlapping_area(R_wake,
self.R_list[index_down],
dist_cross)
self.M_ijl[index_down, index_up, l_wd] = (
(A_ol/self.Ar_list[index_down])**2 /
(1 + self.alpha*dist_down/self.R_list[index_up])**4)
# calculate N_jk matrix
v_jk = self.v_ikl_ideal[:, :, 0]
N_jk = np.zeros_like(v_jk)
for m_type in set(self.type_list):
index_cal = self.type_list == m_type
N_jk[index_cal, :] = self.wt_types[m_type].get_Ct(
v_jk[index_cal, :])
N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2
self.v_ikl_real = self.v_ikl_ideal - np.sqrt(
np.einsum('jk, ijl->ikl', N_jk, self.M_ijl))
def cal_overlapping_area(self, R1, R2, d):
""" Calculate the overlapping area of two circles with radius R1 and
R2, centers distanced d.
The calculation formula can be found in Eq. (A1) of :
[Ref] <NAME>, <NAME>, Solving the wind farm layout optimization
problem using Random search algorithm, Renewable Energy 78 (2015)
182-192
Note that however there are typos in Equation (A1), '2' before alpha
and beta should be 1.
Parameters
----------
R1: array:float
Radius of the first circle [m]
R2: array:float
Radius of the second circle [m]
d: array:float
Distance between two centers [m]
Returns
-------
A_ol: array:float
Overlapping area [m^2]
"""
# treat all input as array
R1, R2, d = np.array(R1), np.array(R2), np.array(d),
A_ol = np.zeros_like(R1)
p = (R1 + R2 + d)/2.0
# make sure R_big >= R_small
Rmax = np.where(R1 < R2, R2, R1)
Rmin = np.where(R1 < R2, R1, R2)
# full wake cases
index_fullwake = (d<= (Rmax -Rmin))
A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2
# partial wake cases
index_partialwake = np.logical_and(d > (Rmax -Rmin),
d < (Rmin + Rmax))
alpha = np.arccos(
(Rmax[index_partialwake]**2.0 + d[index_partialwake]**2
- Rmin[index_partialwake]**2)
/(2.0*Rmax[index_partialwake]*d[index_partialwake]) )
beta = np.arccos(
(Rmin[index_partialwake]**2.0 + d[index_partialwake]**2
- Rmax[index_partialwake]**2)
/(2.0*Rmin[index_partialwake]*d[index_partialwake]) )
A_triangle = np.sqrt( p[index_partialwake]*
(p[index_partialwake]-Rmin[index_partialwake])*
(p[index_partialwake]-Rmax[index_partialwake])*
(p[index_partialwake]-d[index_partialwake]) )
A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2
+ beta*Rmin[index_partialwake]**2
- 2.0*A_triangle )
return A_ol
def cal_pdf_Weibull(self, v, A, k):
return ((k / A) * (v / A) ** (k - 1) * np.exp(-(v / A) ** k))
| 2.265625 | 2 |
lambdata/fibo.py | mudesir/lambdata-mudesir | 0 | 12795259 | """ Fibonancci series up to n"""
def fib(n): # write Fibonacci series up to n
a, b = 0, 1
while a < n:
print(a, end=' ')
a, b = b, a + b
print()
def fib2(n): # return Fibonacci series up to n
result = []
a, b = 0, 1
while a < n:
result.append(a)
a, b = b, a + b
return result
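# e.g. fib2(10) -> [0, 1, 1, 2, 3, 5, 8]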
import random
def num(n1, n2):
n3=random.randint(n1, n2)
return n3 | 4.03125 | 4 |
DataTypes.py | jingr1/SelfDrivingCar | 0 | 12795260 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-10-14 19:45:05
# @Author : jingray (<EMAIL>)
# @Link : http://www.jianshu.com/u/01fb0364467d
# @Version : $Id$
import os
# STRINGS
print("STRINGS")
my_string_1 = "hello"
my_string_2 = 'world'
my_multiline_string = """
Dear World,
Hello. I am a multiline python string.
I'm enclosed in triple quotes. I'd write
them here, but that would end the string!
I know! I'll use a slash as an escape character.
Triple quotes look like this: \"\"\"
Sincerely,
Python
"""
newline_character = "\n"
print(my_string_1, my_string_2)
print(my_multiline_string)
print(newline_character)
print("-----------")
print(newline_character)
# NUMBERS AND BOOLEANS
print("NUMBERS")
my_float = 0.5
my_integer = 7
my_negative = -3.5
my_fraction = 1/2
# what do you think THIS line of code will assign to the variable
# does_half_equal_point_five?
does_half_equal_point_five = (my_fraction == my_float)
print("The absolute value of", my_negative, "is", abs(my_negative))
print(my_integer, "squared is equal to", my_integer ** 2)
print("Does", my_fraction, "equal", my_float, "?", does_half_equal_point_five)
for left_num in range(10):
for right_num in range(10):
product = left_num * right_num
print(left_num, "x", right_num, "=", product)
print ("\n")
#List
my_list = [1, 2, 3, "a", "b", "c"]
print("my_list is:", my_list)
print("Enumerating a list...")
for i, item in enumerate(my_list):
print("item number", i, "is", item)
print("Another way to enumerate using a list 'method'...")
for item in my_list:
index = my_list.index(item)
print("item", item, "has index", index)
#List Comprehensions
numbers_0_to_9 = [x for x in range(10)]
print("Numbers 0 to 9", numbers_0_to_9)
squares = [x * x for x in range(10)]
print("Squares ", squares)
odds = [x for x in range(10) if x % 2 == 1]
print("Odds ", odds)
# This example uses a data type called a namedtuple which is similar to a struct data type in other languages.
from collections import namedtuple
Person = namedtuple("Person", ["name", "age", "gender"])
people = [
Person("Andy", 30, "m"),
Person("Ping", 1, "m"),
Person("Tina", 32, "f"),
Person("Abby", 14, "f"),
Person("Adah", 13, "f"),
Person("Sebastian", 42, "m"),
Person("Carol" , 68, "f"),
]
# first, let's show how this namedtuple works.
andy = people[0]
print("name: ", andy.name)
print("age: ", andy.age)
print("gender:", andy.gender)
# now let's show what we can do with a list comprehension
#
male_names = [person.name for person in people if person.gender=="m"]
print("Male names:", male_names)
teen_names = [p.name for p in people if 13 <= p.age <= 18 ]
print("Teen names:", teen_names)
# random
import random as rd
a = rd.random()
b = rd.random()
c = rd.random()
print("a is", a)
print("b is", b)
print("c is", c)
| 4.40625 | 4 |
pitch_table.py | andrewtron3000/jampy | 0 | 12795261 | pitch_table = {
"C-1" : 8.176,
"C#-1": 8.662,
"D-1" : 9.177,
"D#-1": 9.723,
"E-1" : 10.301,
"F-1" : 10.913,
"F#-1": 11.562,
"G-1" : 12.250,
"G#-1": 12.978,
"A-1" : 13.750,
"A#-1": 14.568,
"B-1" : 15.434,
"C0" : 16.352,
"C#0" : 17.324,
"D0" : 18.354,
"D#0" : 19.445,
"E0" : 20.602,
"F0" : 21.827,
"F#0" : 23.125,
"G0" : 24.500,
"G#0" : 25.957,
"A0" : 27.500,
"A#0" : 29.135,
"B0" : 30.868,
"C1" : 32.703,
"C#1" : 34.648,
"D1" : 36.708,
"D#1" : 38.891,
"E1" : 41.203,
"F1" : 43.654,
"F#1" : 46.249,
"G1" : 48.999,
"G#1" : 51.913,
"A1" : 55.000,
"A#1" : 58.270,
"B1" : 61.735,
"C2" : 65.406,
"C#2" : 69.296,
"D2" : 73.416,
"D#2" : 77.782,
"E2" : 82.407,
"F2" : 87.307,
"F#2" : 92.499,
"G2" : 97.999,
"G#2" : 103.826,
"A2" : 110.000,
"A#2" : 116.541,
"B2" : 123.471,
"C3" : 130.813,
"C#3" : 138.591,
"D3" : 146.832,
"D#3" : 155.563,
"E3" : 164.814,
"F3" : 174.614,
"F#3" : 184.997,
"G3" : 195.998,
"G#3" : 207.652,
"A3" : 220.000,
"A#3" : 233.082,
"B3" : 246.942,
"C4" : 261.626,
"C#4" : 277.183,
"D4" : 293.665,
"D#4" : 311.127,
"E4" : 329.628,
"F4" : 349.228,
"F#4" : 369.994,
"G4" : 391.995,
"G#4" : 415.305,
"A4" : 440.000,
"A#4" : 466.164,
"B4" : 493.883,
"C5" : 523.251,
"C#5" : 554.365,
"D5" : 587.330,
"D#5" : 622.254,
"E5" : 659.255,
"F5" : 698.456,
"F#5" : 739.989,
"G5" : 783.991,
"G#5" : 830.609,
"A5" : 880.000,
"A#5" : 932.328,
"B5" : 987.767,
"C6" : 1046.502,
"C#6" : 1108.731,
"D6" : 1174.659,
"D#6" : 1244.508,
"E6" : 1318.510,
"F6" : 1396.913,
"F#6" : 1479.978,
"G6" : 1567.982,
"G#6" : 1661.219,
"A6" : 1760.000,
"A#6" : 1864.655,
"B6" : 1975.533,
"C7" : 2093.005,
"C#7" : 2217.461,
"D7" : 2349.318,
"D#7" : 2489.016,
"E7" : 2637.020,
"F7" : 2793.826,
"F#7" : 2959.955,
"G7" : 3135.963,
"G#7" : 3322.438,
"A7" : 3520.000,
"A#7" : 3729.310,
"B7" : 3951.066,
"C8" : 4186.009,
"C#8" : 4434.922,
"D8" : 4698.636,
"D#8" : 4978.032,
"E8" : 5274.041,
"F8" : 5587.652,
"F#8" : 5919.911,
"G8" : 6271.927,
"G#8" : 6644.875,
"A8" : 7040.000,
"A#8" : 7458.620,
"B8" : 7902.133,
"C9" : 8372.018,
"C#9" : 8869.844,
"D9" : 9397.273,
"D#9" : 9956.063,
"E9" : 10548.08,
"F9" : 11175.30,
"F#9" : 11839.82,
"G9" : 12543.85,
}
| 1.609375 | 2 |
code/utils/memory_file_utils.py | ahillbs/minimum_scan_cover | 0 | 12795262 | """These utils can be used to get an StringIO objects that
can be written like a file but does not close at the end of a "with" statement.
Objects can be retrieved with the virtual path.
This was implemented to use in configargparser in mind.
To use it, call for example:
holder = StringIOHolder()
parser = configargparse.ArgumentParser(description="Parser for the instance evolver", config_file_open_func=holder)
...
parsed = parser.parse_args()
parser.write_config_file(parsed, ["virt_path"])
holder["virt_path"].getvalue() # Holds the config data
"""
import io
class NotWithCloseStringIO(io.StringIO):
"""This class is just the normal StringIO with the exception of not closing the memory file on exit of a "with" statement
"""
def __exit__(self, type, value, traceback):
pass
class StringIOHolder():
"""Holds NotWithCloseStringIO objects and can be called to replace an "open" call and write to memory file.
File content is then
"""
def __init__(self):
self._string_ios = {}
def __call__(self, virt_path, bla):
self._string_ios[virt_path] = NotWithCloseStringIO()
return self._string_ios[virt_path]
def __getitem__(self, key):
return self._string_ios[key]
def close(self):
for key in self._string_ios:
self._string_ios[key].close() | 3.28125 | 3 |
tests/lib/test_time_util.py | daisuke19891023/covid19-yamanashi-scraping | 4 | 12795263 | import pytest
from src.lib.time_util import TimeUtil
import datetime
@pytest.fixture(scope="module", autouse=True)
def tmu_object():
tmu = TimeUtil()
yield tmu
class TestTimeUtil:
@pytest.mark.parametrize("test_input, expected_wareki, expected_other", [(
'令和2年10月31日',
'令和',
'2年10月31日'
),
('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)])
def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other):
wareki, other = tmu_object.get_wareki(test_input)
assert wareki == expected_wareki
assert other == expected_other
def test_get_ymd_int_each(self, tmu_object):
result = tmu_object.get_ymd_int_each('2年3月9日')
assert result == [2, 3, 9]
def test_get_ymd_int_each_2020(self, tmu_object):
result = tmu_object.get_ymd_int_each('3月1日', need_year=False)
assert result == [3, 1]
def test_parse_date_span(self, tmu_object):
target_char = "test1~ \ntest2"
result = tmu_object.parse_date_span(target_char)
assert result == ["test1", "test2"]
def test_get_ad_dt_fmt(self, tmu_object):
iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29)
assert iso_format == "2020-04-29T00:00:00+09:00"
def test_get_ad_date_iso_fmt(self, tmu_object):
iso_format = tmu_object.get_ad_date_iso_fmt(4, 3)
assert iso_format == "2020-04-03T00:00:00+09:00"
def test_get_ad_default_year_dt_fmt(self, tmu_object):
datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3)
assert datetime_format == datetime.datetime(
2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST'))
def test_convert_wareki_to_ad(self, tmu_object):
result = tmu_object.convert_wareki_to_ad('令和2年10月23日')
assert result == "2020-10-23T00:00:00+09:00"
def test_convert_wareki_to_ad_error(self, tmu_object):
with pytest.raises(ValueError):
tmu_object.convert_wareki_to_ad('大正2年10月23日')
@pytest.mark.parametrize(
"pattern, end, start, need_day, expected", [
("No_start_No_needDay", datetime.datetime(2020, 3, 2), None, False, [{"日付": "2020-03-01T00:00:00+09:00",
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0}]),
("start_No_needDay", datetime.datetime(
2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone(
datetime.timedelta(hours=9), name='JST')), False, [{"日付": "2020-03-01T00:00:00+09:00",
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0}]),
("start_needDay", datetime.datetime(
2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone(
datetime.timedelta(hours=9), name='JST')), True, [{"日付": "2020-03-01T00:00:00+09:00", "day": 1,
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0, "day": 2}]),
("NO_start_needDay", datetime.datetime(
2020, 3, 2), None, True, [{"日付": "2020-03-01T00:00:00+09:00", "day": 1,
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0, "day": 2}])
]
)
def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected):
print(pattern)
result = tmu_object.create_dt_dict(
end, start=start, need_day=need_day)
assert result == expected
def test_get_dt_dict_from_text(self, tmu_object):
target_char = "3月1日~ \n3月2日"
result = tmu_object.get_dt_dict_from_text(target_char)
assert result == [{"日付": "2020-03-01T00:00:00+09:00", "day": 1,
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0, "day": 2}]
if __name__ == '__main__':
pytest.main(['-v', __file__])
| 2.484375 | 2 |
scripts/Portfolio.py | jcoffi/FuturesAndOptionsTradingSimulation | 14 | 12795264 |
class Portfolio:
# define array of items that are the trades in the portfolio
# run-time interpretation will handle polymorphism for me
Trades = []
Cash = 0.0
Name = ''
def __init__(self,name=''):
self.Name = name
self.Trades = []
self.Cash = 0.0
def Append(self,X):
self.Trades.append(X)
def TradeCount(self):
return len(self.Trades)
def NPV(self,SCENARIO):
dblTotal = 0.0
for trade in self.Trades:
dblTotal += trade.NPV(SCENARIO)
return dblTotal
def Deltas(self,SCENARIO,RISK):
for trade in self.Trades:
trade.Deltas(SCENARIO,RISK)
def Gammas(self,SCENARIO,RISK):
for trade in self.Trades:
trade.Gammas(SCENARIO,RISK)
def Vegas(self,SCENARIO,RISK):
for trade in self.Trades:
trade.Vegas(SCENARIO,RISK)
def Rhos(self,SCENARIO,RISK):
for trade in self.Trades:
trade.Rhos(SCENARIO,RISK)
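# Minimal usage sketch (assumes trade objects exposing NPV/Deltas/... methods):
#   book = Portfolio('demo')
#   book.Append(some_future_or_option_trade)
#   total_value = book.NPV(scenario)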
| 3.125 | 3 |
tests/api/testDepositLog.py | starkbank/sdk-python | 6 | 12795265 | <reponame>starkbank/sdk-python<filename>tests/api/testDepositLog.py
import starkbank
from unittest import TestCase, main
from starkbank.error import InputErrors
from tests.utils.user import exampleProject
starkbank.user = exampleProject
class TestDepositLogGet(TestCase):
def test_success(self):
logs = list(starkbank.deposit.log.query(limit=10))
logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs}, types={log.type for log in logs}))
print("Number of logs:", len(logs))
class TestDepositLogInfoGet(TestCase):
def test_success(self):
logs = starkbank.deposit.log.query()
log_id = next(logs).id
logs = starkbank.deposit.log.get(id=log_id)
def test_fail_invalid_log(self):
log_id = "123"
with self.assertRaises(InputErrors) as context:
log = starkbank.deposit.log.get(id=log_id)
errors = context.exception.errors
for error in errors:
print(error)
self.assertEqual('invalidDepositLog', error.code)
self.assertEqual(1, len(errors))
if __name__ == '__main__':
main()
| 3.078125 | 3 |
main.py | shizacat/pdf-add-watermark | 0 | 12795266 | #!/usr/bin/env python3
"""
sudo apt-get install libqpdf-dev
"""
import zlib
import argparse
import pikepdf
from pikepdf import Pdf, PdfImage, Name
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib import units
class PdfWatermark:
def __init__(self, pdf_in: str, pdf_out: str, text: str):
self.pdf_in = pdf_in
self.pdf_out = pdf_out
self.pdf_watermark = "wm.pdf"
self.wm_font_size = 20
self.wm_text = text
self.wm_alpha = 0.2
def apply(self):
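# Render the watermark text onto a one-page helper PDF, then stamp that page
# beneath every page of the input document via pikepdf's add_underlay.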
self._create_watermark_pdf()
with pikepdf.open(self.pdf_in) as pdf_main:
with pikepdf.open(self.pdf_watermark) as pdf_wm:
for page in pdf_main.pages:
page.add_underlay(pdf_wm.pages[0])
pdf_main.save(self.pdf_out)
def _create_watermark_pdf(self):
c = canvas.Canvas(self.pdf_watermark)
pdfmetrics.registerFont(
TTFont('times new roman', 'Times New Roman.ttf'))
c.setFont('times new roman', self.wm_font_size)
pw, ph = c._pagesize
c.setFillGray(0.5, self.wm_alpha)
c.saveState()
c.translate(500, 100)
c.rotate(45)
c.drawCentredString(pw / 2 - 50, ph - 400, self.wm_text)
c.restoreState()
c.save()
def main_cli():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input",
type=str,
required=True,
help="The PDF file in which will be inserted watermark"
)
parser.add_argument(
"--out",
type=str,
required=True,
help="The PDF file in which will be saved result"
)
parser.add_argument(
"--text",
type=str,
required=True,
help="The text of watermark"
)
args = parser.parse_args()
srv = PdfWatermark(args.input, args.out, args.text)
srv.apply()
if __name__ == "__main__":
main_cli()
| 2.484375 | 2 |
code/test1-eng.py | jdellert/ccnc | 0 | 12795267 | <reponame>jdellert/ccnc<gh_stars>0
# TEST 1: Validation of shared bigram significance test on English data
from ccnc.algorithm import ccnc_statistic
from ccnc.data import LexicalDataset, ShuffledVariant
from ccnc.filters import AnySubsequenceFilter
from clics2.model import Clics2Model
import statistics
# define segments for English IPA representation (determines tokenization,
# and thereby what counts as a shared two-segment subsequence)
english_segments = ["a","ɑ","ɒ","ɑː","æ","ʌ","aʊ","b","d","dʒ","ð","ɛ","ə","eɪ","ɛə","f",
"g","h","ɪ","i","iː","aɪ","ɪə","j","k","l","m","n","ŋ","ɔː","əʊ","ɔɪ",
"p","r","s","ʃ","t","tʃ","θ","u","ʊ","uː","ɜː","ʊə","w","v","z"]
if __name__ == '__main__':
eng_lexicon = LexicalDataset("../eng-data/english.tsv",english_segments)
concepts = sorted(eng_lexicon.concept_to_forms.keys())
print(str(len(concepts)) + " concepts: " + str(concepts))
# load the CLICS2 network from the associated (data-specific) model files
network = Clics2Model("../clics-data/clics2-network-ids.txt", "../clics-data/clics2-network-edges.txt")
any_bigram_filter = AnySubsequenceFilter(2)
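# Permutation test: the true CCNC statistic is compared against scores from
# shuffled form-concept assignments; the p-value is the fraction of resampled
# scores above the true score, and the z-score standardises the true score
# against the resampled mean and standard deviation.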
print("English:")
true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True)
num_samples = 1000
num_scores_above_true_score = 0
scores = list()
for i in range(0,num_samples):
pseudo_eng = ShuffledVariant(eng_lexicon)
resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False)
scores.append(resampled_score)
if resampled_score > true_score:
num_scores_above_true_score += 1
print("p-value: " + str(num_scores_above_true_score/num_samples))
mu = sum(scores)/len(scores)
sigma = statistics.stdev(scores)
zscore = (true_score - mu)/sigma
print("z-score: " + str(zscore))
print()
| 2.34375 | 2 |
hw_asr/model/quartznet_model.py | art591/dla_asr | 0 | 12795268 | <reponame>art591/dla_asr<gh_stars>0
from torch import nn
from torch.nn import Sequential
from hw_asr.base import BaseModel
class TCSConv(nn.Module):
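"""Time-channel separable 1D convolution (QuartzNet-style): a depthwise Conv1d
followed by a 1x1 pointwise Conv1d, then BatchNorm1d and an optional activation;
with separable=False it reduces to a single plain Conv1d."""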
def __init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True):
super().__init__()
tcsconv = []
if separable:
tcsconv += [
nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation),
nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation)
]
else:
tcsconv += [
nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation)
]
tcsconv.append(nn.BatchNorm1d(out_channels))
if activation is not None:
tcsconv.append(activation())
self.tcsconv = nn.Sequential(*tcsconv)
def forward(self, x):
return self.tcsconv(x)
class ResidualTCSConvBlock(nn.Module):
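"""num_blocks TCSConv layers whose output is summed with a 1x1-conv + BatchNorm
projection of the block input before the final activation (a residual B-block)."""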
def __init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU):
super().__init__()
self.num_blocks = num_blocks
self.layers = []
for i in range(num_blocks):
if i + 1 == num_blocks:
self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None))
continue
self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation))
self.layers = nn.Sequential(*self.layers)
self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'),
nn.BatchNorm1d(out_channels))
self.last_activation = activation()
def forward(self, x):
y = self.layers(x)
return self.last_activation(self.res_block(x) + y)
class Quartznet(BaseModel):
def __init__(self, n_feats, n_class, *args, **kwargs):
super().__init__(n_feats, n_class, *args, **kwargs)
self.model = nn.Sequential(*
[
nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16),
ResidualTCSConvBlock(256, 256, 33, 5),
ResidualTCSConvBlock(256, 256, 39, 5),
ResidualTCSConvBlock(256, 512, 51, 5),
ResidualTCSConvBlock(512, 512, 63, 5),
ResidualTCSConvBlock(512, 512, 75, 5),
TCSConv(512, 512, 87, dilation=2),
TCSConv(512, 1024, 1, separable=False),
nn.Conv1d(1024, n_class, kernel_size=1, padding='same')
]
)
def forward(self, spectrogram, *args, **kwargs):
res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2)
return res
def transform_input_lengths(self, input_lengths):
return input_lengths // 2
class QuartznetSmall(BaseModel):
def __init__(self, n_feats, n_class, *args, **kwargs):
super().__init__(n_feats, n_class, *args, **kwargs)
self.model = nn.Sequential(*
[
nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16),
ResidualTCSConvBlock(256, 256, 33, 5),
TCSConv(256, 256, 1, separable=False),
nn.Conv1d(256, n_class, kernel_size=1, padding='same')
]
)
def forward(self, spectrogram, *args, **kwargs):
res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2)
return res
def transform_input_lengths(self, input_lengths):
return input_lengths // 2
| 1.96875 | 2 |
app/migrations/0007_auto_20190909_1207.py | LuoBingjun/Pic_demo | 1 | 12795269 | <reponame>LuoBingjun/Pic_demo
# Generated by Django 2.2 on 2019-09-09 04:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20190908_1239'),
]
operations = [
migrations.RemoveField(
model_name='record',
name='result',
),
migrations.AddField(
model_name='record',
name='classify',
field=models.CharField(blank=True, max_length=128),
),
]
| 1.578125 | 2 |
Introduction to Artificial Intelligence/PA3/src/rnn.py | youyl/Programming-Assignments-THU | 0 | 12795270 | <reponame>youyl/Programming-Assignments-THU<filename>Introduction to Artificial Intelligence/PA3/src/rnn.py<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
class Rnn(nn.Module):
def __init__(self):
super(Rnn,self).__init__()
self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True)
self.final=nn.Linear(600,8)
def forward(self,x):
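# The LSTM call below returns tmp1 (per-timestep hidden states, [seq_len, batch, 2*150])
# and tmp2 = (h_n, c_n); concatenating the first and last timestep outputs gives a
# fixed 600-dim sequence representation for the linear classifier.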
tmp1,tmp2=self.lstm(x)#[500,45,300]
x=torch.cat((tmp1[0],tmp1[-1]),1)#[45,600]
output=self.final(x)#[45,8]
return output | 3.78125 | 4 |
scholarship_graph/profiles.py | Tutt-Library/cc-scholarship-graph | 1 | 12795271 | """Profiles for Scholarship App"""
__author__ = "<NAME>"
import base64
import bibcat
import datetime
import hashlib
import io
import os
import pprint
import smtplib
import subprocess
import threading
import uuid
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import mimetypes
import click
import rdflib
import requests
from bs4 import BeautifulSoup
from flask import current_app
from github import Github, GithubException
import utilities
from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI
from .sparql import add_qualified_generation, add_qualified_revision
BF = rdflib.Namespace("http://id.loc.gov/ontologies/bibframe/")
CITE = rdflib.Namespace("https://www.coloradocollege.edu/library/ns/citation/")
PROV = rdflib.Namespace("http://www.w3.org/ns/prov#")
SCHEMA = rdflib.Namespace("http://schema.org/")
class GitProfile(object):
def __init__(self, config):
self.graph_hashes = {}
cc_github = Github(config.get("GITHUB_USER"),
config.get("GITHUB_PWD"))
self.triplestore_url = config.get("TRIPLESTORE_URL")
self.tutt_github = cc_github.get_organization("Tutt-Library")
# Start retrieving and parsing latest RDF for current academic year
# and CC people
now = datetime.datetime.utcnow()
if now.month < 7:
start_year = now.year - 1
end_year = now.year
else:
start_year = now.year
end_year = now.year + 1
self.current_year_path = "/KnowledgeGraph/cc-{0}-{1}.ttl".format(
start_year, end_year)
self.current_year = rdflib.Graph()
self.cc_people = rdflib.Graph()
self.tiger_repo = self.tutt_github.get_repo("tiger-catalog")
for content in self.tiger_repo.get_dir_contents("/KnowledgeGraph/"):
raw_turtle = self.__get_content__("tiger_repo",
content)
if content.name.startswith(self.current_year_path.split("/")[-1]):
self.current_year_git = content
self.current_year.parse(data=raw_turtle,
format='turtle')
if content.name.startswith("cc-people"):
self.cc_people_git = content
self.cc_people.parse(data=raw_turtle,
format='turtle')
self.graph_hashes["cc_people"] = hashlib.sha1(
self.cc_people.serialize(format='n3')).hexdigest()
self.graph_hashes["current_year"] = hashlib.sha1(
self.current_year.serialize(format='n3')).hexdigest()
# Start retrieving and parsing latest RDF for creative works,
# research statements, and FAST subjects
self.creative_works = rdflib.Graph()
self.research_statements = rdflib.Graph()
self.fast_subjects = rdflib.Graph()
self.scholarship_repo = self.tutt_github.get_repo("cc-scholarship-graph")
for content in self.scholarship_repo.get_dir_contents("/data/"):
raw_turtle = self.__get_content__("scholarship_repo",
content)
if content.name.startswith("cc-research-statements"):
self.research_statements_git = content
self.research_statements.parse(
data=raw_turtle,
format='turtle')
if content.name.startswith("cc-fast-subjects"):
self.fast_subjects_git = content
self.fast_subjects.parse(
data=raw_turtle,
format='turtle')
if content.name.startswith("creative-works"):
self.creative_works_git = content
self.creative_works.parse(
data=raw_turtle,
format='turtle')
self.graph_hashes["creative_works"] = hashlib.sha1(
self.creative_works.serialize(format='n3')).hexdigest()
self.graph_hashes["research_statements"] = hashlib.sha1(
self.research_statements.serialize(format='n3')).hexdigest()
self.graph_hashes["fast_subjects"] = hashlib.sha1(
self.fast_subjects.serialize(format='n3')).hexdigest()
def __get_content__(self, repo_name, content):
raw_turtle = None
try:
raw_turtle = content.decoded_content
except GithubException:
repo = getattr(self, repo_name)
blob = repo.get_git_blob(content.sha)
raw_turtle = base64.b64decode(blob.content)
return raw_turtle
def __save_graph__(self, **kwargs):
git_repo = kwargs.get("git_repo")
file_path = kwargs.get("file_path")
graph_name = kwargs.get("graph_name")
branch = kwargs.get("branch")
message = kwargs.get("message", "Updating {}".format(graph_name))
graph = getattr(self, graph_name)
graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest()
if graph_sha1 == self.graph_hashes[graph_name]:
return
git_graph = getattr(self, "{}_git".format(graph_name))
if branch:
git_repo.update_file(file_path,
message,
graph.serialize(format='turtle'),
git_graph.sha,
branch=branch)
else:
git_repo.update_file(file_path,
message,
graph.serialize(format='turtle'),
git_graph.sha)
def update_all(self, person_label, action="Add", connection=None):
self.__save_graph__(
git_repo=self.tiger_repo,
file_path="/KnowledgeGraph/cc-people.ttl",
graph_name="cc_people",
message="{} {} to CC People".format(action, person_label))
self.__save_graph__(
git_repo=self.tiger_repo,
file_path=self.current_year_path,
graph_name="current_year",
message="{} person to Department for school year".format(action))
self.__save_graph__(
git_repo=self.scholarship_repo,
file_path="/data/cc-research-statements.ttl",
graph_name="research_statements",
message="{} Research Statement for {}".format(
action, person_label))
self.__save_graph__(
git_repo=self.scholarship_repo,
file_path ="/data/cc-fast-subjects.ttl",
graph_name="fast_subjects",
message="Fast subject added")
self.__save_graph__(
git_repo=self.scholarship_repo,
file_path ="/data/creative-works.ttl",
graph_name="creative_works",
message="Creative Works added")
if connection:
self.__reload_triplestore__(connection)
def __reload_triplestore__(self, config_mgr):
data_upload = []
for row in config_mgr.get("CONNECTIONS"):
if row.get("name").startswith("datastore"):
for directory_row in row.get("data_upload"):
data_upload.append(directory_row[1])
# Pull in the latest changes in each repository
for directory in data_upload:
os.chdir(directory)
result = subprocess.run(['git', 'pull', 'origin', 'master'])
click.echo(result.returncode, result.stdout)
config_mgr.conns.datastore.mgr.reset()
class ProfileUpdateThread(threading.Thread):
def __init__(self, **kwargs):
threading.Thread.__init__(self)
config = kwargs.get("config")
cc_github = Github(config.get("GITHUB_USER"),
config.get("GITHUB_PWD"))
self.tutt_github = cc_github.get_organization("Tutt-Library")
self.statement_msg = kwargs.get("msg")
self.person_iri = kwargs.get("person")
self.research_statements = rdflib.Graph()
self.fast_subjects = rdflib.Graph()
self.profile = kwargs.get("profile")
self.scholarship_repo = self.tutt_github.get_repo("cc-scholarship-graph")
for content in self.scholarship_repo.get_dir_contents("/data/"):
try:
raw_turtle = content.decoded_content
except GithubException:
blob = self.scholarship_repo.get_git_blob(content.sha)
raw_turtle = base64.b64decode(blob.content)
if content.name.startswith("cc-research-statements"):
self.research_statements_git = content
self.research_statements.parse(
data=raw_turtle,
format='turtle')
if content.name.startswith("cc-fast-subjects"):
self.fast_subjects_git = content
self.fast_subjects.parse(
data=raw_turtle,
format='turtle')
def __save_graph__(self, **kwargs):
file_path = kwargs.get("file_path")
branch = kwargs.get("branch")
graph_name = kwargs.get("graph_name")
graph = getattr(self, graph_name)
message = kwargs.get("message", "Updating {}".format(graph_name))
git_graph = getattr(self, "{}_git".format(graph_name))
if branch:
self.scholarship_repo.update_file(file_path,
message,
graph.serialize(format='turtle'),
git_graph.sha,
branch=branch)
else:
self.scholarship_repo.update_file(file_path,
message,
graph.serialize(format='turtle'),
git_graph.sha)
def __update_fast_subjects__(self):
existing_subjects, new_subjects = set(), set()
existing_stmt = self.research_statements.value(
predicate=SCHEMA.accountablePerson,
object=self.person_iri)
for row in self.research_statements.objects(
subject=existing_stmt,
predicate=SCHEMA.about):
existing_subjects.add(row)
for fast_heading in self.profile.graph.objects(
subject=existing_stmt,
predicate=SCHEMA.about):
new_subjects.add(fast_heading)
for subject in list(existing_subjects.difference(new_subjects)):
self.research_statements.remove((existing_stmt,
SCHEMA.about,
subject))
for subject in list(new_subjects.difference(existing_subjects)):
# Add new subject to research statements and fast subjects
self.research_statements.add((existing_stmt,
SCHEMA.about,
subject))
self.fast_subjects.add((subject,
rdflib.RDF.type,
BF.Topic))
subject_label = self.profile.graph.value(subject=subject,
predicate=rdflib.RDFS.label)
if subject_label is not None:
self.fast_subjects.add((subject,
rdflib.RDFS.label,
subject_label))
def __update_research_statements__(self):
existing_stmt = self.research_statements.value(
predicate=SCHEMA.accountablePerson,
object=self.person_iri)
current_description = self.research_statements.value(
subject=existing_stmt,
predicate=SCHEMA.description)
new_description = self.profile.graph.value(
subject=existing_stmt,
predicate=SCHEMA.description)
if new_description is not None \
and str(current_description) != str(new_description):
self.research_statements.remove((existing_stmt,
SCHEMA.description,
current_description))
            # rdflib escapes quotes during serialization; Graph has no
            # string-style replace(), so no manual escaping is done here.
self.research_statements.add((existing_stmt,
SCHEMA.description,
new_description))
def run(self):
# Function iterates and commits any changes to
self.__update_fast_subjects__()
self.__update_research_statements__()
self.__save_graph__(
file_path="/data/cc-research-statements.ttl",
graph_name="research_statements",
message=self.statement_msg)
self.__save_graph__(
file_path ="/data/cc-fast-subjects.ttl",
graph_name="fast_subjects",
message="Fast subject added")
class EmailProfile(object):
"""Simple Email Profile class that creates a local RDF graph for new
profile or editing profile that is send via email to the Administrators
for review."""
    def __init__(self, config, person_iri=None):
self.config = config
self.triplestore_url = self.config.get("TRIPLESTORE_URL")
self.graph = rdflib.Graph()
self.graph.namespace_manager.bind("bf", BF)
self.graph.namespace_manager.bind("cite", CITE)
self.graph.namespace_manager.bind("schema", SCHEMA)
self.graph.namespace_manager.bind("prov", PROV)
self.email = config.get("EMAIL")
self.recipients = config.get("ADMINS")
self.person_iri = person_iri
def __send_email__(self, subject, body):
"""Sends email to administrators with attached profile graph"""
message = MIMEMultipart()
message["From"] = self.email.get("user")
message["To"] = ",".join(["<{0}>".format(r) for r in self.recipients])
message["Subject"] = subject
email_server = smtplib.SMTP(
self.email.get("host"),
self.email.get("port"))
email_server.ehlo()
if self.email.get("tls"):
email_server.starttls()
body = MIMEText(body, _charset="UTF-8")
message.attach(body)
graph_turtle = io.StringIO(
self.graph.serialize(format='turtle').decode())
attachment = MIMEText(graph_turtle.read())
attachment.add_header('Content-Disposition',
'attachment',
filename='profile.ttl')
message.attach(attachment)
email_server.login(
self.email.get("user"),
self.email.get("password"))
recipients = list(set(self.recipients)) # Quick dedup
email_server.sendmail(self.email.get("user"),
recipients,
message.as_string())
email_server.close()
def __add_article__(self, work_iri, work_form):
"""Article specific data added to creative work
Args:
work_iri(rdflib.URIRef): Creative Work IRI for Article
work_form(Flask.request.form): Dict of form values
"""
self.graph.add((work_iri,
rdflib.RDF.type,
SCHEMA.ScholarlyArticle))
self.graph.add((work_iri,
SCHEMA.name,
rdflib.Literal(work_form.article_title.data)))
if work_form.page_start.data !=None:
self.graph.add((work_iri,
SCHEMA.pageStart,
rdflib.Literal(work_form.page_start.data)))
if work_form.page_end.data !=None:
self.graph.add((work_iri,
SCHEMA.pageEnd,
rdflib.Literal(work_form.page_end.data)))
journal = rdflib.BNode()
self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical))
self.graph.add((journal,
SCHEMA.name,
rdflib.Literal(work_form.journal_title.data)))
issue, volume = None, None
if work_form.volume_number.data != None:
volume = rdflib.BNode()
self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume))
self.graph.add((volume,
SCHEMA.volumeNumber,
rdflib.Literal(work_form.volume_number.data)))
self.graph.add((volume, SCHEMA.partOf, journal))
if work_form.issue_number.data != None:
issue = rdflib.BNode()
self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue))
self.graph.add((issue,
SCHEMA.issueNumber,
rdflib.Literal(work_form.issue_number.data)))
if volume is not None:
self.graph.add((issue,
SCHEMA.partOf,
volume))
else:
self.graph.add((issue,
SCHEMA.partOf,
journal))
self.graph.add((work_iri, SCHEMA.partOf, issue))
elif volume is not None:
self.graph.add((work_iri, SCHEMA.partOf, volume))
else:
# Add work_iri to Journal as last resort
self.graph.add((work_iri, SCHEMA.partOf, journal))
if work_form.month.data != None:
self.graph.add((work_iri,
CITE.month,
rdflib.Literal(work_form.month.data)))
def __add_book__(self, work, work_form):
self.graph.add((work, rdflib.RDF.type, SCHEMA.Book))
self.graph.add((work,
SCHEMA.title,
rdflib.Literal(work_form.book_title.data)))
if work_form.isbn.data is not None:
self.graph.add((work,
SCHEMA.isbn,
rdflib.Literal(work_form.isbn.data)))
if work_form.editionStatement.data is not None:
self.graph.add(
(work,
SCHEMA.editionStatement,
rdflib.Literal(work_form.editionStatement.data)))
if work_form.editor.data is not None:
self.graph.add((work,
SCHEMA.editor,
rdflib.Literal(work_form.editor.data)))
if work_form.provisionActivityStatement.data is not None:
self.graph.add(
(work,
SCHEMA.provisionActivityStatement,
rdflib.Literal(work_form.provisionActivityStatement.data)))
if work_form.notes.data is not None:
self.graph.add(
(work,
SCHEMA.description,
rdflib.Literal(work_form.notes.data)))
def __populate_work__(self, work_form, generated_by=None):
"""Populates graph with new work
Args:
form(Flask.request.form): Dict of form values
"""
if len(work_form.iri.data) > 0:
work_iri = rdflib.URIRef(work_form.iri.data)
else: # Mint IRI for new work
if "doi" in work_form and len(work_form.doi.data) > 0:
work_iri = rdflib.URIRef(work_form.doi.data)
else:
work_iri = rdflib.URIRef(
"http://catalog.coloradocollege.edu/{}".format(
uuid.uuid1()))
self.graph.add((work_iri,
                        SCHEMA.datePublished,  # schema.org property (was "dataPublished")
rdflib.Literal(work_form.datePublished.data)))
self.graph.add((work_iri,
CITE.authorString,
rdflib.Literal(work_form.author_string.data)))
if generated_by:
add_qualified_generation(self.graph,
work_iri,
generated_by)
citation_type = work_form.citation_type.data
self.graph.add((work_iri,
CITE.citationType,
rdflib.Literal(citation_type)))
if "author" in work_form and len(work_form.author.data) > 0:
self.person_iri = rdflib.URIRef(work_form.author.data)
self.graph.add((work_iri,
SCHEMA.author,
self.person_iri))
elif generated_by:
self.person_iri = generated_by
self.graph.add((work_iri,
SCHEMA.author,
generated_by))
if "url" in work_form and len(work_form.url.data) > 0:
self.graph.add((work_iri,
SCHEMA.url,
rdflib.URIRef(work_form.url.data)))
if work_form.abstract.data != None:
self.graph.add((work_iri,
SCHEMA.about,
rdflib.Literal(work_form.abstract.data)))
if citation_type.startswith("article"):
self.__add_article__(work_iri, work_form)
elif citation_type.startswith("book chapter"):
self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter))
book_bnode = rdflib.BNode()
self.graph.add((work_iri, SCHEMA.partOf, book_bnode))
self.__add_book__(book_bnode, work_form)
elif citation_type.startswith("book"):
self.__add_book__(work_iri, work_form)
else:
abort(500)
if work_form.abstract.data != None:
self.graph.add((work_iri,
SCHEMA.about,
rdflib.Literal(work_form.abstract.data)))
return work_iri
def add(self, work_form, generated_by=None):
work_iri = self.__populate_work__(work_form, generated_by)
email_body = "Properties and Values for Creative Work {}".format(work_iri)
for row in work_form._fields:
if row.startswith("csrf_token"):
continue
field = getattr(work_form, row)
email_body += "\n{}:\t{}".format(row, field.data)
self.__send_email__("Added New Work", email_body)
return work_iri
def new(self, message):
"""Adds a new profile"""
self.__send_email__("Add new profile", message)
def update(self, message):
"""Edits existing profile"""
global BACKGROUND_THREAD
BACKGROUND_THREAD = ProfileUpdateThread(
config=self.config,
msg=message,
person=self.person_iri,
profile=self)
BACKGROUND_THREAD.start()
self.__send_email__("Updating Profile", message)
def __email_work__(**kwargs):
"""Function takes a work graph and configuration and emails the graph in
turtle format to the administrators for review before adding to production.
Keyword args:
work_graph(rdflib.Graph): RDF Graph of Citation
config: Configuration includes logins and administor
"""
work_graph = kwargs.get("graph")
config = kwargs.get("config")
sender = config.get('EMAIL')['user']
recipients = config.get("ADMINS")
subject = kwargs.get('subject')
text = kwargs.get('text')
carbon_copy = kwargs.get("carbon_copy", [])
message = MIMEMultipart()
message["From"] = sender
message["Subject"] = subject
message["To"] = ",".join(["<{0}>".format(r) for r in recipients])
if len(carbon_copy) > 0:
message["Cc"] = ','.join(carbon_copy)
recipients.extend(carbon_copy)
body = MIMEText(text, _charset="UTF-8")
message.attach(body)
if work_graph:
work_turtle = io.StringIO(
work_graph.serialize(format='turtle').decode())
attachment = MIMEText(work_turtle.read())
attachment.add_header('Content-Disposition',
'attachment',
filename='work.ttl')
message.attach(attachment)
#try:
server = smtplib.SMTP(config.get('EMAIL')['host'],
config.get('EMAIL')['port'])
server.ehlo()
if config.get('EMAIL')['tls']:
server.starttls()
server.ehlo()
server.login(sender,
config.get("EMAIL")["password"])
recipients = list(set(recipients)) # Quick dedup
server.sendmail(sender, recipients, message.as_string())
server.close()
def generate_citation_html(citation):
soup = BeautifulSoup("", 'lxml')
div = soup.new_tag("div", **{"class": "row"})
col_1 = soup.new_tag("div", **{"class": "col-1"})
citation_type = citation.get("ENTRYTYPE")
if citation_type.startswith("article"):
col_1.append(soup.new_tag("i", **{"class": "fas fa-file-alt"}))
elif citation_type.endswith("book"):
col_1.append(soup.new_tag("i", **{"class": "fas fa-book"}))
under_review = soup.new_tag("em")
under_review.string = "In Review"
col_1.append(under_review)
div.append(col_1)
col_2 = soup.new_tag("div", **{"class": "col-7"})
if "article_title" in citation:
name = citation.get("article_title")
elif "title" in citation:
name = citation.get("title")
if "url" in citation:
work_link = soup.new_tag("a", href=citation.get("url"))
work_link.string = name
col_2.append(work_link)
else:
span = soup.new_tag("span")
span.string = name
col_2.append(span)
if "journal_title" in citation:
em = soup.new_tag("em")
em.string = citation.get("journal_title")
col_2.append(em)
if "year" in citation:
span = soup.new_tag("span")
span.string = "({0})".format(citation.get("year"))
col_2.append(span)
vol_number = citation.get("volume_number")
if vol_number and len(vol_number) > 0:
span = soup.new_tag("span")
span.string = "v. {}".format(vol_number)
col_2.append(span)
issue_number = citation.get("issue_number")
if issue_number and len(issue_number ) > 0:
span = soup.new_tag("span")
span.string = " no. {}".format(issue_number)
col_2.append(span)
page_start = citation.get("page_start")
if page_start and len(page_start) > 0:
span = soup.new_tag("span")
span.string = "p. {}".format(page_start)
col_2.append(span)
page_end = citation.get("page_end")
if page_end and len(page_end) > 0:
span = soup.new_tag("span")
if "page_start" in citation:
page_string = "- {}."
else:
page_string = "{}."
span.string = page_string.format(page_end)
col_2.append(span)
div.append(col_2)
col_3 = soup.new_tag("div", **{"class": "col-4"})
iri = citation.get("iri")
if iri:
edit_click = "editCitation('{}');".format(iri)
delete_click = "deleteCitation('{}');".format(iri)
edit_a = soup.new_tag("a", **{"class": "btn btn-warning disabled",
"onclick": edit_click,
"type=": "input"})
edit_a.append(soup.new_tag("i", **{"class": "fas fa-edit"}))
col_3.append(edit_a)
delete_a = soup.new_tag("a", **{"class": "btn btn-danger",
"onclick": delete_click,
"type=": "input"})
delete_a.append(soup.new_tag("i", **{"class": "fas fa-trash-alt"}))
col_3.append(delete_a)
div.append(col_3)
return div.prettify()
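
# Illustrative example (added): the kind of dict generate_citation_html()
# expects -- keys mirror the .get() calls above, values are made up.
#   generate_citation_html({
#       "ENTRYTYPE": "article", "article_title": "A Sample Study",
#       "journal_title": "Some Journal", "year": "2020",
#       "volume_number": "12", "issue_number": "3",
#       "page_start": "1", "page_end": "10",
#       "url": "https://example.org/article", "iri": "https://example.org/work/1"})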
def __reconcile_article__(work_graph, connection):
SCHEMA = rdflib.Namespace("http://schema.org/")
for row in work_graph.query(
"""SELECT ?entity ?label WHERE { ?entity rdf:type schema:Periodical ;
schema:name ?label . } """):
entity, label = row
break
volume, issue = None, None
volume_or_issue = work_graph.value(predicate=SCHEMA.partOf,
object=entity)
schema_class = work_graph.value(subject=volume_or_issue,
predicate=rdflib.RDF.type)
    # compare by equality against the rdf:type written in __add_article__
    if schema_class == SCHEMA.PublicationVolume:
volume = volume_or_issue
issue = work_graph.value(predicate=SCHEMA.partOf,
object=volume)
    elif schema_class == SCHEMA.PublicationIssue:
issue = volume_or_issue
result = connection.datastore.query("""SELECT ?periodical
WHERE {{
?periodical schema:name ?name .
FILTER(CONTAINS(?name, "{0}"))
}}""".format(label))
if result and len(result) > 0:
periodical = result[0].get("periodical").get("value")
if periodical != str(entity):
new_work = rdflib.URIRef(periodical)
bibcat.replace_iri(work_graph, entity, new_work)
entity = new_work
if volume is not None:
vol_num = work_graph.value(subject=volume,
predicate=SCHEMA.volumeNumber)
result = connection.datastore.query("""SELECT ?volume
WHERE {{
?volume schema:partOf ?work ;
schema:volumeNumber ?volumeNumber .
BIND(<{0}> as ?work)
BIND("{1}" as ?volumeNumber)
}}""".format(entity, vol_num))
if result and len(result) > 0:
new_volume = rdflib.URIRef(result[0].get("volume").get("value"))
bibcat.replace_iri(work_graph, volume, new_volume)
if issue is not None:
issue_number = work_graph.value(subject=issue,
predicate=SCHEMA.issueNumber)
result = connection.datastore.query("""SELECT ?issue
WHERE {{
?issue rdf:type schema:issueNumber ;
schema:issueNumber ?issue_number .
OPTIONAL {{ ?issue schema:partOf ?volume . }}
OPTIONAL {{ ?issue schema:partOf ?periodical . }}
BIND(<{0}> as ?volume)
BIND(<{1}> as ?periodical)
BIND("{2}" as ?issue_number)
}}""".format(volume, periodical, issue_number) )
if result and len(result) > 0:
new_issue = rdflib.URIRef(result[0].get("issue").get("value"))
bibcat.replace_iri(work_graph, issue, new_issue)
def add_creative_work(**kwargs):
"""Calls utilities to populate and save to datastore"""
config = kwargs.get("config")
profile = EmailProfile(config)
current_user = kwargs.get("current_user")
config_manager = kwargs.get('config_manager')
connection = config_manager.conns
generated_by = kwargs.get("generated_by")
work_form = kwargs.get("work_form")
BF = config_manager.nsm.bf
SCHEMA = config_manager.nsm.schema
sparql = EMAIL_LOOKUP.format(
current_user.data.get('mail').lower())
email_results = connection.datastore.query(sparql)
if len(email_results) > 0:
generated_by = rdflib.URIRef(
email_results[0].get("person").get('value'))
work_iri = rdflib.URIRef(profile.add(work_form, generated_by))
#profile.update("Added or Updated Creative Work")
return {"message": "New work has been submitted for review",
"status": True,
"iri": work_iri}
def add_profile(**kwargs):
"""Adds a profile stub to scholarship graph"""
config = kwargs.get("config")
current_user = kwargs.get("current_user")
config_manager = kwargs.get('config_manager')
profile = EmailProfile(config)
connection = config_manager.conns
BF = config_manager.nsm.bf
SCHEMA = config_manager.nsm.schema
results = connection.datastore.query(
EMAIL_LOOKUP.format(
current_user.data.get('mail').lower()))
if len(results) > 0:
generated_by = rdflib.URIRef(results[0].get("person").get('value'))
else:
generated_by = None
form = kwargs.get("form")
if form.get("orcid"):
person_uri = form.get("orcid")
else:
person_uri = "http://catalog.coloradocollege.edu/{}".format(
uuid.uuid1())
person_iri = rdflib.URIRef(person_uri)
if generated_by is None:
generated_by = person_iri
profile.graph.add(
(person_iri,
rdflib.RDF.type,
BF.Person.rdflib))
given_name = form.get("given_name")
if given_name is not None:
profile.graph.add(
(person_iri,
SCHEMA.givenName.rdflib,
rdflib.Literal(given_name, lang="en")))
family_name = form.get("family_name")
if family_name is not None:
profile.graph.add((person_iri,
SCHEMA.familyName.rdflib,
rdflib.Literal(family_name, lang="en")))
label = "{} {}".format(given_name, family_name)
profile.graph.add((person_iri,
rdflib.RDFS.label,
rdflib.Literal(label, lang="en")))
email = form.get("email")
profile.graph.add((person_iri,
SCHEMA.email.rdflib,
rdflib.Literal(email)))
add_qualified_generation(profile.graph,
person_iri,
generated_by)
dept_year = kwargs.get("year-iri")
if dept_year is not None:
        dept_year_iri = rdflib.URIRef(dept_year)
title = kwargs.get("title-iri")
profile.graph.add(
(dept_year_iri,
rdflib.URIRef(title),
person_iri))
statement = kwargs.get("statement", form.get("research_stmt"))
if statement is not None:
statement_iri = rdflib.URIRef("http://catalog.coloradocollege.edu/{}".format(
uuid.uuid1()))
profile.graph.add(
(statement_iri,
rdflib.RDF.type,
SCHEMA.DigitalDocument.rdflib))
profile.graph.add(
(statement_iri,
rdflib.RDFS.label,
rdflib.Literal("Research Statement for {}".format(label),
lang="en")))
profile.graph.add(
(statement_iri,
SCHEMA.accountablePerson.rdflib,
person_iri))
profile.graph.add(
(statement_iri,
SCHEMA.description.rdflib,
rdflib.Literal(statement, lang="en")))
add_qualified_generation(profile.graph,
statement_iri,
generated_by)
form_subjects = form.getlist("subjects")
new_subjects = {}
for row in form_subjects:
fast_id, fast_label = row.split("==")
if fast_id.startswith("http"):
fast_uri = fast_id
else:
fast_uri = "http://id.worldcat.org/fast/{}".format(fast_id[3:])
new_subjects[fast_uri] = fast_label
for fast_subject, fast_label in new_subjects.items():
iri_subject = rdflib.URIRef(fast_subject)
profile.graph.add(
(statement_iri,
SCHEMA.about.rdflib,
iri_subject))
            # EmailProfile keeps no fast_subjects graph, so check this profile's
            # own graph instead; for a fresh graph the label is simply added.
            existing_label = profile.graph.value(
                subject=iri_subject,
                predicate=rdflib.RDFS.label)
if existing_label is None:
profile.graph.add(
(iri_subject,
rdflib.RDF.type,
BF.Topic.rdflib))
profile.graph.add(
(iri_subject,
rdflib.RDFS.label,
rdflib.Literal(fast_label, lang="en")))
message = "New {} as {} to Colorado College's Scholarship Graph".format(
label,
person_iri)
profile.new(message)
def delete_creative_work(**kwargs):
config = kwargs.get("config")
git_profile = GitProfile(config)
current_user = kwargs.get("current_user")
config_manager = kwargs.get('config_manager')
author = kwargs.get("author")
connection = config_manager.conns
iri = kwargs.get("iri")
__email_work__(
config=config,
carbon_copy=[current_user.data.get('mail'),],
subject="Delete Request",
text="Delete citation {} for {}\nrequested by {} on {}".format(
iri,
author,
current_user.data.get('mail'),
datetime.datetime.utcnow().isoformat())
)
return {"message": "Deletion of {} for {} under review".format(
iri, author),
"status": True}
def edit_creative_work(**kwargs):
config = kwargs.get("config")
git_profile = GitProfile(config)
current_user_email = kwargs.get("current_user_email")
config_manager = kwargs.get('config_manager')
connection = config_manager.conns
revised_by = kwargs.get("revised_by")
raw_citation = kwargs.get("citation")
work_type = kwargs.get("work_type", "article")
if revised_by is None and current_user_email:
sparql = EMAIL_LOOKUP.format(
current_user_email.lower())
email_results = connection.datastore.query(sparql)
if len(email_results) > 0:
revised_by = rdflib.URIRef(
email_results[0].get("person").get('value'))
temp_work = rdflib.Graph()
temp_work.namespace_manager.bind("cite",
rdflib.Namespace("https://www.coloradocollege.edu/library/ns/citation/"))
for prefix, namespace in git_profile.cc_people.namespaces():
temp_work.namespace_manager.bind(prefix, namespace)
if work_type.startswith("article"):
citation = utilities.Article_Citation(raw_citation,
temp_work,
git_profile.cc_people,
False)
citation.populate()
citation.populate_article()
citation.add_article()
elif work_type.startswith("book"):
citation = utilities.Book_Citation(raw_citation,
temp_work,
git_profile.cc_people,
False)
citation.populate()
citation.populate_book()
citation.add_book()
if revised_by:
add_qualified_revision(temp_work,
rdflib.URIRef(citation.iri),
revised_by)
email_subject = 'Edited Creative Work {}'.format(citation.iri)
__email_work__(graph=temp_work,
config=config,
carbon_copy=[current_user_email,],
subject=email_subject,
text="Edited {} revised by {} on {}, see attached RDF turtle file".format(
citation.citation_type,
revised_by,
datetime.datetime.utcnow().isoformat())
)
return {"message": "Changes to work has been submitted for review",
"status": True}
def update_profile(**kwargs):
"""Updates existing triples based on form values"""
config_manager = kwargs.get('config_manager')
connection = config_manager.conns
BF = config_manager.nsm.bf
SCHEMA = config_manager.nsm.schema
form = kwargs.get('form')
current_user = kwargs.get("current_user")
output = ''
person_iri = rdflib.URIRef(form.get("iri"))
profile = EmailProfile(config_manager, person_iri)
msg = ""
results = connection.datastore.query(
EMAIL_LOOKUP.format(
current_user.data.get('mail').lower()))
if len(results) > 0:
generated_by = rdflib.URIRef(results[0].get("person").get('value'))
else:
generated_by = person_iri
msg = "{} made the following changes to {}'s academic profile:\n".format(
generated_by,
form['label'])
statement_iri_results = connection.datastore.query(
RESEARCH_STMT_IRI.format(
person_iri))
if len(statement_iri_results) > 0:
statement_iri = rdflib.URIRef(
statement_iri_results[0].get("iri").get("value"))
add_qualified_revision(profile.graph,
statement_iri,
generated_by)
else:
statement_iri = rdflib.URIRef(
"http://catalog.coloradocollege.edu/{}".format(uuid.uuid1()))
profile.graph.add(
(statement_iri,
rdflib.RDF.type,
SCHEMA.DigitalDocument.rdflib))
profile.graph.add(
(statement_iri,
SCHEMA.accountablePerson.rdflib,
person_iri))
profile.graph.add(
(statement_iri,
rdflib.RDFS.label,
rdflib.Literal("Research Statement for {} {}".format(
form.get('given_name'),
form.get('family_name')), lang="en")))
add_qualified_generation(
profile.graph,
statement_iri,
generated_by)
citations = form.getlist("citations")
for uri in citations:
profile.graph.add(
(rdflib.URIRef(uri),
SCHEMA.author.rdflib,
person_iri))
statement = form.get("research_stmt")
if len(statement) > 0:
profile.graph.add(
(statement_iri,
SCHEMA.description.rdflib,
rdflib.Literal(statement, lang="en")))
form_subjects = form.getlist("subjects")
new_subjects = {}
for row in form_subjects:
fast_id, fast_label = row.split("==")
if fast_id.startswith("http"):
fast_uri = fast_id
else:
fast_uri = "http://id.worldcat.org/fast/{}".format(fast_id[3:])
iri_subject = rdflib.URIRef(fast_uri)
profile.graph.add(
(statement_iri,
SCHEMA.about.rdflib,
iri_subject))
profile.graph.add(
(iri_subject,
rdflib.RDF.type,
BF.Topic.rdflib))
profile.graph.add(
(iri_subject,
rdflib.RDFS.label,
rdflib.Literal(fast_label, lang="en")))
profile.update(msg)
return {"message": msg,
"status": True}
| 2.25 | 2 |
DeepLearning from scratch/example3.py | Nikeshbajaj/MachineLearningFromScratch | 15 | 12795272 | <reponame>Nikeshbajaj/MachineLearningFromScratch
'''
Example 3: Deep Neural Network from scratch
@Author _ <NAME>
PhD Student at Queen Mary University of London &
University of Genova
Contact _ http://nikeshbajaj.in
n[dot]<EMAIL>
bajaj[dot]<EMAIL>
'''
import numpy as np
import matplotlib.pyplot as plt
from DeepNet import deepNet
import DataSet as ds
plt.close('all')
Xi, yi = ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False)
[n,N] =Xi.shape
r = np.random.permutation(N)
X = Xi[:,r[:N//2]]
y = yi[:,r[:N//2]]
Xts =Xi[:,r[N//2:]]
yts =yi[:,r[N//2:]]
print(X.shape, y.shape,Xts.shape,yts.shape)
NN = deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0])
plt.ion()
for i in range(10):
NN.fit(itr=10)
NN.PlotLCurve()
NN.PlotBoundries(Layers=True)
NN.PlotLCurve()
NN.PlotBoundries(Layers=True)
print(NN)
yi,yp = NN.predict(X)
yti,ytp = NN.predict(Xts)
print('Accuracy::: Training :',100*np.sum(yi==y)/yi.shape[1], ' Testing :',100*np.sum(yti==yts)/yti.shape[1]) | 3.640625 | 4 |
session-3/tflite/converter.py | darkling-thrush/MLIntroduction | 0 | 12795273 | '''
Created on Dec 23, 2019
@author: mohammedmostafa
'''
import tensorflow as tf
modelPath = "../model/CNDetector_5.h5"
converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
lite_model = converter.convert()
open("../model/CNDetector_Lite_5.tflite", "wb").write(lite_model)
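
# Optional sanity check (added sketch, not in the original script): reload the
# converted model with the TFLite interpreter and print its input/output specs.
interpreter = tf.lite.Interpreter(model_path="../model/CNDetector_Lite_5.tflite")
interpreter.allocate_tensors()
print(interpreter.get_input_details())
print(interpreter.get_output_details())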
| 2.46875 | 2 |
am_exps/__init__.py | AxlAlm/SegNLP | 1 | 12795274 |
from .jpnn_exp import jpnn
from .lstm_cnn_crf_exp import lstm_cnn_crf
from .lstm_crf_exp import lstm_crf
from .lstm_dist_exp import lstm_dist
from .lstm_er_exp import lstm_er
| 1.015625 | 1 |
scripts/AIA_CH_spoca_jobs.py | bmampaey/spoca4rwc | 0 | 12795275 | <gh_stars>0
#!/usr/bin/env python3
import os
import sys
import logging
import argparse
from glob import glob
from datetime import datetime, timedelta
from job import Job, JobError
from AIA_quality import get_quality, get_quality_errors
# Path to the classification program
classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x'
# Path to the classification program config file
classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config'
# Path to the centers file
classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt'
# The frequency to run the classification program
classification_run_frequency = timedelta(hours = 4)
# Path to the get_CH_map program
get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x'
# Path to the get_CH_map program config file
get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config'
# Path to the tracking program
tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x'
# Path to the tracking program config file
tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config'
# Path to the tracking color file
tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt'
# The minimum number of files that overlaps with the previous tracking (see maxDeltaT)
tracking_overlap = 6
# The number of CH maps to run the tracking program on
tracking_run_count = 3
# Directory to output the maps
maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/'
# Directory where the prepped AIA files are located
aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits'
# Directory where the prepped HMI files are located
hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits'
# Wavelengths of AIA data to run the classification program on
AIA_wavelengths = [193]
def date_range(start, end, step):
'''Equivalent to range for date'''
date = start.replace()
while date < end:
yield date
date += step
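
# Example (added, illustrative): date_range(datetime(2020, 1, 1),
# datetime(2020, 1, 2), timedelta(hours=8)) yields the datetimes
# 2020-01-01 00:00, 08:00 and 16:00 -- the end bound itself is excluded.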
def parse_tracking_color_file(tracking_color_file):
try:
with open(tracking_color_file, 'tr') as f:
text = f.readline()
last_color = int(text.split(':')[1])
except Exception as why:
logging.warning('Could not read tracking color from file "%s": %s', tracking_color_file, why)
return 0
else:
logging.debug('Found last color %s from file %s', last_color, tracking_color_file)
return last_color
def get_good_file(file_pattern, ignore_bits = None):
'''Return the first file that matches the file_pattern and has a good quality'''
for file_path in sorted(glob(file_pattern)):
# Get the quality of the file
if ignore_bits is None:
quality = get_quality(file_path)
else:
quality = get_quality(file_path, ignore_bits)
# A quality of 0 means no defect
if quality == 0:
return file_path
else:
logging.info('Skipping file %s with bad quality: %s', file_path, get_quality_errors(quality))
def get_AIA_files(date, wavelengths):
'''Return a list of AIA files for the specified date and wavelengths'''
file_paths = list()
for wavelength in wavelengths:
file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength))
if file_path is None:
raise FileNotFoundError('AIA file for date %s and wavelength %s was not found' % (date, wavelengths))
else:
file_paths.append(file_path)
return file_paths
def get_HMI_files(date):
'''Return a list of HMI files for the specified date'''
file_path = get_good_file(hmi_file_pattern.format(date=date))
if file_path is None:
raise FileNotFoundError('HMI file for date %s was not found' % date)
else:
return [file_path]
def create_segmented_map(AIA_images, date):
'''Run the classification program'''
# File path for the Segmented map to create
segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits')
# Create a job for the classification program with the appropriate parameters
job = Job(classification_exec, *AIA_images, config = classification_config_file, centersFile = classification_centers_file, output = segmented_map)
logging.info('Running job\n%s', job)
# Run the classification job
return_code, output, error = job()
# Check if the job ran succesfully
if return_code != 0:
raise JobError(return_code, output, error, job_name = 'classification', AIA_images = AIA_images)
elif not os.path.exists(segmented_map):
raise JobError(message = 'Could not find output file {segmented_map}', segmented_map = segmented_map)
else:
logging.debug('Job ran without errors, output: %s', output)
return segmented_map
def create_CH_map(segmented_map, date, images):
'''Run the get_CH_map program'''
# File path for the CH map to create
CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits')
# Create a job for the get_CH_map program with the appropriate parameters
job = Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file, output = CH_map)
logging.info('Running job\n%s', job)
# Run the get_CH_map job
return_code, output, error = job()
# Check if the job ran succesfully
if return_code != 0:
raise JobError(return_code, output, error, job_name = 'get_CH_map', segmented_map = segmented_map)
elif not os.path.exists(CH_map):
raise JobError(message = 'Could not find output file {CH_map}', CH_map = CH_map)
else:
logging.debug('Job ran without errors, output: %s', output)
return CH_map
def track_maps(tracked_maps, untracked_maps, newly_tracked_maps):
'''Run the tracking program'''
# File paths of the maps to run the tracking on
maps = tracked_maps[-tracking_overlap:] + untracked_maps
# last color of the previous tracking
last_color = parse_tracking_color_file(tracking_color_file)
# Create a job for the tracking program with the appropriate parameters
job = Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color)
logging.info('Running job\n%s', job)
# Run the tracking job
return_code, output, error = job()
# Check if the job ran succesfully
if return_code != 0:
raise JobError(return_code, output, error, job_name = 'tracking', maps = maps)
else:
logging.debug('Job ran without errors, output: %s', output)
try:
with open(tracking_color_file, 'tw') as f:
f.write(output)
except Exception as why:
logging.error('Could not write tracking color to file "%s": %s', tracking_color_file, why)
return tracked_maps + untracked_maps, [], newly_tracked_maps + untracked_maps
def run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps = None):
'''Run the SPoCA jobs to create and track the CHMaps'''
# If no tracked maps were given, we assumed all existing are
if tracked_maps is None:
tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits')))
# If no untracked maps were given, we assume none are
if untracked_maps is None:
untracked_maps = list()
else:
# We need to remove the untracked maps from the tracked maps
for untracked_map in untracked_maps:
tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps))
# We will return the list of all newly tracked maps
newly_tracked_maps = list()
# Start the loop
for date in date_range(start_date, end_date, classification_run_frequency):
# Get the AIA files for the classification
try:
AIA_images = get_AIA_files(date, AIA_wavelengths)
except FileNotFoundError as why:
logging.warning('Missing AIA files for date %s, skipping missing files!', date)
continue
# Get the list of HMI images
try:
HMI_images = get_HMI_files(date)
except FileNotFoundError as why:
# It's okay if HMI files are missing, we just won't have HMI stats for the CH
HMI_images = list()
# Create the Segmented map
segmented_map = create_segmented_map(AIA_images, date)
# Create the CH map
CH_map = create_CH_map(segmented_map, date, AIA_images + HMI_images)
# Add the CH map to the list of untracked maps
untracked_maps.append(CH_map)
# If we have enough untracked maps, we run the tracking program
if len(untracked_maps) >= tracking_run_count:
tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps)
else:
logging.debug('Not enough maps to run tracking, need %s but have only %s', tracking_run_count, len(untracked_maps))
# Track the remaing untracked maps
if untracked_maps:
tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps)
return newly_tracked_maps
# Start point of the script
if __name__ == '__main__':
# Get the arguments
parser = argparse.ArgumentParser(description = 'Create and track CH maps')
parser.add_argument('--debug', '-d', default = False, action = 'store_true', help = 'Set the logging level to debug')
parser.add_argument('--log_file', '-l', help = 'The file path of the log file')
parser.add_argument('--start_date', '-s', default = '2010-05-20', help = 'Start date of AIA files, in form YYYY-MM-DD')
parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of AIA files, in form YYYY-MM-DD')
parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*', help = 'File paths of previously tracked CH maps')
parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*', help = 'File paths of not yet tracked CH maps')
args = parser.parse_args()
# Setup the logging
if args.log_file:
logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file)
else:
logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s')
# Parse the start and end date
start_date = datetime.strptime(args.start_date, '%Y-%m-%d')
end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow()
# Run the SPoCA jobs
try:
CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps, args.untracked_maps)
except Exception as why:
logging.critical(str(why))
sys.exit(1)
| 2.015625 | 2 |
frontend/model/migrations/versions/d385c3eb6937_users_autoincrement_set_to_pk.py | MarioBartolome/GII_0_17.02_SNSI | 1 | 12795276 | """USERS - Autoincrement set to PK
Revision ID: d385c3eb6937
Revises: ee2cbe4166fb
Create Date: 2018-02-16 11:23:29.705565
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd385c3eb6937'
down_revision = 'ee2cbe4166fb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True))
op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_surname'), table_name='user')
op.drop_column('user', 'surname')
op.drop_column('user', 'name')
# ### end Alembic commands ###
| 1.476563 | 1 |
algeria.py | yasserkaddour/covid19-icu-data-algeria | 5 | 12795277 | # A large portion of the code came from the COVID-19 Dataset project by Our World in Data
# https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter
# Mainly contributed by <NAME> https://github.com/lucasrodes
# The code is under completely open access under the Creative Commons BY license
# https://creativecommons.org/licenses/by/4.0/
import os
import pandas as pd
import re
import tweepy
try:
from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET
except ImportError:
TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET')
class TwitterAPI:
def __init__(self, consumer_key: str, consumer_secret: str):
self._api = self._get_api(consumer_key, consumer_secret)
def _get_api(self, consumer_key, consumer_secret):
auth = tweepy.AppAuthHandler(consumer_key, consumer_secret)
return tweepy.API(auth)
def get_tweets(self, username, num_tweets=30):
tweets = tweepy.Cursor(self._api.user_timeline,
screen_name=username,
include_rts=False,
tweet_mode='extended',
exclude_replies=False,
).items(num_tweets)
return tweets
class TwitterCollectorBase:
def __init__(self, api, username: str, location: str, num_tweets=100):
self.username = username
self.location = location
self.tweets = api.get_tweets(self.username, num_tweets)
self.tweets_relevant = []
self.output_path = "./algeria-covid19-icu-data.csv"
self._data_old = self._get_current_data()
def _set_output_path(self, paths, output_path):
if output_path is None:
if paths is not None:
return paths.tmp_vax_out_proposal(self.location)
else:
raise AttributeError(
"Either specify attribute `paths` or method argument `output_path`")
def _get_current_data(self):
if os.path.isfile(self.output_path):
            return pd.read_csv(self.output_path)
        else:
            return None
@property
def last_update(self):
if self._data_old is not None:
return self._data_old.date.max()
else:
return None
def _propose_df(self):
raise NotImplementedError
def propose_df(self):
df = (
self._propose_df()
.pipe(self.merge_with_current_data)
.sort_values("date")
)
return df
def build_post_url(self, tweet_id: str):
return f"https://twitter.com/{self.username}/status/{tweet_id}"
def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame:
if df.empty:
return self._data_old
if self._data_old is not None:
df_current = self._data_old[~self._data_old.date.isin(df.date)]
df = pd.concat([df, df_current]).sort_values(by="date")
return df
def stop_search(self, dt):
if self._data_old is None:
return False
elif dt >= self.last_update:
return False
elif dt < self.last_update:
return True
def to_csv(self):
df = self.propose_df()
df.to_csv(self.output_path, index=False)
class Algeria(TwitterCollectorBase):
def __init__(self, api, **kwargs):
super().__init__(
api=api,
username="Sante_Gouv_dz",
location="Algeria",
**kwargs
)
def _propose_df(self):
data = []
for tweet in self.tweets:
match = re.search(r"مؤشرات الترصد لوباء كوفيد-19", tweet.full_text) or re.search(
r"حصيلة وباء كورونا كوفيد-19 ليوم", tweet.full_text) or re.search(
r"نوافيكم بالحصيلة الكاملة", tweet.full_text)
match2 = re.search(r"العناية المركز", tweet.full_text)
if match and match2:
dt_match = re.search(
r"(\d{1,2})\s*([ء-ي]+)\s*[ء-ي]*(202\d)", tweet.full_text)
dt = dt_match.group(
3)+"-"+arabicMonthToNum(dt_match.group(2))+"-"+dt_match.group(1).zfill(2)
if self.stop_search(dt):
break
new_cases_line = re.findall(
"^.*جديدة.*$", tweet.full_text, re.MULTILINE)[0]
new_cases = int(re.search(r'\d+', new_cases_line).group(0))
recoveries_line = re.findall(
"^.*للشفاء.*$", tweet.full_text, re.MULTILINE)[0]
recoveries = int(re.search(r'\d+', recoveries_line).group(0))
in_icu_line = re.findall(
"^.*العناية المركز.*$", tweet.full_text, re.MULTILINE)[0]
in_icu = int(re.search(r'\d+', in_icu_line).group(0))
                new_deaths_line = re.findall(
                    "^.*وفيات.*$", tweet.full_text, re.MULTILINE)
                if new_deaths_line:
                    new_deaths = int(
                        re.search(r'\d+', new_deaths_line[0]).group(0))
                elif re.findall(
                        "^.*وفاة واحدة.*$", tweet.full_text, re.MULTILINE):
                    # "one death" spelled out in words
                    new_deaths = 1
                else:
                    # assume zero when the tweet reports no deaths
                    new_deaths = 0
data.append({
"date": dt,
"new_cases": new_cases,
"recoveries": recoveries,
"in_icu": in_icu,
"death": new_deaths,
"text": tweet.full_text,
"source_url": self.build_post_url(tweet.id),
})
df = pd.DataFrame(data)
return df
def arabicMonthToNum(month):
return {
'جانفي': "01",
'فيفري': "02",
'مارس': "03",
'أفريل': "04",
'ماي': "05",
'جوان': "06",
'جويلية': "07",
'اوت': "08",
'أوت': "08",
'سبتمبر': "09",
'أكتوبر': "10",
'اكتوبر': "10",
'كتوبر': "10",
'نوفمبر': "11",
'ديسمبر': "12"
}[month]
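
# Example (added, illustrative): arabicMonthToNum('ماي') returns "05", so a
# tweet date such as "3 ماي 2021" becomes "2021-05-03" once the day is
# zero-padded in _propose_df().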
def main():
api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
Algeria(api).to_csv()
if __name__ == "__main__":
main()
| 2.6875 | 3 |
hiMoon/haplotype.py | sadams2013/hiMoon | 4 | 12795278 | <reponame>sadams2013/hiMoon
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itertools
import pandas as pd
import numpy as np
from pulp import *
from .gene import AbstractGene
from . import LOGGING
class NoVariantsException(Exception):
"""
Exception to call if a sample is attempted that has zero variants defined.
"""
pass
class Haplotype:
def __init__(self, gene: AbstractGene, sample_prefix: str, config = None) -> None:
"""
Create a new haplotype object
This object is not a subclass, but inherits data from the Gene class
Conceptually, Gene is a fairly abstract class that has meta information used
by the subject and haplotype classes.
Args:
gene (Gene): gene.Gene object
sample_prefix (str): Sample ID
"""
self.phased = gene.phased
self.config = config
self.solver = gene.solver
self.matched = False
self.sample_prefix = sample_prefix
self.genotypes = gene.get_sample_vars(sample_prefix)
if len(self.genotypes) == 0:
raise NoVariantsException
self.translation_table = gene.get_translation_table_copy()
self.chromosome = gene.chromosome
self.version = gene.version
self.reference = gene.reference
def table_matcher(self) -> None:
"""
Matches variants in the translation table with the subject's variants
"""
self.matched = True
matches = self.translation_table.apply(self._match, axis = 1)
self.translation_table["MATCH"] = [m[0] for m in matches]
self.translation_table["STRAND"] = [m[1] for m in matches]
self.translation_table["VAR_ID"] = self.translation_table.apply(
lambda x: f'{x["ID"]}_{str(x.iloc[6]).strip("<>")}_{str(x.iloc[7]).strip("<>")}',
axis = 1
)
self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table["MATCH"] == 99].tolist())
haps = self.translation_table["Haplotype Name"]
no_match = self.translation_table[self.translation_table["MATCH"] == 0].iloc[:,0] # Haplotypes where there is any variant not matching
drops = []
for i in no_match.unique():
if sum([i == k for k in no_match]) > 0:
drops.append(i)
self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't match 100%
self.variants = self.translation_table.loc[:,["VAR_ID", "MATCH", "STRAND", "Type", "Variant Start"]].drop_duplicates() # List of matched variants
self.haplotypes = [hap for hap in self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes
def _mod_vcf_record(self, alt: str, ref: str) -> str:
"""
Modifies record from VCF to standardized form
Args:
alt (str): alt allele
ref (str): ref allele
Returns:
str: reformatted alt allele
"""
if alt is None:
return "-"
if "<" in alt:
return f"s{alt.strip('<>')}"
elif len(ref) > len(alt):
return "id-"
elif len(ref) < len(alt):
return f'id{alt[1:]}' # Remove first position
else:
return f's{alt}'
def _mod_tt_record(self, var_type: str, alt: str) -> list:
"""
Modifies the translation table ref to a standardized form
Args:
var_type (str): insertion, deletion, or substitution
alt (str): allele from translation table
Returns:
[list]: modified allele as list based on iupac
"""
alt = alt.strip("<>")
if var_type == "insertion":
return [f'id{alt}']
elif var_type == "deletion":
return [f'id-']
else:
try:
return [f's{a}' for a in self.config.IUPAC_CODES[alt]]
except KeyError:
return [f's{alt}']
def _match(self, row: pd.core.series.Series) -> (int, int):
"""
Evaluate match in a single translation table row with a sample
Args:
row (pd.core.series.Series): single row from translation table
genotypes ([type]): list of genotypes
Returns:
int: 99 (missing), 0, 1, or 2 (corresponds to the number of matched alleles for a particular position)
"""
strand = 0
if row.iloc[8] in ["insertion", "deletion"]:
new_pos = int(row["ID"].split("_")[1]) - 1
ID = f'{row["ID"].split("_")[0]}_{new_pos}_SID'
else:
ID = row["ID"]
try:
genotype = self.genotypes[ID]
except KeyError: # Not in VCF
return int(self.config.MISSING_DATA_PARAMETERS["missing_variants"]), strand
try:
vcf_geno = [self._mod_vcf_record(g, genotype["ref"]) for g in genotype["alleles"]]
except AttributeError:
return int(self.config.MISSING_DATA_PARAMETERS["missing_variants"]), strand
if vcf_geno == ["-", "-"]:
return int(self.config.MISSING_DATA_PARAMETERS["missing_variants"]), strand
tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7])
alt_matches = sum([vcf_geno.count(a) for a in tt_alt_geno])
if alt_matches == 1 and genotype["phased"]:
strand = 1 if max([vcf_geno.index(a) for a in tt_alt_geno]) == 1 else -1
elif alt_matches == 2 and genotype["phased"]:
strand = 3
return alt_matches, strand
def _haps_from_prob(self, lp_problem: object) -> tuple:
"""
Take a optimally solved lp problem
Produce called haplotypes
Args:
lp_problem (object): solved lp problem
Returns:
tuple: called haplotypes and associated information
"""
is_ref = False
haps = []
variants = []
for v in lp_problem.variables():
if v.varValue:
if v.varValue > 0:
if v.name.split("_")[0] == f'c{self.chromosome}':
variants.append(v.name)
else:
haps.append((v.name, v.varValue))
if len(haps) == 0:
called = [self.reference, self.reference]
is_ref = True
elif len(haps) == 2:
called = [haps[0][0], haps[1][0]]
else:
called = np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist()
if len(called) == 1:
called.append(self.reference)
return called, variants, len(haps), is_ref
def _solve(self, hap_prob: object) -> object:
if self.solver == "GLPK":
hap_prob.solve(GLPK(msg=0))
else:
hap_prob.solve(PULP_CBC_CMD(msg=0))
def lp_hap(self) -> tuple:
"""
Build and run the LP problem
Returns:
tuple: list of possible haplotypes and list of associated variants
"""
possible_haplotypes = []
haplotype_variants = []
num_vars = self.variants.shape[0]
num_haps = len(self.haplotypes)
hap_vars = []
for hap in self.haplotypes:
trans = self.translation_table[self.translation_table.iloc[:,0] == hap]
hap_vars.append([1 if var in trans["VAR_ID"].unique() else 0 for var in self.variants["VAR_ID"]])
hap_prob = LpProblem("Haplotype Optimization", LpMaximize)
# Define the haplotypes and variants variables
haplotypes = [LpVariable(hap, cat = "Integer", lowBound=0, upBound=2) for hap in self.haplotypes]
variants = [LpVariable(var, cat = "Binary") for var in self.variants["VAR_ID"]]
# Set constraint of two haplotypes selected
hap_prob += (lpSum(haplotypes[i] for i in range(num_haps)) <= int(self.config.LP_PARAMS["max_haps"])) # Cannot choose more than x haplotypes (may be increased to find novel sub-alleles)
# Limit alleles that can be chosen based on zygosity
for i in range(num_vars): # Iterate over every variant
# A variant allele can only be used once per haplotype, up to two alleles per variant
hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))))
# A given variant cannot be used more than "MATCH"
hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) <= self.variants.iloc[i,1] * variants[i])
# Any CNV variants defined, if matched with a haplotype, MUST be used
# Otherwise, variants like CYP2D6*5 will be missed by the other methods
if self.variants.iloc[i,3] == "CNV":
hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) == self.variants.iloc[i,1])
if self.phased:
for i in range(num_haps):
hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1 # max one strand
hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) <= 1
hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) >= -1
# Set to maximize the number of variant alleles used
hap_prob += lpSum(
self.translation_table[
(self.translation_table.iloc[:,0] == self.haplotypes[i]) &
(self.translation_table["MATCH"] > 0)
].shape[0] * haplotypes[i] for i in range(num_haps))
self._solve(hap_prob)
if hap_prob.status != 1:
if self.phased:
LOGGING.warning(f"No feasible solution found, {self.sample_prefix} will be re-attempted with phasing off.")
return None, None
else:
LOGGING.warning(f"No feasible solution found, {self.sample_prefix} will not be called")
return [], []
else:
called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob)
if is_ref:
possible_haplotypes.append(tuple(called))
haplotype_variants.append(tuple(variants))
return possible_haplotypes, haplotype_variants
max_opt = hap_prob.objective.value()
opt = max_opt
while opt >= (max_opt - float(self.config.LP_PARAMS["optimal_decay"])) and not is_ref and hap_prob.status >= 0:
possible_haplotypes.append(tuple(sorted(called)))
haplotype_variants.append(tuple(sorted(variants)))
hap_prob += lpSum([h.value() * h for h in haplotypes]) <= hap_len - 1
self._solve(hap_prob)
if hap_prob.status != 1:
break
opt = hap_prob.objective.value()
new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob)
if new_called == called or len(new_called) == 0:
break
called = new_called
return possible_haplotypes, haplotype_variants
def _get_strand_constraint(self, i: int, default: list) -> list:
"""
Helps to assemble the constraint for phased data
Looks at all strands that are part of a haplotype
Removes homozygous calls
Args:
i (int): haplotype index
default (list): default return if nothing matches or if all are homozygous
Returns:
            list: the unique strand codes for this haplotype with homozygous entries (code 3) removed, or the given default if none remain
"""
sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]]["STRAND"].unique()
sc = np.delete(sc, np.where(sc == [3]))
return(sc if sc.size > 0 else np.array(default))
    def optimize_hap(self) -> tuple:
        """
        Solve for the most likely diplotype
        Returns:
            tuple: called haplotypes and their associated variants
        """
if not self.matched:
print("You need to run the table_matcher function with genotyped before you can optimize")
sys.exit(1)
called, variants = self.lp_hap()
if called is None:
# Happens when a phased call attempt fails
self.phased = False
called, variants = self.lp_hap()
if len(called) > 1:
LOGGING.warning(f"Multiple genotypes possible for {self.sample_prefix}.")
return called, variants
| 2.4375 | 2 |
pioneer/das/api/sensors/radar_conti.py | leddartech/pioneer.das.api | 8 | 12795279 | <reponame>leddartech/pioneer.das.api
from pioneer.das.api.interpolators import nearest_interpolator
from pioneer.das.api.samples import Sample, XYZVI
from pioneer.das.api.sensors.sensor import Sensor
class RadarConti(Sensor):
def __init__(self, name, platform):
super(RadarConti, self).__init__(name,
platform,
{'xyzvi': (XYZVI, nearest_interpolator)})
self.amplitude_type = 'velocity' # types = ['i', 'velocity']
def get_corrected_cloud(self, _timestamp, pts, _dtype):
return pts
| 2.234375 | 2 |
scripts/f247.py | Eve-ning/aleph0 | 0 | 12795280 | from math import pi
import numpy as np
from aleph.consts import *
from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent
from reamber.osu.OsuMap import OsuMap
# notes: 01:37:742 (97742|2,125993|2) -
SHAKES = np.array(
[100560, 100790, 101018, 101245,
104124, 104340, 104556, 104770,
107487, 107692, 107896, 108099,
110674, 110867, 111059, 111156, 111252, 111348,
113698, 113882, 114065, 114248,
116577, 116753, 116928, 117103,
119326, 119494, 119661, 119827,
121953, 122114, 122275, 122434,
122594, 122673, 122752, 122831, 123068,
123147, 123226, 123304, 123383, 123539,
123618, 123696, 123773, 123851, 124007,
124084, 124162, 124239, 124316, 124471,
124547, 124624, 124701, 124778, 124932,
125008, 125084, 125160, 125236, 125388,
125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993])
def f247(m: OsuMap):
notes = sorted([n for n in m.notes.hits() if 97742 < n.offset <= 125993])
BASE_SHAKE_AMP = 0.010
INC_SHAKE_AMP = 0.0010
SHAKE_WINDOW = 250
NOTE_DURATION = 2000
# noinspection PyTypeChecker
events = [
*[SvOsuMeasureLineEvent(
firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t,
startX=n.offset - NOTE_DURATION - t, endX=n.offset - t,
startY=-1 + en / 500 , endY=1 - en / 500,
funcs=[
lambda x, n=n, t=t:
# This flips the board if it's < 2
(-1 if n.column < 2 else 1) *
(
np.piecewise(x,
[(i <= x) & (x < i + SHAKE_WINDOW) for i in SHAKES],
[*[lambda x, i=i, es=es:
(BASE_SHAKE_AMP + es * INC_SHAKE_AMP)
* np.sin((x - i) * pi / (SHAKE_WINDOW - es * 3))
for es, i in enumerate(SHAKES)],
lambda x: 0])
+ (x - (n.offset - t)) / NOTE_DURATION
)
]) for en, n in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)]
]
svs, bpms = svOsuMeasureLineMD(events,
scalingFactor=SCALE,
firstOffset=97742,
lastOffset=125993,
paddingSize=PADDING,
endBpm=250)
m.svs.extend(svs)
m.bpms.extend(bpms)
| 2 | 2 |
bin/flt-include-doc-map.py | tapaswenipathak/MkTechDocs | 9 | 12795281 | #!/usr/bin/env python3
# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# This filter is identical to the include filter, except
# that while building a document, it outputs a document
# map on stderr so that a script can figure out where each part of
# the document came from. E.g.
#
# ```include
# includethisfile.md
# ```
# This filter is recursive, so your markdown can include
# other markdown to any level.
#
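# Illustrative invocation (assumes this script is executable and on PATH; file names
# below are placeholders, not from the original project):
#   pandoc -f markdown -t html -F flt-include-doc-map.py book.md -o book.html 2> docmap.txt
# The stderr redirect captures the document map that this filter emits.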
import os
import sys
import json
import re
from subprocess import Popen, PIPE
from pandocfilters import toJSONFilter, walk, get_value, Str, Header, Para, Space
REFFILE=""
def md_to_json(s):
p = Popen(["pandoc", "-f", "markdown", "-t", "json"], stdin=PIPE, stdout=PIPE)
p.stdin.write(s.encode())
(stdout, stderr) = p.communicate()
if str(stderr) != "None":
sys.stderr.write("WARNING: Conversion to json had results in stderr: " + str(stderr))
return stdout.decode()
def get_contents_of_file(f, levels=u"0"):
numLevels = int(levels)
# Return the contents of file unchanged if no change in level is needed
if numLevels == 0:
if os.path.isfile(f):
with open(f, "r") as myFile:
return myFile.read()
else:
sys.stderr.write("WARNING: cannot read " + f)
return "FILE NOT FOUND: " + f
# Alter the level
alterLevelBy = abs(numLevels)
pre = "in" if numLevels > 0 else "de"
if alterLevelBy > 5:
sys.stderr.write("WARNING: Header change out of bounds. Will stick at a maximum of 6 or minimum of 0\n")
alterLevelBy = 5
p = Popen(["pandoc", "-f", "markdown", "-t", "markdown", "-F", "flt-" + pre + "crement-header-" + str(alterLevelBy) + ".py", f], stdout=PIPE)
(stdout, stderr) = p.communicate()
stdout = stdout.decode()
if stderr is not None:
stderr = stderr.decode()
if stderr != "None":
sys.stderr.write("WARNING: Conversion to json had results in stderr: " + stderr)
return stdout
def docmap(key, value, format, meta):
global REFFILE
if key == 'Header':
[level, attr, inline] = value
[ids, classes, keyvals] = attr
# Change the reference file if we see a new level-1 header
if level == 1 and 'fromfile' in meta:
reffile = re.sub("\.md", ".html", meta['fromfile']['c'])
REFFILE="~~" + reffile + "~~"
sys.stderr.write(reffile + "\n")
return Header(level, [REFFILE + str(ids), [], []], inline)
elif key == 'CodeBlock':
[[ident, classes, keyvals], code] = value
if "include" in classes:
rv = []
for l in code.splitlines():
l = l.strip()
if os.path.isfile(l):
(headingLevel, dummy) = get_value(keyvals, "heading-level")
if not headingLevel:
headingLevel = 0
contents = get_contents_of_file(l, headingLevel)
doc = json.loads(md_to_json(contents))
if 'meta' in doc:
meta = doc['meta']
elif doc[0]: # old API
meta = doc[0]['unMeta']
else:
meta = {}
# Add a file to the meta info
meta['fromfile']= {u'c':l, u't':'MetaString'}
altered = walk(doc, docmap, format, meta)
rv.append(altered['blocks'])
else:
sys.stderr.write("WARNING: Can't read file '" + l + "'. Skipping.")
# Return a flattened list using nested list comprehension
#
# The following is equivalent to:
#
# flattened = []
# for sublist in rv:
# for item in sublist:
# flattened.append(item)
# return flattened
return [item for sublist in rv for item in sublist]
if __name__ == "__main__":
toJSONFilter(docmap)
| 2.703125 | 3 |
src/rez/command.py | alexey-pelykh/rez | 0 | 12795282 | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from rez.config import config
class Command(object):
"""An interface for registering custom Rez subcommand
To register plugin and expose subcommand, the plugin module..
* MUST have a module-level docstring (used as the command help)
* MUST provide a `setup_parser()` function
* MUST provide a `command()` function
* MUST provide a `register_plugin()` function
* SHOULD have a module-level attribute `command_behavior`
For example, a plugin named 'foo' and this is the `foo.py`:
'''The docstring for command help, this is required.
'''
from rez.command import Command
command_behavior = {
"hidden": False, # optional: bool
"arg_mode": None, # optional: None, "passthrough", "grouped"
}
def setup_parser(parser, completions=False):
parser.add_argument("--hello", ...)
def command(opts, parser=None, extra_arg_groups=None):
if opts.hello:
print("world")
class CommandFoo(Command):
schema_dict = {}
@classmethod
def name(cls):
return "foo"
def register_plugin():
return CommandFoo
"""
def __init__(self):
self.type_settings = config.plugins.extension
self.settings = self.type_settings.get(self.name())
@classmethod
def name(cls):
"""Return the name of the Command and rez-subcommand."""
raise NotImplementedError
| 2.546875 | 3 |
src/streetool/classifications.py | actionprojecteu/action-tool | 0 | 12795283 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright (c) 2021
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
import os
import os.path
import logging
#--------------
# local imports
# -------------
from streetool.utils import get_image, paging
# -----------------------
# Module global variables
# -----------------------
log = logging.getLogger("streetool")
# ----------------
# Module constants
# ----------------
def dynamic_sql(options):
columns = list()
headers = list()
where = ""
if options.workflow:
columns.append("workflow_id")
headers.append("Workflow Id")
if options.user:
columns.append("user_id")
headers.append("User Id")
elif options.anon_user:
columns.append("user_ip")
headers.append("User IP")
where = "WHERE user_id IS NULL"
if options.subject:
columns.append("subject_id")
headers.append("Subject Id")
if options.classification:
columns.append("classification_id")
headers.append("Classification Id")
if options.source:
columns.append("cluster_id")
headers.append("Source Id")
if len(columns) == 0:
raise ValueError("At least one --<flag> must be specified")
headers.append("# Classif")
sql = f"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}"
return sql, headers
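# Example (derived from the builder above): with --workflow and --user set, dynamic_sql
# returns roughly
#   SELECT workflow_id,user_id, COUNT(*) FROM spectra_classification_v
#   GROUP BY workflow_id,user_id ORDER BY workflow_id,user_id
# together with headers ["Workflow Id", "User Id", "# Classif"].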
# ========
# COMMANDS
# ========
def view(connection, options):
sql, headers = dynamic_sql(options)
cursor = connection.cursor()
cursor.execute(sql)
paging(
iterable = cursor,
headers = headers,
) | 2.125 | 2 |
kerasAC/interpret/form_modisco_inputs.py | kundajelab/kerasAC | 6 | 12795284 | import argparse
import math
import pysam
import shap
import tensorflow
from deeplift.dinuc_shuffle import dinuc_shuffle
from scipy.spatial.distance import jensenshannon
from scipy.special import logit, softmax
tensorflow.compat.v1.disable_v2_behavior()
import kerasAC
import matplotlib
import pandas as pd
from kerasAC.interpret.deepshap import *
from kerasAC.interpret.profile_shap import *
from kerasAC.util import *
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import get_custom_objects
from kerasAC.custom_losses import *
from kerasAC.metrics import *
def parse_args():
parser = argparse.ArgumentParser(description="Argument Parser for SNP scoring")
parser.add_argument("--model_hdf5")
parser.add_argument("--peak_file")
parser.add_argument("--npeaks_to_sample",type=int,default=30000)
parser.add_argument("--out_prefix")
parser.add_argument(
"--ref_fasta", default="/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta"
)
parser.add_argument("--dinuc_shuffle_input",action='store_true',default=False)
parser.add_argument("--chrom_sizes", default="/data/hg38.chrom.sizes")
parser.add_argument("--flank_size", type=int, default=1057)
parser.add_argument("--batch_size",type=int,default=100)
return parser.parse_args()
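# Illustrative command line (paths are placeholders, not from the original project):
#   python form_modisco_inputs.py --model_hdf5 model.h5 --peak_file peaks.bed \
#       --out_prefix out/sample --flank_size 1057 --batch_size 100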
def load_model_wrapper(model_hdf5):
# load the model!
custom_objects = {
"recall": recall,
"sensitivity": recall,
"specificity": specificity,
"fpr": fpr,
"fnr": fnr,
"precision": precision,
"f1": f1,
"ambig_binary_crossentropy": ambig_binary_crossentropy,
"ambig_mean_absolute_error": ambig_mean_absolute_error,
"ambig_mean_squared_error": ambig_mean_squared_error,
"MultichannelMultinomialNLL": MultichannelMultinomialNLL,
}
get_custom_objects().update(custom_objects)
return load_model(model_hdf5)
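# combine_mult_and_diffref projects DeepSHAP multipliers into "hypothetical" contribution
# scores: for every position it asks what the contribution would have been had each of the
# four bases been present, then averages those projections over the shuffled backgrounds.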
def combine_mult_and_diffref(mult, orig_inp, bg_data):
to_return = []
for l in [0]:
projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype("float")
assert len(orig_inp[l].shape) == 2
for i in range(orig_inp[l].shape[-1]):
hypothetical_input = np.zeros_like(orig_inp[l]).astype("float")
hypothetical_input[:, i] = 1.0
hypothetical_difference_from_reference = (
hypothetical_input[None, :, :] - bg_data[l]
)
hypothetical_contribs = hypothetical_difference_from_reference * mult[l]
projected_hypothetical_contribs[:, :, i] = np.sum(
hypothetical_contribs, axis=-1
)
to_return.append(np.mean(projected_hypothetical_contribs, axis=0))
to_return.append(np.zeros_like(orig_inp[1]))
return to_return
def shuffle_several_times(s):
numshuffles = 20
return [
np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]),
np.array([s[1] for i in range(numshuffles)]),
]
def main():
args = parse_args()
chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\n')
chrom_size_dict={}
for line in chrom_sizes:
tokens=line.split('\t')
chrom_size_dict[tokens[0]]=int(tokens[1])
ref=pysam.FastaFile(args.ref_fasta)
# load the model
model = load_model_wrapper(args.model_hdf5)
print("loaded model")
# create the count & profile explainers
model_wrapper = (model.input, model.outputs[1][:, 0:1])
count_explainer = shap.DeepExplainer(
model_wrapper,
data=create_background_atac,
combine_mult_and_diffref=combine_mult_and_diffref_atac
)
prof_explainer = create_explainer(model, ischip=False, task_index=0)
print("made explainers")
#read in the peaks
peaks=pd.read_csv(args.peak_file,header=None,sep='\t')
nrow=peaks.shape[0]
tosample=round(int(args.npeaks_to_sample)/nrow,2)
peaks = peaks.sample(frac=tosample).reset_index(drop=True)
nrow=peaks.shape[0]
print("sampled peaks:"+str(nrow))
#allocate space for numpy arrays for modisco
hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4))
hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4))
observed_profile_scores=np.empty((nrow,2*args.flank_size,4))
observed_count_scores=np.empty((nrow,2*args.flank_size,4))
seq=np.empty((nrow,2*args.flank_size,4))
print("pre-allocted output arrays")
#generate one-hot-encoded inputs
start_index=0
while start_index < nrow:
cur_batch_size=min(args.batch_size,nrow-start_index)
print(str(start_index)+":"+str(start_index+cur_batch_size))
batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist()
batch_start_pos=peaks[1]+peaks[9]-args.flank_size
batch_start_pos=batch_start_pos.tolist()
batch_start_pos=[max(0,i) for i in batch_start_pos]
batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)]
seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)]
if args.dinuc_shuffle_input is True:
seq_batch=[dinuc_shuffle(i) for i in seq_batch]
seq_batch=one_hot_encode(seq_batch)
seq[start_index:start_index+cur_batch_size,:,:]=seq_batch
#get the hypothetical scores for the batch
hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None)
observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch
hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0])
observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch
start_index+=args.batch_size
#save
print("saving outputs")
np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores)
np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores)
np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores)
np.save(args.out_prefix+'.observed.count.npy',observed_count_scores)
np.save(args.out_prefix+'.seq.npy',seq)
if __name__ == "__main__":
main()
| 1.851563 | 2 |
wd_cw05/wd_cw05_zadanie_2.py | pawel2706111/wizualizacja-danych | 0 | 12795285 | # Przeciąż metodę __add__() dla klasy Kwadrat,
# która będzie zwracała instancje klasy Kwadrat o nowym boku,
# będącym sumą boków dodawanych do siebie kwadratów.
class Ksztalty:
def __init__(self, x, y):
self.x=x
self.y=y
self.opis = "To będzie klasa dla ogólnych kształtów"
def pole(self):
return self.x * self.y
def obwod(self):
return 2 * self.x + 2 * self.y
def dodaj_opis(self, text):
self.opis = text
def skalowanie(self, czynnik):
self.x = self.x * czynnik
        self.y = self.y * czynnik
class Kwadrat(Ksztalty):
def __init__(self, x):
self.x = x
self.y = x
def __add__(self, other):
return self.x + other.x
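# Note: as written, __add__ returns the integer sum of the two side lengths, and the
# calls below wrap that sum in Kwadrat(...) to build the new square.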
kw1 = Kwadrat(5)
kw2 = Kwadrat(6)
kw3 = Kwadrat(kw1 + kw2)
kw4 = Kwadrat(kw1 + kw3)
print('kw1 ma wymiary:', kw1.x,'x', kw1.y)
print('kw2 ma wymiary:', kw2.x,'x', kw2.y)
print('kw3 ma wymiary:', kw3.x,'x', kw3.y)
print('kw4 ma wymiary:', kw4.x,'x', kw4.y) | 4.0625 | 4 |
cpmpy/ski_assignment.py | tias/hakank | 279 | 12795286 | <gh_stars>100-1000
"""
Ski assignment in cpmpy
From <NAME>, Jr.:
PIC 60, Fall 2008 Final Review, December 12, 2008
http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf
'''
5. Ski Optimization! Your job at Snapple is pleasant but in the winter
you've decided to become a ski bum. You've hooked up with the Mount
Baldy Ski Resort. They'll let you ski all winter for free in exchange
for helping their ski rental shop with an algorithm to assign skis to
skiers. Ideally, each skier should obtain a pair of skis whose height
matches his or her own height exactly. Unfortunately, this is generally
not possible. We define the disparity between a skier and his or her
skis to be the absolute value of the difference between the height of
the skier and the pair of skis. Our objective is to find an assignment
of skis to skiers that minimizes the sum of the disparities.
...
Illustrate your algorithm by explicitly filling out the A[i, j] table
for the following sample data:
* Ski heights: 1, 2, 5, 7, 13, 21.
* Skier heights: 3, 4, 7, 11, 18.
'''
This cpmpy model was written by <NAME> (<EMAIL>)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def ski_assignment():
# data
num_skis = 6
num_skiers = 5
ski_heights = [1, 2, 5, 7, 13, 21]
skier_heights = [3, 4, 7, 11, 18]
# which ski to choose for each skier
x = intvar(0,num_skis-1,shape=num_skiers,name="x")
z = intvar(0, sum(ski_heights), name="z")
model = Model(minimize=z)
# constraints
model += [AllDifferent(x)]
# model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )]
model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)] )]
ss = CPM_ortools(model)
num_solutions = 0
if ss.solve():
num_solutions += 1
print("total differences:", z.value())
for i in range(num_skiers):
x_val = x[i].value()
ski_height = ski_heights[x[i].value()]
diff = ski_height - skier_heights[i]
print('Skier %i: Ski %i with length %2i (diff: %2i)' %\
(i, x_val, ski_height, diff))
print()
print()
print('num_solutions:', num_solutions)
return ss
ss = ski_assignment()
| 3.265625 | 3 |
maya/maximumreplacer/maximumreplacer.py | KasumiL5x/misc-scripts | 0 | 12795287 | #
# Maximum Replacer
# <NAME>, 2019
# GitHub: KasumiL5x
import re
import PySide2.QtCore as QC
import PySide2.QtGui as QG
import PySide2.QtWidgets as QW
import shiboken2
import maya.cmds as mc
import maya.mel as mel
import maya.OpenMayaUI as omui
def get_maya_window():
ptr = omui.MQtUtil.mainWindow()
parent = shiboken2.wrapInstance(long(ptr), QW.QDialog)
return parent
#end
class MaximumReplacer(QW.QDialog):
def __init__(self, parent=get_maya_window()):
QW.QDialog.__init__(self, parent=parent)
# [(short_name, long_name), ...]
self.selected_items = []
# [(regexed_short_name, different_from_original), ...] maps 1-1 with the above in size
self.regexed_items = []
self.setWindowFlags(QC.Qt.Window)
self.setWindowTitle('Maximum Replacer')
self.setMinimumWidth(380)
self.setMinimumHeight(400)
self.setLayout(QW.QVBoxLayout())
self.layout().setContentsMargins(5, 5, 5, 5)
self.layout().setSpacing(5)
self.layout().setAlignment(QC.Qt.AlignTop)
# Selection Section
#
gb_selection = QW.QGroupBox()
gb_selection.setLayout(QW.QHBoxLayout())
gb_selection.layout().setContentsMargins(2,2,2,2)
gb_selection.layout().setSpacing(5)
gb_selection.setTitle('Selection')
self.layout().addWidget(gb_selection)
#
self.rb_select_all = QW.QRadioButton()
self.rb_select_all.setText('All')
self.rb_select_all.setChecked(True)
gb_selection.layout().addWidget(self.rb_select_all)
#
self.rb_select_sel = QW.QRadioButton()
self.rb_select_sel.setText('Selected')
gb_selection.layout().addWidget(self.rb_select_sel)
#
self.txt_filter_name = QW.QLineEdit()
self.txt_filter_name.setPlaceholderText('Pattern...')
gb_selection.layout().addWidget(self.txt_filter_name)
#
self.txt_filter_type = QW.QLineEdit()
self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...')
gb_selection.layout().addWidget(self.txt_filter_type)
# Expression Section
#
gb_expression = QW.QGroupBox()
gb_expression.setLayout(QW.QVBoxLayout())
gb_expression.layout().setContentsMargins(2,2,2,2)
gb_expression.layout().setSpacing(5)
gb_expression.setTitle('Regular Expression')
self.layout().addWidget(gb_expression)
#
expr_widget = QW.QWidget()
expr_widget.setLayout(QW.QHBoxLayout())
expr_widget.layout().setContentsMargins(2,2,2,2)
expr_widget.layout().setSpacing(5)
gb_expression.layout().addWidget(expr_widget)
#
lbl_regex = QW.QLabel()
lbl_regex.setText('Pattern')
expr_widget.layout().addWidget(lbl_regex)
#
self.txt_replace_expr = QW.QLineEdit()
self.txt_replace_expr.setPlaceholderText('Regex...')
expr_widget.layout().addWidget(self.txt_replace_expr)
#
subs_widget = QW.QWidget()
subs_widget.setLayout(QW.QHBoxLayout())
subs_widget.layout().setContentsMargins(2,2,2,2)
subs_widget.layout().setSpacing(5)
gb_expression.layout().addWidget(subs_widget)
#
lbl_subst = QW.QLabel()
lbl_subst.setText('Substitute')
subs_widget.layout().addWidget(lbl_subst)
#
self.txt_replace_subs = QW.QLineEdit()
self.txt_replace_subs.setPlaceholderText('Substitute...')
subs_widget.layout().addWidget(self.txt_replace_subs)
# Preview Section
#
gb_preview = QW.QGroupBox()
gb_preview.setLayout(QW.QVBoxLayout())
gb_preview.layout().setContentsMargins(2,2,2,2)
gb_preview.layout().setSpacing(5)
gb_preview.setTitle('Preview')
self.layout().addWidget(gb_preview)
#
self.lv_preview = QW.QListWidget()
gb_preview.layout().addWidget(self.lv_preview)
# Button!
self.btn_commit = QW.QPushButton()
self.btn_commit.setText('Commit')
self.layout().addWidget(self.btn_commit)
# footer
footer_widget = QW.QWidget()
footer_widget.setLayout(QW.QHBoxLayout())
footer_widget.layout().setContentsMargins(0,0,0,0)
footer_widget.layout().setSpacing(5)
self.layout().addWidget(footer_widget)
# copyright!
info_lbl = QW.QLabel()
info_lbl.setTextFormat(QC.Qt.RichText)
info_lbl.setOpenExternalLinks(True)
info_lbl.setText('Maximum Replacer v1.2 <a href=\"http://www.dgreen.me/\">www.dgreen.me</a>')
footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft)
# update while typing checkbox
self.chk_update_while_typing = QW.QCheckBox()
self.chk_update_while_typing.setText('Update while typing')
self.chk_update_while_typing.setChecked(True)
footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight)
# connections
self.txt_filter_name.textChanged.connect(self.on_text_changed)
self.txt_filter_type.textChanged.connect(self.on_text_changed)
self.txt_replace_expr.textChanged.connect(self.on_text_changed)
self.txt_replace_subs.textChanged.connect(self.on_text_changed)
self.txt_filter_name.editingFinished.connect(self.on_text_edited)
self.txt_filter_type.editingFinished.connect(self.on_text_edited)
self.txt_replace_expr.editingFinished.connect(self.on_text_edited)
self.txt_replace_subs.editingFinished.connect(self.on_text_edited)
self.rb_select_all.clicked.connect(self.update)
self.rb_select_sel.clicked.connect(self.update)
self.btn_commit.clicked.connect(self.commit)
# initial
self.update()
#end
# called when any text changes in text fields
def on_text_changed(self):
if not self.chk_update_while_typing.isChecked():
return
self.update()
#end
# called when changes have been committed in text fields (e.g. return pressed)
def on_text_edited(self):
if self.chk_update_while_typing.isChecked():
return
self.update()
#end
def edit_done(self):
print 'Editing done'
#end
def get_real_short_names(self, selected):
result = []
for x in mc.ls(sl=selected, shortNames=True):
result.append(x[x.rfind('|')+1:]) # basically strip all after last | (the |+1 becomes 0 if the find fails, so it's okay to fail)
return result
#end
def get_selection(self, regex=None):
result = []
# all objects
if self.rb_select_all.isChecked():
result = zip(self.get_real_short_names(False), mc.ls(long=True))
# selected objects
if self.rb_select_sel.isChecked():
result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True))
# filter by type
filter_type = self.txt_filter_type.text()
if len(filter_type):
to_remove = []
for idx in range(len(result)):
node_type = mc.nodeType(result[idx][1])
try:
if None == re.search(filter_type, node_type):
to_remove.append(idx)
except:
continue
#end for
# remove all non-matching elements
result = [x for idx, x in enumerate(result) if idx not in to_remove]
#end
# filter by expression
pattern = self.txt_filter_name.text()
if len(pattern):
to_remove = []
for idx in range(len(result)):
try:
if None == re.search(pattern, result[idx][0]):
to_remove.append(idx)
except:
continue
#end for
# remove all non-matching elements
result = [x for idx, x in enumerate(result) if idx not in to_remove]
#end
return result
#end
def calculate_regexed_names(self):
pattern = self.txt_replace_expr.text()
subs = self.txt_replace_subs.text()
result = []
for x in self.selected_items:
subbed_name = x[0]
try:
subbed_name = re.sub(pattern, subs, x[0])
subbed_name = mel.eval('formValidObjectName(\"{0}\");'.format(subbed_name)) # make it maya-valid
result.append((subbed_name, subbed_name != x[0])) # (regex name, changed from original)
except:
result.append((subbed_name, False)) # failed so just pass through data and make it not changed
return result
#end
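    # Illustrative behaviour (names are made up): with Pattern r'_old$' and Substitute
    # '_new', a node called 'arm_old' previews as 'arm_old => arm_new'; names the regex
    # does not change are passed through unchanged.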
def update(self):
# 1. get the selection
self.selected_items = self.get_selection()
# 2. get the regex'd versions
self.regexed_items = self.calculate_regexed_names()
# 3. update list view with a preview of changes
bold_font = QG.QFont('', -1, QG.QFont.Bold, False)
self.lv_preview.clear()
for x in range(len(self.selected_items)):
short_old = self.selected_items[x][0]
short_new = self.regexed_items[x][0]
if self.regexed_items[x][1]:
txt = short_old + ' => ' + short_new
else:
txt = short_old
self.lv_preview.addItem(txt)
if self.regexed_items[x][1]:
self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font)
#end
def commit(self):
# safety check
if None == self.selected_items or None == self.regexed_items:
return
# confirm dialog
number_different = len([x for x in self.regexed_items if x[1]])
dialog_msg = 'Confirm rename of ' + str(number_different) + ' objects?'
dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No')
if 'No' == dialog_result:
return
# undo chunk for all names
mc.undoInfo(openChunk=True, chunkName='MaximumReplacer')
# rename all objects (in REVERSE order as to not break the hierarchy)
for x in reversed(range(len(self.selected_items))):
# ignore nodes that don't need changing
if not self.regexed_items[x][1]:
continue
old_name = self.selected_items[x][1] # old LONG name
new_name = self.regexed_items[x][0] # new SHORT name
try:
mc.rename(old_name, new_name)
except Exception as e:
print 'Failed to rename %s: %s' % (old_name, e)
# end chunk!
mc.undoInfo(closeChunk=True)
# refresh view
self.update()
#end
#end
def create():
global g_maximum_replacer_inst
try:
g_maximum_replacer_inst.close()
g_maximum_replacer_inst.deleteLater()
except:
pass
g_maximum_replacer_inst = MaximumReplacer()
g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose)
g_maximum_replacer_inst.show()
#end
# uncomment this to run directly from the script editor (or call it from a shelf)
# create()
| 2.078125 | 2 |
poradnia/advicer/migrations/0017_auto_20190404_0337.py | efefre/poradnia | 23 | 12795288 | # Generated by Django 1.11.13 on 2019-04-04 01:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("advicer", "0016_auto_20190404_0320")]
operations = [
migrations.AlterField(
model_name="advice",
name="subject",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Subject"
),
)
]
| 1.421875 | 1 |
python/GV.py | lehaianh3112/ProjectLeHaiAnh | 2 | 12795289 | from __future__ import division, print_function, unicode_literals
import streamlit as st
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
st.title('Mô hình dự đoán giá nhà đất tại hồ gươm ')
# x1 is the land area of the lot (m2)
# x2 is the street-frontage length (m)
# x3 is the number of floors
# x4 is the distance to Hoan Kiem Lake (m)
X = np.array([[40, 8, 2, 1800],
[36, 3.5, 6, 450],
[35, 4.5, 6, 450],
[39, 9, 2, 1800],
[40, 9, 1, 1800],
[36, 4.5, 5, 450],
[36, 4.5, 6, 450],
[40, 9, 2, 1800],
[36, 4.5, 7, 450],
[40, 9, 3, 1800],
[44, 4, 5, 350],
[41, 9, 2, 1800],
[37, 4.5, 6, 450],
[36, 5.5, 6, 450],
[40, 10, 2, 1800],
[45, 3, 4, 350],
[45, 4, 3, 350],
[45, 4, 4, 350],
[45, 4, 5, 350],
[45, 5, 4, 350],
[45, 3, 4, 350],
[60, 2.3, 5, 450],
[59, 3.3, 5, 450],
[60, 3.3, 4, 450],
[85, 4, 4, 950],
[85, 4, 5, 950],
[60, 3.3, 5, 450],
[61, 6, 1, 800],
[62, 5, 1, 800],
[85, 4, 6, 950],
[84, 6, 5, 950],
[86, 2.5, 3, 900],
[60, 3.3, 6, 450],
[85, 5, 5, 950],
[85, 3.5, 3, 900],
[86, 3.5, 2, 900],
[31.2, 3, 4, 450],
[61, 3.3, 5, 450],
[62, 6, 1, 800],
[85, 6, 5, 950],
[86, 3.5, 3, 900],
[62, 6, 2, 800],
[86, 3.5, 4, 900],
[87, 3.5, 3, 900],
[30.2, 4, 4, 450],
[62, 6, 3, 800],
[86, 4.5, 3, 900],
[86, 6, 5, 950],
[60, 4.3, 5, 450],
[62, 7, 1, 800],
[63, 6, 1, 800],
[31.2, 4, 4, 450],
[31.2, 4, 3, 450],
[62, 4, 5, 550],
[31.2, 4, 5, 450],
[63, 5, 3, 550],
[63, 4, 5, 550],
[32.2, 4 , 4, 450],
[31.2, 5, 4, 450],
[63, 5, 5, 550],
[64, 4, 5, 550],
[63, 5, 6 , 550],
[63, 6, 4, 550],
[80, 5.8, 7, 1100],
[80, 4.8, 8, 1100],
[80, 5.8, 8, 1100],
[79, 5.8, 8, 1100],
[80, 5.8, 9, 1100],
[81, 5.8, 8, 1100],
[80, 6.8, 8, 1100],
[80, 3.5, 6, 300],
[80, 4.5, 5, 300],
[80, 4.5, 6, 300],
[79, 4.5, 6, 300],
[81, 4.5, 6, 300],
[88, 3.5, 4, 850],
[88, 4.5, 3, 850],
[88, 4.5, 4, 850],
[87, 4.5, 4, 850],
[88, 4.5, 5, 850],
[89, 4.5, 4, 850],
[88, 5.5, 4, 850],
[80, 5.5, 7, 300],
[63, 6, 4, 250],
[62, 7, 4, 250],
[63, 7, 3, 250],
[63, 7, 4, 250],
[63, 7, 5, 250],
[64, 7, 4, 250],
[63, 8, 4, 250],
[140, 4.5, 5, 500],
[139, 5.5, 5, 500],
[140, 5.5, 4, 500],
[140, 5.5, 5, 500],
[140, 5.5, 6, 500],
[141, 5.5, 5, 500],
[140, 6.5, 5, 500]])
Y = np.array([[
19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5,
20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22,
22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35,
31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32, 32, 32.3, 32.3, 32.37,
32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3,
34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537,
44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56,
56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5
]]).T
def duel_plot(X1, X2, Y):
fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1.plot(Y, X[:, 0])
ax1.set_title('xét diện tích với giá tiền')
ax1.set_xlabel('giá tiền')
ax1.set_ylabel('Diện tích m2')
ax2.plot(Y, X[:, 1])
ax2.set_title('xét số mét mặt tiền với giá tiền')
ax2.set_xlabel('giá tiền')
ax2.set_ylabel('số mét mặt tiền')
return fig
def duel_plot2(X4, X5, Y):
fig = plt.figure(figsize=(15, 5))
ax3 = fig.add_subplot(1, 2, 1)
ax4 = fig.add_subplot(1, 2, 2)
ax3.plot(Y, X[:, 2])
ax3.set_title('xét số tầng nhà với giá tiền')
ax3.set_xlabel('giá tiền')
ax3.set_ylabel('số tầng nhà')
ax4.plot(Y, X[:, 3])
ax4.set_title('xét khoảng cách với giá tiền')
ax4.set_xlabel('giá tiền')
ax4.set_ylabel('khoảng cách tới hồ gươm')
return fig
st.set_option('deprecation.showPyplotGlobalUse', False)
st.pyplot(duel_plot(X[:, 0], X[:, 1], Y))
st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y))
st.sidebar.title('Dự đoán giá các mẫu nhà')
dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ')
cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ')
tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ')
kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ')
one = np.ones((X.shape[0], 1))
Xbar = np.concatenate((one, X), axis=1)
x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2)
A = np.dot(Xbar.T, Xbar)
b = np.dot(Xbar.T, Y)
w = np.dot(np.linalg.pinv(A), b)
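# Closed-form least squares via the normal equations: w = (Xbar^T Xbar)^+ Xbar^T Y,
# using the pseudo-inverse so the solve stays stable even if Xbar^T Xbar is singular.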
w_0 = w[0][0]
w_1 = w[1][0]
w_2 = w[2][0]
w_3 = w[3][0]
w_4 = w[4][0]
st.write("Độ chính xác (R2 square) : ", r2_score(y_test, np.dot(x_test, w)))
vd = np.array([dt_name, cd_name, tn_name, kc_name, 1])
if st.sidebar.button('Dự đoán'):
y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \
float(tn_name)+w_4*float(kc_name) + w_0
st.sidebar.write('Giá của ngôi nhà là : ', y1, 'tỷ đồng')
| 2.640625 | 3 |
tests/test_toBig.py | andreacosolo/granite | 3 | 12795290 | #################################################################
# Libraries
#################################################################
import sys, os
import pytest
import bitarray
from granite.toBig import (
main as main_toBig
)
from granite.lib.shared_functions import *
#################################################################
# Tests
#################################################################
def test_run_toBig_rdthr_2_all():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11001, 11007, 11010]
ins_expect = [11005, 11022]
del_expect = [11017, 11030]
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_2_2():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11001, 11002, 11007, 11010, 11013, 11023]
ins_expect = [11005, 11022]
del_expect = [11017, 11030]
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_17_all():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11007]
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_17_2():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11007, 11010]
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_abthr_15_all():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11007, 11010, 11013]
ins_expect = [11022]
del_expect = [11030]
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_abthr_25_all():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': '25',
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11010]
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_abthr_25_2():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr': '25',
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11007, 11010]
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_2_1_single():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz'],
'outputfile': 'tests/files/main_test.out',
'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11001, 11002, 11007, 11010, 11013, 11023]
ins_expect = [11005, 11022]
del_expect = [11017, 11030]
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_2_2_single():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz'],
'outputfile': 'tests/files/main_test.out',
'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = []
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
#################################################################
# Errors
#################################################################
def test_run_toBig_rdthr_2_all_miss_pos():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run and Tests
with pytest.raises(Exception) as e:
assert main_toBig(args)
assert str(e.value) == '\nERROR in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not consistent with other files\n'
#end def
| 1.875 | 2 |
src/melissa/__init__.py | aleksandrgordienko/melissa-quiz | 0 | 12795291 | <reponame>aleksandrgordienko/melissa-quiz<gh_stars>0
from melissa.melissa import Melissa
| 0.945313 | 1 |
Lecture_notes/数据提取与验证码的识别(下)/code/lol_test.py | littleturings/2021PythonWebCrawler | 1 | 12795292 | <reponame>littleturings/2021PythonWebCrawler
from selenium import webdriver
from time import sleep
driver = webdriver.Chrome()
driver.get("https://www.huya.com/g/lol")
while True:
names = driver.find_elements_by_class_name("nick")
counts = driver.find_elements_by_class_name("js-num")
for name, count in zip(names,counts):
print(name.text,":",count.text)
if driver.page_source.find("laypage_next") != -1:
driver.find_element_by_class_name("laypage_next").click()
sleep(3)
else:
break
driver.quit() | 3.15625 | 3 |
notas/urls.py | shiminasai/cafodca | 0 | 12795293 | from django.conf.urls import url
from django.views.generic import ListView, DetailView
from models import Notas
from .views import *
urlpatterns = [
url(r'^$', list_notas,name='notas_list'),
# url(r'^$', 'lista_notas', name="notas_list"),
url(r'^pais/(?P<id>\d+)/$', lista_notas_pais, name="notas_list_pais"),
url(r'^coparte/(?P<id>\d+)/$', lista_notas_copartes, name="notas_list_copartes"),
# url(r'^ver/(?P<id>\d+)/$', 'comentar_nota', name='comentar-nota'),
url(r'^(?P<id>\d+)/$', nota_detail, name='notas-detail'),
url(r'^crear/$', crear_nota, name="crear-nota"),
# url(r'^editar/(?P<id>\d+)/$', 'editar_nota', name='editar-nota'),
# url(r'^borrar/(?P<id>\d+)/$', 'borrar_nota', name='borrar-nota'),
url(r'^imagenes/$', ver_imagenes, name="imagenes-nota"),
url(r'^videos/$', ver_videos, name="videos-nota"),
]
| 1.992188 | 2 |
dev/tools/leveleditor/direct/directbase/DirectStart.py | CrankySupertoon01/Toontown-2 | 1 | 12795294 | <gh_stars>1-10
# This is a hack fix to get the graphics pipes to load with my
# hacked up Panda3D. If you want to load the level editor
# using DirectX 8 or DirectX 9, uncomment the import for the
# pipe you want to use. Make sure the pipe you want to load
# first is imported first.
# DirectX 9
#try:
# import libpandadx9
#except:
# pass
# DirectX 8
#try:
# import libpandadx8
#except:
# pass
# OpenGL
try:
import libpandagl
except:
pass
print 'DirectStart: Starting the game.'
from direct.showbase import ShowBase
base = ShowBase.ShowBase()
| 1.804688 | 2 |
python/terminal/question.py | VEXG/experimental | 1 | 12795295 | get_nama = input('Masukan nama : ')
try:
get_umur = int(input('Masukan umur : '))
except Exception as e:
print('Harus pakek angka ya')
else:
print(f'Halo {get_nama.capitalize()}!')
print(f'Umur kamu {str(get_umur)}')
| 3.546875 | 4 |
loopChat.py | ThePenultimatum/transformer | 0 | 12795296 | # -*- coding: utf-8 -*-
#/usr/bin/python2
'''
June 2017 by <NAME>.
<EMAIL>.
https://www.github.com/kyubyong/transformer
'''
from __future__ import print_function
import codecs
import os
import tensorflow as tf
import numpy as np
from hyperparams import Hyperparams as hp
from data_load import load_test_data, load_de_vocab, load_en_vocab
from train import Graph
#from nltk.translate.bleu_score import corpus_bleu
def eval():
# Load graph
g = Graph(is_training=False)
print("Graph loaded")
# Load data
# X, Sources, Targets = load_test_data()
"""
x_list, y_list, Sources, Targets = [], [], [], []
for source_sent, target_sent in zip(source_sents, target_sents):
x = [de2idx.get(word, 1) for word in (source_sent + u" </S>").split()] # 1: OOV, </S>: End of Text
y = [en2idx.get(word, 1) for word in (target_sent + u" </S>").split()]
if max(len(x), len(y)) <=hp.maxlen:
x_list.append(np.array(x))
y_list.append(np.array(y))
Sources.append(source_sent)
Targets.append(target_sent)
# Pad
X = np.zeros([len(x_list), hp.maxlen], np.int32)
Y = np.zeros([len(y_list), hp.maxlen], np.int32)
for i, (x, y) in enumerate(zip(x_list, y_list)):
X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0))
"""
en2idx, idx2en = load_en_vocab()
# Start session
with g.graph.as_default():
sv = tf.train.Supervisor()
with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
## Restore parameters
sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))
while(True):
prompt = raw_input()
xlist = []
                xval = [en2idx.get(word, 1) for word in (prompt + u" </S>").split()]
if (len(xval) <= hp.maxlen):
xlist.append(np.array(xval))
X = np.zeros([len(xlist), hp.maxlen], np.int32)
for i, xi in enumerate(xlist):
                    X[i] = np.lib.pad(xi, [0, hp.maxlen - len(xi)], 'constant', constant_values=(0, 0))
list_of_refs, hypotheses = [], []
for i in range(len(X) // hp.batch_size):
### Get mini-batches
x = X[i*hp.batch_size: (i+1)*hp.batch_size]
### Autoregressive inference
preds = np.zeros((hp.batch_size, hp.maxlen), np.int32)
for j in range(hp.maxlen):
#print("j: " + str(j))
_preds = sess.run(g.preds, {g.x: x, g.y: preds})
preds[:, j] = _preds[:, j]
#print(pred) # pred should be length 1 each time due to the cycling of the while loop in main
for pred in preds:
got = " ".join(idx2en[idx] for idx in pred).split("</S>")[0].strip()
#return got
print(got)
if __name__ == '__main__':
eval()
| 2.453125 | 2 |
minik/models.py | bafio/minik | 86 | 12795297 | <filename>minik/models.py
import json
from minik.status_codes import codes
class MinikRequest:
"""
Simple wrapper of the data object received from API Gateway. This object will
parse a given API gateway event and it will transform it into a more user
friendly object to operate on. The idea is that a view does not need to be
concerned with the inner representation of the APIGateway's event as long as
it has access to the underlaying data values in the event.
"""
__slots__ = ['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params',
'method', 'body', '_json_body', 'aws_context', 'aws_event']
def __init__(self, request_type, path, resource, query_params, headers, uri_params, method, body, context, event):
self.request_type = request_type
self.path = path
self.resource = resource
self.query_params = query_params
self.headers = headers
self.uri_params = uri_params
self.method = method
self.body = body
self.aws_context = context
self.aws_event = event
# The parsed JSON from the body. This value should
# only be set if the Content-Type header is application/json,
# which is the default content type.
self._json_body = None
@property
def json_body(self):
"""
Lazy loading/parsing of the json payload.
"""
if self.headers.get('content-type', '').startswith('application/json'):
if self._json_body is None:
self._json_body = json.loads(self.body)
return self._json_body
class Response:
__slots__ = ['body', 'headers', 'status_code']
def __init__(self, body='', headers=None, status_code=codes.ok):
self.body = body
self.headers = headers or {}
self.status_code = status_code
@property
def content_type(self):
return {
key.lower(): value
for key, value in self.headers.items()
}.get('content-type')
def to_dict(self, binary_types=None):
return {
'headers': self.headers,
'statusCode': self.status_code,
'body': self.body
}
| 2.5 | 2 |
manage.py | DzoanaZ/Aplikacje-internetowe2 | 0 | 12795298 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "forecastsite.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Nie można zaimportować Django"
)
raise
execute_from_command_line(sys.argv)
| 1.359375 | 1 |
No_0171_Excel Sheet Column Number/excel_sheet_column_number_by_recursion.py | coderMaruf/leetcode-1 | 32 | 12795299 | '''
Description:
Given a column title as appear in an Excel sheet, return its corresponding column number.
For example:
A -> 1
B -> 2
C -> 3
...
Z -> 26
AA -> 27
AB -> 28
...
Example 1:
Input: "A"
Output: 1
Example 2:
Input: "AB"
Output: 28
Example 3:
Input: "ZY"
Output: 701
'''
class Solution:
def titleToNumber(self, s: str) -> int:
if len(s) == 1:
# base case
return ord(s)-64
else:
# general case
return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] )
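# Worked example of the recurrence: "ZY" -> 26*titleToNumber("Z") + titleToNumber("Y")
#                                         = 26*26 + 25 = 701, matching Example 3 above.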
# n : the length of input string s
## Time Complexity: O( n )
#
# The major overhead in time is the call depth of recursion, which is of O( n ).
## Space Complexity: O( n )
#
# The major overhead in space is to maintain call stack for recursion, which is of O( n ).
def test_bench():
test_data = ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA']
for s in test_data:
n = Solution().titleToNumber(s)
print(n)
return
if __name__ == '__main__':
test_bench() | 3.765625 | 4 |
utils/criterion.py | zjunlp/SemEval2021Task4 | 8 | 12795300 | import torch.nn as nn
import torch
class LabelSmoothing(nn.Module):
def __init__(self, size, smoothing=0.0):
super(LabelSmoothing, self).__init__()
self.criterion = nn.KLDivLoss(size_average=False)
#self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.size = size
self.true_dist = None
def forward(self, x, target):
"""
        x: the input, one row per sample and one column per class (self.size columns),
           holding each class's probability; it is converted to log-probabilities below
        target: the class label of each sample
"""
assert x.size(1) == self.size
x = x.log()
        true_dist = x.data.clone()  # deep-copy the input first
#print true_dist
        true_dist.fill_(self.smoothing / (self.size - 1))  # the "otherwise" branch of the label-smoothing formula
#print true_dist
        # Build a one-hot style target: scatter along dim 1 (columns), where
        # target.data.unsqueeze(1) supplies the indices and confidence is the value written
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
self.true_dist = true_dist
# print(x.shape,true_dist.shape)
return self.criterion(x, true_dist)
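# Quick numeric check (illustrative): with size=5 and smoothing=0.1, the row for a sample
# whose label is class 2 becomes [0.025, 0.025, 0.9, 0.025, 0.025].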
class LabelSmoothingLoss(nn.Module):
def __init__(self, classes, smoothing=0.0, dim=-1):
super(LabelSmoothingLoss, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.cls = classes
self.dim = dim
def forward(self, pred, target):
pred = pred.log_softmax(dim=self.dim)
with torch.no_grad():
true_dist = pred.data.clone()
true_dist = torch.zeros_like(pred)
true_dist.fill_(self.smoothing / (self.cls - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))
| 2.90625 | 3 |
studzie/keras_gym/mountain_car_v0.py | amozie/amozie | 0 | 12795301 | <filename>studzie/keras_gym/mountain_car_v0.py
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
import copy
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge
from keras.optimizers import Adam, RMSprop
from keras.callbacks import History
from keras import backend as K
import tensorflow as tf
from gym import Env, Space, spaces
from gym.utils import seeding
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.agents.cem import CEMAgent
from rl.agents import SARSAAgent
from rl.callbacks import TrainEpisodeLogger, CallbackList
class MountainCarEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self) -> None:
self.env = gym.make('MountainCar-v0')
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def _step(self, action):
step = self.env.step(action)
step = list(step)
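        # Reward shaping: the next line replaces the sparse MountainCar reward with
        # |velocity| - 0.05 (step[0][1] is the velocity), rewarding momentum building.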
step[1] = np.abs(step[0][1]) - 0.05
return tuple(step)
def _reset(self):
return self.env.reset()
def _seed(self, seed=None):
return self.env.seed(seed)
def _render(self, mode='human', close=False):
return self.env.render(mode, close)
def _close(self):
return self.env.close()
env = MountainCarEnv()
env.seed()
nb_actions = env.action_space.n
x = Input((1,) + env.observation_space.shape)
y = Flatten()(x)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(nb_actions)(y)
y = Activation('linear')(y)
model = Model(x, y)
memory = SequentialMemory(limit=10000, window_length=1)
# policy = BoltzmannQPolicy()
policy = EpsGreedyQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32,
enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy)
dqn.compile(Adam(), metrics=['mae'])
hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)
state = env.reset()
action = env.action_space.sample()
print(action)
state_list= []
for i in range(500):
action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])
state, reward, done, _ = env.step(2)
state_list.append(reward)
env.render()
env.render(close=True)
dqn.test(env, nb_episodes=5, visualize=True)
env.render(close=True) | 2.46875 | 2 |
PGMF/script/focaljunction.py | haiwangyang/PGMF | 0 | 12795302 | <reponame>haiwangyang/PGMF<gh_stars>0
#!/usr/bin/env python
"""
Purpose:
    Handling splice junction coverage (read support for exon-exon junctions)
"""
import focalannotation
import focalgene
import sharedinfo
import pandas as pd
from sharedinfo import exist_file, get_lines
def get_junction_of_species_by_partiallocation(species, partiallocation):
"""
species (e.g., dyak)
    partiallocation is a junction-key prefix (e.g., 3L:17475)
output is all possible junctions with coverage
"""
geneid = sharedinfo.tra_species2geneid[species]
gen = focalgene.FocalGene(species, geneid, "M")
for sex in sharedinfo.ordered_sex:
for tissue in sharedinfo.ordered_tissue7:
jun = FocalJunction(species, sex, tissue)
for range in jun.juncinfo.keys():
if range.startswith(partiallocation):
print(species + "_" + sex + "_" + tissue, partiallocation, range, jun.juncinfo[range])
def get_junction_of_species_by_location(species, location):
"""
species (e.g., dyak)
    location is a full junction key (e.g., 3L:17475357-17475426_+)
output is a dict of junctions with coverage
"""
geneid = sharedinfo.tra_species2geneid[species]
gen = focalgene.FocalGene(species, geneid, "M")
dct = dict()
for sex in sharedinfo.ordered_sex:
for tissue in sharedinfo.ordered_tissue7:
dct[species + "_" + sex + "_" + tissue] = dict()
jun = FocalJunction(species, sex, tissue)
if location in jun.juncinfo:
dct[species + "_" + sex + "_" + tissue][location] = jun.juncinfo[location]
else:
dct[species + "_" + sex + "_" + tissue][location] = 0
return dct
def merge_dcts(dcts):
mdct = dict()
for d in dcts:
for sst in d.keys(): # species_sex_tissue
if not sst in mdct.keys():
mdct[sst] = dict()
for range in d[sst].keys():
mdct[sst][range] = d[sst][range]
return mdct
class FocalJunction:
"""FocalJunction object"""
def __init__(self, species, sex, tissue):
self.species = species
self.sex = sex
self.tissue = tissue
# self.filename = self.species + "_" + sex + "_" + tissue + ".merged.juncs" # spanki juncs
self.filename = self.species + "_" + sex + "_" + tissue + ".sorted.junc.bed"
self.lines = get_lines("../data/junction", self.filename)
self.juncinfo = self.get_juncinfo()
def get_juncinfo(self):
""" get junction info """
dct = dict()
for line in self.lines:
# (juncid, dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign, cov, lirt, rirt, irt, dncov, ancov, numsamps) = line.rstrip().split("\t") # spanki junc
(chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split("\t")
chromStart = int(chromStart)
chromEnd = int(chromEnd)
bs1, bs2 = blockSizes.split(",")
bs1 = int(bs1)
bs2 = int(bs2)
juncstart = chromStart + bs1 + 1
juncend = chromEnd - bs2
dct[chrom + ":" + str(juncstart) + "-" + str(juncend) + "_" + strand] = score
return dct
if __name__ == '__main__':
# consensus splicing junction of tra in dyak
dct_cs = get_junction_of_species_by_location("dyak", "3L:17475357-17475426_+")
# alternative splicing junction (short) of tra in dyak
dct_as1 = get_junction_of_species_by_location("dyak", "3L:17474772-17474844_+")
# alternative splicing junction (long) of tra in dyak
dct_as2 = get_junction_of_species_by_location("dyak", "3L:17474772-17475015_+")
mdct = merge_dcts([dct_cs, dct_as1, dct_as2])
mpd = pd.DataFrame.from_dict(mdct)
mpd.to_csv("../data/output/dyak.tra.junc.summary.txt", sep="\t")
| 2.375 | 2 |
comb_spec_searcher/tree_searcher.py | odinn13/comb_spec_searcher-1 | 0 | 12795303 | <reponame>odinn13/comb_spec_searcher-1
"""
Finds and returns a combinatorial specification, which we call a proof tree.
"""
import time
from collections import defaultdict, deque
from copy import deepcopy
from itertools import chain, product
from random import choice, shuffle
from typing import Dict, FrozenSet, Iterator, List, Optional, Sequence, Set, Tuple
__all__ = ("prune", "proof_tree_generator_dfs", "proof_tree_generator_bfs")
RulesDict = Dict[int, Set[Tuple[int, ...]]]
class Node:
"""A node for a proof tree."""
def __init__(self, n: int, children: Optional[List["Node"]] = None):
if children is None:
children = []
self.label = n
self.children = children
def labels(self) -> Set[int]:
"""Return the set of all labels in the proof tree."""
res = set([self.label])
res.update(chain.from_iterable(node.labels() for node in self.children))
return res
def nodes(self) -> Iterator["Node"]:
"""Yield all nodes in the proof tree."""
yield self
for node in self.children:
yield from node.nodes()
def __str__(self) -> str:
return "".join(["(", str(self.label), *map(str, self.children), ")"])
def __len__(self) -> int:
"""Return the number nodes in the proof tree."""
return 1 + sum(len(c) for c in self.children)
def prune(rdict: RulesDict) -> None:
"""
Prune all nodes not in a combinatorial specification. This changes rdict
in place.
"""
changed = True
while changed:
changed = False
for k, rule_set in list(rdict.items()):
for rule in list(rule_set):
if any(x not in rdict for x in rule):
rule_set.remove(rule)
changed = True
if not rule_set:
del rdict[k]
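# Illustrative sketch (not part of the original module): prune() mutates the rules
# dict in place, first dropping rules that reference unknown labels and then any
# label left with no rules at all.
def _prune_example() -> RulesDict:
    rules: RulesDict = {1: {(2, 3), ()}, 2: {(4,)}, 3: {()}}
    prune(rules)  # label 4 is unknown, so 2 is dropped and 1 then loses its (2, 3) rule
    return rules  # -> {1: {()}, 3: {()}}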
def iterative_prune(rules_dict: RulesDict, root: Optional[int] = None) -> RulesDict:
"""Prune all nodes not iteratively verifiable."""
verified_labels: Set[int] = set()
if root is not None:
verified_labels.add(root)
rdict = deepcopy(rules_dict)
new_rules_dict: RulesDict = defaultdict(set)
while True:
changed = False
for k, rule_set in list(rdict.items()):
for rule in list(rule_set):
if all(x in verified_labels for x in rule):
changed = True
verified_labels.add(k)
new_rules_dict[k].add(rule)
rdict[k].remove(rule)
if not rule_set:
del rdict[k]
if not changed:
break
return new_rules_dict
def proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]] = None):
"""Return random proof tree found by depth first search."""
if seen is None:
seen = set()
seen = seen.copy()
if root in rules_dict:
rule_set = rules_dict[root]
root_node = Node(root)
if root in seen or () in rule_set:
seen.add(root)
return seen, root_node
seen.add(root)
rule = choice(list(rule_set))
visited, trees = all_proof_trees_dfs(rules_dict, rule, seen)
root_node.children = trees
return visited, root_node
def all_proof_trees_dfs(
rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]] = None
) -> Tuple[Set[int], List[Node]]:
"""Return all labels which have been seen, together with all of the trees
using the given roots.."""
if seen is None:
seen = set()
if not roots:
return seen, []
root, roots = roots[0], roots[1:]
seen1, tree = proof_tree_dfs(rules_dict, root, seen)
seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1)
return seen1.union(seen2), [tree] + trees
def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node:
"""Takes in a iterative pruned rules_dict and returns iterative proof
tree."""
root_node = Node(root)
queue = deque([root_node])
while queue:
v = queue.popleft()
rule = sorted(rules_dict[v.label])[0]
if not rule == ():
children = [Node(i) for i in rule]
queue.extend([child for child in children if not child.label == root])
v.children = children
return root_node
def random_proof_tree(rules_dict: RulesDict, root: int) -> Node:
"""Return random tree found by breadth first search."""
seen: Set[int] = set()
root_node = Node(root)
queue = deque([root_node])
while queue:
v = queue.popleft()
rule = choice(list(rules_dict[v.label]))
if not (v.label in seen or rule == ()):
children = [Node(i) for i in rule]
shuffle(children)
queue.extend(children)
v.children = children
seen.add(v.label)
return root_node
def smallish_random_proof_tree(
rules_dict: RulesDict, root: int, minimization_time_limit: float
) -> Node:
"""
Searches a rule_dict known to contain at least one specification for a
small specification. Spends minimization_time_limit seconds searching.
"""
start_time = time.time()
smallest_so_far = random_proof_tree(rules_dict, root=root)
smallest_size = len(smallest_so_far)
while time.time() - start_time < minimization_time_limit:
next_tree = random_proof_tree(rules_dict, root=root)
next_tree_size = len(next_tree)
if next_tree_size < smallest_size:
smallest_so_far = next_tree
smallest_size = next_tree_size
return smallest_so_far
def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]:
"""A generator for all proof trees using breadth first search.
N.B. The rules_dict is assumed to be pruned.
"""
def _bfs_helper(root_label: int, seen: FrozenSet[int]):
if root_label in seen:
yield Node(root_label)
return
next_seen = seen.union((root_label,))
for rule in rules_dict[root_label]:
for children in product(
*[_bfs_helper(child_label, next_seen) for child_label in rule]
):
root_node = Node(root_label)
root_node.children = children
yield root_node
sorted_rules_dict = {
start: tuple(sorted(ends)) for start, ends in rules_dict.items()
}
if root in sorted_rules_dict:
yield from _bfs_helper(root, frozenset())
def proof_tree_generator_dfs(
rules_dict: RulesDict, root: int, maximum: Optional[int] = None
) -> Iterator[Node]:
"""A generator for all proof trees using depth first search.
N.B. The rules_dict is assumed to be pruned.
"""
def _dfs_tree(
root_label: int, seen: FrozenSet[int], maximum: int = None
) -> Iterator[Tuple[FrozenSet[int], Node]]:
if maximum is not None and maximum <= 0:
return
if root_label in seen:
yield seen, Node(root_label)
return
seen = seen.union((root_label,))
for rule in sorted_rules_dict[root_label]:
if rule == ():
yield seen, Node(root_label)
else:
for new_seen, children in _dfs_forest(rule, seen, maximum):
root_node = Node(root_label)
root_node.children = children
yield new_seen, root_node
def _dfs_forest(
root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int] = None
) -> Iterator[Tuple[FrozenSet[int], List[Node]]]:
if maximum is not None and maximum <= 0:
return
if not root_labels:
yield seen, []
else:
root, roots = root_labels[0], root_labels[1:]
new_max = maximum - len(root_labels) + 1 if maximum is not None else None
for seen1, tree in _dfs_tree(root, seen, new_max):
length = len(tree)
new_maximum = maximum - length if maximum is not None else None
for seen2, trees in _dfs_forest(roots, seen1, new_maximum):
actual_length = length + sum(len(t) for t in trees)
if maximum is not None and actual_length < maximum:
yield seen1.union(seen2), [tree] + trees
sorted_rules_dict = {
start: tuple(sorted(ends)) for start, ends in rules_dict.items()
}
if root in sorted_rules_dict:
for _, tree in _dfs_tree(root, frozenset(), maximum):
yield tree
def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node:
"""Finds an iterative proof tree for root, if one exists.
"""
trees: Dict[int, Node] = {}
def get_tree(start):
if start == root:
return Node(start)
if start in trees:
return trees[start]
raise KeyError("{} is not in trees".format(start))
def create_tree(start, end):
if start in trees:
return
root = Node(start)
children = [get_tree(i) for i in end]
root.children = children
trees[start] = root
verified_labels = set()
if root is not None:
verified_labels.add(root)
rdict = deepcopy(rules_dict)
new_rules_dict: RulesDict = defaultdict(set)
while True:
changed = False
for k, rule_set in list(rdict.items()):
for rule in list(rule_set):
if all(x in verified_labels for x in rule):
changed = True
verified_labels.add(k)
new_rules_dict[k].add(rule)
create_tree(k, rule)
rdict[k].remove(rule)
if not rule_set:
del rdict[k]
if not changed:
break
if root in trees:
return trees[root]
raise ValueError("{} has no tree in rules_dict".format(root))
| 2.71875 | 3 |
openbabel-2.4.1/scripts/python/setup.py | sxhexe/reaction-route-search | 1 | 12795304 | <gh_stars>1-10
#!/usr/bin/env python
import os
import subprocess
import sys
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils.errors import DistutilsExecError
from distutils.version import StrictVersion
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
from setuptools import setup, Extension
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '2.4.0'
__license__ = 'GPL'
if os.path.exists('README.rst'):
long_description = open('README.rst').read()
else:
long_description = '''
The Open Babel package provides a Python wrapper to the Open Babel C++
chemistry library. Open Babel is a chemical toolbox designed to speak
the many languages of chemical data. It's an open, collaborative
project allowing anyone to search, convert, analyze, or store data from
molecular modeling, chemistry, solid-state materials, biochemistry, or
related areas. It provides a broad base of chemical functionality for
custom development.
'''
class PkgConfigError(Exception):
pass
def pkgconfig(package, option):
"""Wrapper around pkg-config command line tool."""
try:
p = subprocess.Popen(['pkg-config', option, package],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = p.communicate()
if stderr:
raise PkgConfigError('package %s could not be found by pkg-config' % package)
return stdout.strip()
except OSError:
raise PkgConfigError('pkg-config could not be found')
def locate_ob():
"""Try use pkgconfig to locate Open Babel, otherwise guess default location."""
try:
version = pkgconfig('openbabel-2.0', '--modversion')
if not StrictVersion(version) >= StrictVersion('2.3.0'):
print('Warning: Open Babel 2.3.0 or later is required. Your version (%s) may not be compatible.' % version)
include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir')
library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir')
print('Open Babel location automatically determined by pkg-config:')
except PkgConfigError as e:
print('Warning: %s.\nGuessing Open Babel location:' % e)
include_dirs = '/usr/local/include/openbabel-2.0'
library_dirs = '/usr/local/lib'
return include_dirs, library_dirs
class CustomBuild(build):
"""Ensure build_ext runs first in build command."""
def run(self):
self.run_command('build_ext')
build.run(self)
class CustomInstall(install):
"""Ensure build_ext runs first in install command."""
def run(self):
self.run_command('build_ext')
install.run(self)
class CustomSdist(sdist):
"""Add swig interface files into distribution from parent directory."""
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
link = 'hard' if hasattr(os, 'link') else None
self.copy_file('../stereo.i', base_dir, link=link)
self.copy_file('../openbabel-python.i', base_dir, link=link)
class CustomBuildExt(build_ext):
"""Custom build_ext to set SWIG options and print a better error message."""
def finalize_options(self):
# Setting include_dirs, library_dirs, swig_opts here instead of in Extension constructor allows them to be
# overridden using -I and -L command line options to python setup.py build_ext.
build_ext.finalize_options(self)
include_dirs, library_dirs = locate_ob()
self.include_dirs.append(include_dirs)
self.library_dirs.append(library_dirs)
self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar']
self.swig_opts += ['-I%s' % i for i in self.include_dirs]
print('- include_dirs: %s\n- library_dirs: %s' % (self.include_dirs, self.library_dirs))
def swig_sources(self, sources, extension):
try:
return build_ext.swig_sources(self, sources, extension)
except DistutilsExecError:
print('\nError: SWIG failed. Is Open Babel installed?\n'
'You may need to manually specify the location of Open Babel include and library directories. '
'For example:\n'
' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\n'
' python setup.py install')
sys.exit(1)
obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel'])
setup(name='openbabel',
version=__version__,
author=__author__,
author_email=__email__,
license=__license__,
url='http://openbabel.org/',
description='Python interface to the Open Babel chemistry library',
long_description=long_description,
zip_safe=False,
cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist},
py_modules=['openbabel', 'pybel'],
ext_modules=[obextension],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Other Environment',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Programming Language :: C++',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Software Development :: Libraries'
]
)
| 1.835938 | 2 |
tests/scheme/test_stochastic.py | olivierverdier/odelab | 15 | 12795305 | # -*- coding: utf-8 -*-
from __future__ import division
import unittest
import odelab
from odelab.scheme.stochastic import *
from odelab.system import *
from odelab.solver import *
import numpy as np
class Test_OU(unittest.TestCase):
def test_run(self):
sys = OrnsteinUhlenbeck()
scheme = EulerMaruyama()
scheme.h = .01
self.s = SingleStepSolver(scheme, sys)
self.s.initialize(u0=np.array([1.]))
self.s.run(time=1.)
class Test_Differentiator(unittest.TestCase):
t0 = 5e-9
V0 = .01
def test_run(self):
sys = Differentiator(LinBumpSignal(self.V0,self.t0))
## sys.kT = 0. # no noise
scheme = EulerMaruyama()
## scheme.h = 2.5e-11
scheme.h = self.t0
self.s = SingleStepSolver(scheme, sys)
self.s.initialize(u0 = np.array([0,0,0,0,0.]))
self.s.run(time=5*self.t0)
| 2.625 | 3 |
src/polybius.py | nicholasz2510/Polybius | 1 | 12795306 | <reponame>nicholasz2510/Polybius<filename>src/polybius.py
import asyncio
import discord
from discord.ext import commands
import json
import math
import datetime
import os
import random
trivia_answers = [":regional_indicator_a:", ":regional_indicator_b:", ":regional_indicator_c:", ":regional_indicator_d:"]
unicode_max_answers = ['🇦', '🇧', '🇨', '🇩']
bot = commands.Bot(command_prefix='$')
with open("../data.json") as data_file:
data = json.load(data_file)
@bot.event
async def on_ready():
print('Logged on as ' + str(bot.user))
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send("That wasn't a valid command!\nType `$help` to see a list of commands.")
else:
raise error
@bot.command()
@commands.cooldown(1, 60 * 60 * 24, commands.BucketType.user)
async def daily(ctx):
discord_id = str(ctx.message.author.id)
if discord_id not in data:
await ctx.send("You need to register first! Do `$register`")
else:
data[discord_id]["points"] += 1
await ctx.send("You got 1 :candy:")
_save()
@daily.error
async def daily_error(ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send('You already claimed your daily reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after))))
else:
raise error
@bot.command()
@commands.cooldown(1, 60 * 60 * 24 * 30, commands.BucketType.user)
async def monthly(ctx):
discord_id = str(ctx.message.author.id)
if discord_id not in data:
await ctx.send("You need to register first! Do `$register`")
else:
data[discord_id]["points"] += 10
await ctx.send("You got 1 :chocolate_bar: (equivalent to 10 :candy:)")
_save()
@monthly.error
async def monthly_error(ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send('You already claimed your monthly reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after))))
else:
raise error
@bot.command()
async def pot(ctx, recipient: discord.Member):
if str(ctx.message.author.id) in data:
if data[str(ctx.message.author.id)]["honey_potter"]:
if not str(recipient.id) in data:
await ctx.send("The person you're giving a :honey_pot: to hasn't registered yet! Tell them to do `$register`")
return
if ctx.message.author.id == recipient.id:
await ctx.send("You can't pot yourself... :unamused:")
return
data[str(recipient.id)]["points"] += 100
_save()
await ctx.send("Wow! <@" + str(ctx.message.author.id) + "> just gave <@" + str(recipient.id) + "> a :honey_pot:! (worth 100 :candy:)")
else:
await ctx.send("Hey, you don't have permission to do that!")
else:
await ctx.send("You need to register first! Do `$register`")
@pot.error
async def pot_error(ctx, error):
await ctx.send('Make sure you have the recipient in the command: `$pot <recipient>`')
@bot.command()
async def trivia(ctx):
discord_id = str(ctx.message.author.id)
if discord_id not in data:
await ctx.send("You need to register first! Do `$register`")
return
with open("../trivia/" + random.choice(os.listdir("../trivia")), encoding="utf-8") as trivia_file:
trivia_questions = json.load(trivia_file)
question = trivia_questions[random.randint(0, len(trivia_questions) - 1)]
message = "Question for <@" + str(ctx.message.author.id) + ">:\n" + question["question"] + "\n\n"
unicode_answers = unicode_max_answers[:len(question["choices"])]
answer = question["choices"].index(question["answer"])
for i in range(len(question["choices"])):
message += trivia_answers[i] + " " + question["choices"][i] + "\n"
sent = await ctx.send(message)
for c in unicode_answers:
await sent.add_reaction(c)
print(trivia_answers[answer] + " " + question["answer"])
    def check(reaction_arg, user_arg):
        # Only accept a reaction from the asker, on the question message, using one of the answer emojis
        return (user_arg == ctx.message.author
                and reaction_arg.message.id == sent.id
                and str(reaction_arg) in unicode_answers)
try:
reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check)
except asyncio.TimeoutError:
await ctx.send("Trivia question for <@" + str(ctx.message.author.id) + "> timed out.")
return
if unicode_answers.index(str(reaction)) == answer:
data[discord_id]["points"] += 5
await ctx.send("<@" + str(ctx.message.author.id) + "> " + trivia_answers[answer] + " " + question["answer"] + " was correct! You received 1 :lollipop: (equivalent to 5 :candy:)")
_save()
else:
data[discord_id]["points"] -= 1
await ctx.send("<@" + str(ctx.message.author.id) + "> " + str(reaction) + " " + question["choices"][unicode_answers.index(str(reaction))] + " was incorrect... The correct answer was " + trivia_answers[answer] + " " + question["answer"] + ". You lost 1 :candy:")
_save()
@bot.command()
async def bal(ctx):
discord_id = str(ctx.message.author.id)
if discord_id in data:
await ctx.send("You have " + str(data[discord_id]["points"]) + " :candy:")
else:
await ctx.send("You need to register first! Do `$register`")
@bot.command()
async def register(ctx):
discord_id = str(ctx.message.author.id)
if discord_id not in data:
data[discord_id] = {}
data[discord_id]["points"] = 0
data[discord_id]["honey_potter"] = False
await ctx.send("You've been registered!")
_save()
daily.reset_cooldown(ctx)
monthly.reset_cooldown(ctx)
else:
await ctx.send("You are already registered!")
def _save():
with open('../data.json', 'w+') as file_save:
json.dump(data, file_save)
with open('../secret.txt', 'r') as f:
bot.run(f.readline())
| 2.6875 | 3 |
pnno/engine/processor.py | zjykzj/pnno | 3 | 12795307 | # -*- coding: utf-8 -*-
"""
@date: 2020/7/14 8:34 PM
@file: processor.py
@author: zj
@description:
"""
from ..anno import build_anno
from ..util.logger import setup_logger
class Processor(object):
"""
    Processes labeled annotation data to create training data in the specified format
"""
def __init__(self, cfg):
self.parser = build_anno(cfg.ANNO.PARSER, cfg)
self.creator = build_anno(cfg.ANNO.CREATOR, cfg)
self.logger = setup_logger(__name__)
self.verbose = cfg.ANNO.VERBOSE
def process(self):
verbose = self.verbose
logger = self.logger
if verbose:
logger.info('Processing original data')
output_data = self.parser.process()
if verbose:
logger.info('Save data in specified format')
self.creator.save(output_data)
if verbose:
logger.info('Finish!!!')
| 2.515625 | 3 |
800/11_05_2021/236A.py | hieuptch2012001/Python_codeforces | 0 | 12795308 | def main():
a = input()
set_a = set(a)
if len(set_a) % 2 == 0:
print('CHAT WITH HER!')
else:
print('IGNORE HIM!')
if __name__ == "__main__":
main()
| 3.515625 | 4 |
plugins/supervisor/__init__.py | ajenti/ajen | 3,777 | 12795309 | <gh_stars>1000+
# pyflakes: disable-all
from .api import *
from .aug import *
from .main import *
| 1.078125 | 1 |
src/apps/core/admin_actions.py | crivet/HydroLearn | 0 | 12795310 |
# potentially not, may be better to use 'ModelAdmin' methods
# 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques'
#
# def delete_with_placeholders(modeladmin, request, queryset):
# for obj in queryset:
# obj.delete()
#
#
# delete_with_placeholders.short_description = "Delete Selected Items" | 1.90625 | 2 |
tests/seahub/utils/test_get_conf_test_ext.py | jjzhang166/seahub | 0 | 12795311 | <gh_stars>0
from constance import config
from django.conf import settings
from seahub.utils import get_conf_text_ext
from seahub.test_utils import BaseTestCase
class GetConfTextExtTest(BaseTestCase):
def setUp(self):
self.clear_cache()
def tearDown(self):
self.clear_cache()
def test_get(self):
assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT
orig_preview_ext = settings.TEXT_PREVIEW_EXT
config.TEXT_PREVIEW_EXT = orig_preview_ext + ',az'
assert 'az' in get_conf_text_ext()
| 1.820313 | 2 |
ch5/op_test.py | oysstu/pyopencl-in-action | 21 | 12795312 | '''
Listing 5.1: Operator usage (and vector usage)
'''
import pyopencl as cl
import pyopencl.array
import utility
kernel_src = '''
__kernel void op_test(__global int4 *output) {
int4 vec = (int4)(1, 2, 3, 4);
/* Adds 4 to every element of vec */
vec += 4;
/* Sets the third element to 0
Doesn't change the other elements
(-1 in hexadecimal = 0xFFFFFFFF */
if(vec.s2 == 7){
vec &= (int4)(-1, -1, 0, -1);
}
/* Sets the first element to -1, the second to 0 */
vec.s01 = vec.s23 < 7;
/* Divides the last element by 2 until it is less than or equal to 7 */
while(vec.s3 > 7 && (vec.s0 < 16 || vec.s1 < 16)){
vec.s3 >>= 1;
}
*output = vec;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev])
queue = cl.CommandQueue(context, dev)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev])
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Create output buffer
out = cl.array.vec.zeros_int4()
buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize)
# Enqueue kernel (with argument specified directly)
n_globals = (1,)
n_locals = None
prog.op_test(queue, n_globals, n_locals, buffer_out)
# Enqueue command to copy from buffer_out to host memory
cl.enqueue_copy(queue, dest=out, src=buffer_out, is_blocking=True)
print('Output: ' + str(out))
| 2.765625 | 3 |
regulator.py | Jerry-Terrasse/LlfSystem | 6 | 12795313 | import time
from surgeon import *
ori_time=int()
cur_time=int()
pre_time=int()
waves=int()
double_water=False
AUTO=False
def fight_start():
global ori_time,started,double_water,pre_time
ori_time=time.time()
pre_time=0
started=True
double_water=False
def fight_end():
global started
print("Fight Finished.")
if AUTO:
next_fight()
fight_start()
else:
exit()
def time_past():
global cur_time
cur_time=time.time()
return int(cur_time-ori_time)
def wait():
global double_water,cur_time
if double_water:
time.sleep(3)
else:
cur_time=time.time()
if cur_time-ori_time>=120:
double_water=True
time.sleep(3)
else:
time.sleep(5)
def Fight():
global waves,cur_time,pre_time
cur_time=time.time()
if cur_time-pre_time>=30:
waves+=1
# pre_time=cur_time
return True
else:
return False
def set_pre_time():
global pre_time
pre_time=time.time()
if __name__=='__main__':
print("Regulator Here") | 2.90625 | 3 |
src/day17.py | nlasheras/aoc-2021 | 0 | 12795314 | """ https://adventofcode.com/2021/day/17 """
import re
import math
from typing import Tuple
class Rect:
"""A 2D rectangle defined by top-left and bottom-right positions"""
def __init__(self, left, right, bottom, top):
self.left = left
self.right = right
self.bottom = bottom
self.top = top
def inside(self, x, y):
"""Checks if a given x, y point is inside the rect"""
return (self.left <= x <= self.right) and (self.bottom <= y <= self.top)
@staticmethod
def from_input(string):
match = re.search(r"target area: x=(-?\d*)..(-?\d*), y=(-?\d*)..(-?\d*)", string)
if match:
left = int(match.group(1))
right = int(match.group(2))
bottom = int(match.group(3))
top = int(match.group(4))
return Rect(left, right, bottom, top)
assert False # Shouldn't reach
return None
def sign(_n):
if _n > 0:
return 1
if _n < 0:
return -1
return 0
def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]:
"""Simulate the probe shooting and check if the probe reaches the target area.
    Returns whether the probe reaches the target area at some discrete t and, in that case,
the maximum height it reaches on the trajectory."""
velocity_x = vx0
velocity_y = vy0
probe_x = 0
probe_y = 0
_t = 0
max_height = 0
while probe_x < target.right and probe_y > target.bottom:
probe_x += velocity_x
probe_y += velocity_y
max_height = max(max_height, probe_y)
velocity_x -= sign(velocity_x)
velocity_y -= 1
_t += 1
if target.inside(probe_x, probe_y):
return True, max_height
return False, 0
puzzle = Rect.from_input("target area: x=209..238, y=-86..-59")
example = Rect.from_input("target area: x=20..30, y=-10..-5")
def both_parts_bruteforce(target):
global_maxima = 0
hit_count = 0
# do a smart brute-force over sensible ranges
min_vx = 0
max_vx = target.right # max speed is hitting the right of the area in t=1
min_vy = min(target.bottom, target.top) # use the same reasoning as for maxvy
    max_vy = -min_vy  # not much thinking here (explore the same range in positive as in negative)
for velocity_x in range(min_vx, max_vx+1):
for velocity_y in range(min_vy, max_vy+1):
hit, maxy = hit_target(velocity_x, velocity_y, target)
if hit:
global_maxima = max(global_maxima, maxy)
hit_count += 1
print(f"What is the highest y position it reaches on this trajectory? {global_maxima}")
print(f"How many distinct initial velocity values cause the probe to be within the target area after any step?: {hit_count}")
both_parts_bruteforce(example)
both_parts_bruteforce(puzzle)
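# Quick sanity check of hit_target() using the worked example from the puzzle text:
# an initial velocity of (6, 9) lands in the example target and peaks at y = 45.
hit, peak = hit_target(6, 9, example)
assert hit and peak == 45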
| 4.25 | 4 |
ehelp/application/urls.py | Taimur-DevOps/ehelp_fyp | 7 | 12795315 | <filename>ehelp/application/urls.py
from django.urls import path, include
from django.conf.urls import url
from .views import (
view_dashboard,
view_home,
view_queue,
view_privacy,
view_requests,
view_responses,
view_login,
view_logout,
view_signup,
view_activate,
view_profile,
view_request_description,
view_add_update_request,
view_add_to_queue_required,
view_delete_from_queue_required,
view_delete_request_required,
view_delete_response_required,
view_add_response_required,
)
from django.contrib.auth import views as auth_views
app_name = 'application'
urlpatterns = [
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[<KEY>})/$',
view_activate, name='activate'),
path('', view_home, name='home'),
path('dashboard/', view_dashboard, name='dashboard'),
path('profile/', view_profile, name='profile'),
path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'),
path('queue/', view_queue, name='queue'),
path('privacy/', view_privacy, name='privacy'),
path('requests/', view_requests, name='requests'),
path('add/request/', view_add_update_request, name='add-request'),
path('update/request/<int:pk>/', view_add_update_request, name='update-request'),
path('request/description/<int:pk>/', view_request_description, name='request-description'),
path('responses/', view_responses, name='responses'),
path('response/', view_responses, name='response'),
path('accounts/login/', view_login, name='login'),
path('accounts/logout/', view_logout, name='logout'),
path('signup/', view_signup, name='signup'),
path('update/request/', view_responses, name='update-request'),
path('settings/', view_privacy, name='settings'),
path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'),
path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'),
path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'),
path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'),
path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'),
path(
'change-password/', auth_views.PasswordChangeView.as_view(
template_name='application/password_change.html',
success_url='/'
), name='change_password'
),
]
| 1.96875 | 2 |
awx/main/migrations/_rbac.py | Avinesh/awx | 1 | 12795316 | <reponame>Avinesh/awx
import logging
from time import time
from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding
logger = logging.getLogger('rbac_migrations')
def create_roles(apps, schema_editor):
'''
Implicit role creation happens in our post_save hook for all of our
resources. Here we iterate through all of our resource types and call
.save() to ensure all that happens for every object in the system before we
get busy with the actual migration work.
This gets run after migrate_users, which does role creation for users a
little differently.
'''
models = [
apps.get_model('main', m) for m in [
'Organization',
'Team',
'Inventory',
'Project',
'Credential',
'CustomInventoryScript',
'JobTemplate',
]
]
with batch_role_ancestor_rebuilding():
for model in models:
for obj in model.objects.iterator():
obj.save()
def rebuild_role_hierarchy(apps, schema_editor):
logger.info('Computing role roots..')
start = time()
roots = Role.objects \
.all() \
.values_list('id', flat=True)
stop = time()
logger.info('Found %d roots in %f seconds, rebuilding ancestry map' % (len(roots), stop - start))
start = time()
Role.rebuild_role_ancestor_list(roots, [])
stop = time()
logger.info('Rebuild completed in %f seconds' % (stop - start))
logger.info('Done.')
def delete_all_user_roles(apps, schema_editor):
ContentType = apps.get_model('contenttypes', "ContentType")
Role = apps.get_model('main', "Role")
User = apps.get_model('auth', "User")
user_content_type = ContentType.objects.get_for_model(User)
for role in Role.objects.filter(content_type=user_content_type).iterator():
role.delete()
| 2.21875 | 2 |
tests/test_contrast.py | bunkahle/PILasOPENCV | 19 | 12795317 | <filename>tests/test_contrast.py<gh_stars>10-100
# from PIL import Image, ImageEnhance
import PILasOPENCV as Image
import PILasOPENCV as ImageEnhance
img = Image.open('lena.jpg')
#
enhancer = ImageEnhance.Contrast(img)
enhancer.enhance(0.0).save(
"ImageEnhance_Contrast_000.jpg")
enhancer.enhance(0.25).save(
"ImageEnhance_Contrast_025.jpg")
enhancer.enhance(0.5).save(
"ImageEnhance_Contrast_050.jpg")
enhancer.enhance(0.75).save(
"ImageEnhance_Contrast_075.jpg")
enhancer.enhance(1.0).save(
"ImageEnhance_Contrast_100.jpg") | 2.671875 | 3 |
TEP-IFPI-2017_8-master/django-api-atv1-master/app/settings.py | danieldsf/ads-activities | 1 | 12795318 | <reponame>danieldsf/ads-activities
import os, raven, logging
from unipath import Path
from decouple import config
BASE_DIR = Path(__file__).parent
PROJECT_DIR = BASE_DIR.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str)
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ('*',)
INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost')
# Email settings
EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str)
EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int)
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str)
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str)
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool)
DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR.parent.child('static')
# Application definition
INSTALLED_APPS = [
'rest_framework',
'django.contrib.auth',
'core',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Extra Apps:
'raven.contrib.django.raven_compat',
]
IMPORT_EXPORT_USE_TRANSACTIONS = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
#Extra context processors:
#'django.core.context_processors.i18n',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Fortaleza'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_CHARSET = 'utf-8'
LOCALE_PATHS = (
BASE_DIR.child('locale'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR.parent.child('media')
STATICFILES_DIRS = (
BASE_DIR.parent.child('node_modules'),
STATIC_ROOT.child('custom'),
)
# BASE_DIR.parent.child('static'),
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Raven Settings:
RAVEN_CONFIG = {
'dsn': config('RAVEN_DSN', default=os.environ.get('RAVEN_DSN'), cast=str),
#'release': raven.fetch_git_sha(BASE_DIR.child('.git').child('HEAD')),
} | 1.882813 | 2 |
python/vsi/utils/image_iterators.py | cabdiweli1/vsi_common | 7 | 12795319 | import numpy as np
from collections import namedtuple
import skimage.measure
#import matplotlib.pyplot as plt
#import ipdb
# could maybe turn this into a generic mutable namedtuple
class Point2D(object):
__slots__ = "x", "y"
def __init__(self, x, y):
self.x = x
self.y = y
def __iter__(self):
'''iterate over fields tuple/list style'''
for field_name in self.__slots__:
yield getattr(self, field_name)
def __getitem__(self, index):
'''tuple/list style getitem'''
return getattr(self, self.__slots__[index])
# NOTE IterateOverWindows and IterateOverSuperpixels must share the same iter() interface
# TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces
# pixel_stride <= pixels_per_cell
#
# NOTE if pixel_stride > pixels_per_cell/2, it is possible to leave data unseen on the
# right/bottom boarder of an image
#
# this is similar to matlab's im2col
class IterateOverWindows(object):
def __init__(self, pixels_per_cell, pixel_stride=None, image=None,
mode='constant', cval=0,
start_pt=(0, 0), stop_pt=(None, None)):
''' Sliding window iterator.
Parameters
----------
pixels_per_cell : array_like
x,y - let x,y be odd so the window can be easily centered
pixel_stride : array_like, optional
x,y
image : array_like, optional
like numpy.array (ndim == 2 or 3)
mode : str, optional
Points outside the boundaries of the input are filled according to the
given mode. Only ``mode='constant'``, ``mode='discard'`` and
``mode='reflect'`` are currently supported, although others could be
added (e.g., 'nearest' and 'wrap')
cval : float, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
start_pt : array_like, optional
(x,y)
stop_pt : array_like, optional
(x,y)
>>> tot = 0; im = np.arange(100).reshape((10,10))
>>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)):
... tot += ret[0].sum()
... #print(i, ':\n', ret[0])
>>> print(tot) # weak test
22647
>>> tot = 0; im = np.arange(81).reshape((9,9)).T
>>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)):
... tot += ret[0].sum()
... #print(i, ':\n', ret[0])
>>> print(tot) # weak test
25000
'''
assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] % 2 == 1, \
'provide an odd number for pixels_per_cell to easily center the window'
self.pixels_per_cell = tuple(pixels_per_cell)
self.pixel_stride = self.pixels_per_cell if pixel_stride is None else pixel_stride
self.image = image
self.mode = mode
self.cval = cval
self.start_pt = Point2D(*(int(s) for s in start_pt))
self.stop_pt = Point2D(*(stop_pt))
def setImage(self, image):
'''
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
'''
self.image = image
return self
def shape(self):
if self.image is None: raise TypeError("self.image cannot be of type NoneType")
nrows, ncols = self.image.shape[0:2]
stop_x = ncols if self.stop_pt.x is None else int(self.stop_pt.x)
stop_y = nrows if self.stop_pt.y is None else int(self.stop_pt.y)
roi_height = stop_y-self.start_pt.y
roi_width = stop_x-self.start_pt.x
#print(roi_width, roi_height, self.pixel_stride)
nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int)
ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int)
return (nrows, ncols)
def iter(self,image=None):
'''Next window generator
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
Returns
-------
numpy.array, optional
chip : pixels within the current window. Points outside the
boundaries of the input are filled according to the given mode.
numpy.array
mask : the binary mask of the window within the chip
BoundingBox
bbox : the inclusive extents of the chip (which may exceed the bounds
of the image)
MODIFICATIONS
sgr : turned into a class
sgr : added mode='reflect'
'''
if image is not None: self.image = image
elif self.image is None: raise TypeError("self.image cannot be of type NoneType")
nrows, ncols = self.image.shape[0:2]
# NOTE could iterate over the interior of the image without bounds checking
# for additional speedup
BoundingBox = namedtuple("BoundingBox", "min_x max_x min_y max_y")
pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2
ystrides_per_image, xstrides_per_image = self.shape()
        # iterate around the border of the image
for r in xrange(ystrides_per_image):
for c in xrange(xstrides_per_image):
# chip out pixels in this sliding window
min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0]
max_x = min_x+self.pixels_per_cell[0]
min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1]
max_y = min_y+self.pixels_per_cell[1]
bbox = BoundingBox(min_x,max_x,min_y,max_y)
min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x)
min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y)
#print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y)
chip = self.image[min_y:max_y, min_x:max_x, ...]
# couch chip in a fixed-size window
                # REVIEW I could refactor handling the border into pad_image(). then mode wouldn't
# be necessary here and I could simply loop over the image.
# RE this is more efficient though
if self.mode == 'constant' or self.mode == 'reflect':
chunk = np.empty(
self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else ()),
dtype=self.image.dtype.type)
chunk[:] = self.cval
mask = np.zeros(self.pixels_per_cell)
min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0]
max_x = min(self.pixels_per_cell[0], ncols - min_x)
min_x = max(0, -min_x)
min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1]
max_y = min(self.pixels_per_cell[1], nrows - min_y)
min_y = max(0, -min_y)
#print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y)
#print()
chunk[min_y:max_y, min_x:max_x, ...] = chip
mask[min_y:max_y, min_x:max_x] = 1
if self.mode == 'reflect':
nrows_chunk, ncols_chunk = chunk.shape[0:2]
# NOTE assume the points outside the boundaries of input can be filled from chip.
# this seems harder than it should be...
chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border
chip[:min_y, :, ...]))
chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border
chip[:, :min_x, ...]))
# NOTE neg indice trikery (flipping first simplifies indexing)
chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border
np.flipud(chip)[:nrows_chunk-max_y, :, ...])
chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border
np.fliplr(chip)[:, :ncols_chunk-max_x, ...])
chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner
chip[:min_y, :min_x, ...])))
chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner
np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...]))
chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner
np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...])
chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner
np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...]))
elif self.mode == 'discard':
mask = np.ones_like(chip)
chunk = chip
else:
assert False, 'unrecognized mode'
# FIXME should bbox be max-1 like in the superpixel version
yield chunk, mask, bbox
class IterateOverSuperpixels(object):
def __init__(self, segmented, image=None):
        '''
        Parameters
        ----------
        segmented : array_like
            Superpixel labeled segmentation (like numpy.array)
            NOTE regionprops expects labels to be sequential and start
            at 1: {1,2,...}. label 0 is treated as unlabeled.
        image : array_like, optional
            like numpy.array (ndim == 2 or 3)
        '''
        self.segmented = segmented
        self.image = image
def setImage(self, image):
'''
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
'''
self.image = image
return self
def iter(self, image=None):
'''Next superpixel generator
Parameters
----------
image : array_like, optional
like numpy.array (ndim == 2 or 3)
Returns
-------
numpy.array, optional
chip : pixels within the current window. Points outside the
boundaries of the input are filled according to the given mode.
numpy.array
mask : the binary mask of the window within the chip
BoundingBox
bbox : the inclusive extents of the chip (which may exceed the bounds
of the image)
MODIFICATIONS
sgr : optimized
sgr : turned into a class
'''
if image is not None: self.image = image
elif self.image is None: raise TypeError("self.image cannot be of type NoneType")
# regionprops() treats label zero (0) as unlabeled and ignores it
# TODO remove small, unconnected components
properties = skimage.measure.regionprops(self.segmented)
BoundingBox = namedtuple("BoundingBox", "min_x max_x min_y max_y")
for rp in properties:
if rp._slice is None: continue
(min_y,min_x,max_y,max_x) = rp.bbox
            chip = self.image[min_y:max_y, min_x:max_x,...]  # use self.image so iter() also works after setImage()
mask = rp.filled_image
bbox = BoundingBox(min_x,max_x-1,min_y,max_y-1)
yield (chip, mask, bbox)
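if __name__ == "__main__":
    # Small illustrative run (not part of the original module): iterate over the two
    # superpixels of a toy labeling and report each chip's bounding box and shape.
    toy_image = np.arange(36).reshape(6, 6)
    toy_labels = np.ones((6, 6), dtype=int)
    toy_labels[:, 3:] = 2  # two vertical superpixels labeled 1 and 2
    for chip, mask, bbox in IterateOverSuperpixels(toy_labels).iter(toy_image):
        print(bbox, chip.shape)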
| 2.65625 | 3 |
lib/firmware/ap_firmware_config/grunt.py | khromiumos/chromiumos-chromite | 0 | 12795320 | # -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Grunt configs."""
from __future__ import print_function
def is_fast_required(use_futility, servo):
"""Returns true if --fast is necessary to flash successfully.
The configurations in this function consistently fail on the verify step,
adding --fast removes verification of the flash and allows these configs to
flash properly. Meant to be a temporary hack until b/143240576 is fixed.
Args:
use_futility (bool): True if futility is to be used, False if
flashrom.
    servo (servo_lib.Servo): The servo connected to the target DUT.
Returns:
bool: True if fast is necessary, False otherwise.
"""
return use_futility and servo.is_v4
def get_commands(servo):
"""Get specific flash commands for grunt
Each board needs specific commands including the voltage for Vref, to turn
on and turn off the SPI flash. The get_*_commands() functions provide a
board-specific set of commands for these tasks. The voltage for this board
needs to be set to 1.8 V.
Args:
servo (servo_lib.Servo): The servo connected to the target DUT.
Returns:
list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]
      dut_control*=2d arrays formatted like [["cmd1", "arg1", "arg2"],
                                             ["cmd2", "arg3", "arg4"]]
where cmd1 will be run before cmd2
flashrom_cmd=command to flash via flashrom
futility_cmd=command to flash via futility
"""
dut_control_on = []
dut_control_off = []
if servo.is_v2:
dut_control_on.append([
'spi2_vref:pp1800',
'spi2_buf_en:on',
'spi2_buf_on_flex_en:on',
'cold_reset:on',
])
dut_control_off.append([
'spi2_vref:off',
'spi2_buf_en:off',
'spi2_buf_on_flex_en:off',
'cold_reset:off',
])
programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial
elif servo.is_micro:
dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on'])
dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off'])
programmer = 'raiden_debug_spi:serial=%s' % servo.serial
elif servo.is_ccd:
# Note nothing listed for flashing with ccd_cr50 on go/grunt-care.
# These commands were based off the commands for other boards.
programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial
else:
raise Exception('%s not supported' % servo.version)
flashrom_cmd = ['flashrom', '-p', programmer, '-w']
futility_cmd = ['futility', 'update', '-p', programmer, '-i']
return [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]
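# Illustrative sketch (not part of the original config): a stand-in object mimicking
# the servo_lib.Servo attributes used above, just to show the shape of the returned
# commands for a servo_micro.
if __name__ == '__main__':
    class _FakeServo(object):
        is_v2, is_micro, is_ccd, is_v4 = False, True, False, False
        serial = 'FAKE-SERIAL'
        version = 'servo_micro'
    _on, _off, _flashrom, _futility = get_commands(_FakeServo())
    print(_flashrom)  # ['flashrom', '-p', 'raiden_debug_spi:serial=FAKE-SERIAL', '-w']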
| 2.234375 | 2 |
foundation/organisation/migrations/0001_initial.py | Mindelirium/foundation | 0 | 12795321 | <reponame>Mindelirium/foundation
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Person'
db.create_table(u'organisation_person', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
))
db.send_create_signal(u'organisation', ['Person'])
# Adding model 'Unit'
db.create_table(u'organisation_unit', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'organisation', ['Unit'])
# Adding model 'UnitMembership'
db.create_table(u'organisation_unitmembership', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])),
('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])),
))
db.send_create_signal(u'organisation', ['UnitMembership'])
# Adding model 'Board'
db.create_table(u'organisation_board', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'organisation', ['Board'])
# Adding model 'BoardMembership'
db.create_table(u'organisation_boardmembership', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])),
('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])),
))
db.send_create_signal(u'organisation', ['BoardMembership'])
def backwards(self, orm):
# Deleting model 'Person'
db.delete_table(u'organisation_person')
# Deleting model 'Unit'
db.delete_table(u'organisation_unit')
# Deleting model 'UnitMembership'
db.delete_table(u'organisation_unitmembership')
# Deleting model 'Board'
db.delete_table(u'organisation_board')
# Deleting model 'BoardMembership'
db.delete_table(u'organisation_boardmembership')
models = {
u'organisation.board': {
'Meta': {'object_name': 'Board'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.boardmembership': {
'Meta': {'object_name': 'BoardMembership'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Board']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.person': {
'Meta': {'object_name': 'Person'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'organisation.unit': {
'Meta': {'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unitmembership': {
'Meta': {'object_name': 'UnitMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Unit']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['organisation'] | 2.171875 | 2 |
make_validation_set.py | lelloman/python-languagedetector | 0 | 12795322 | from common import *
from os.path import join as join_path, isdir
from shutil import rmtree
from os import mkdir
import feedparser
from bs4 import BeautifulSoup as bs
languages_names = [x['name'] for x in languages]
rss_sources = {
'da': [
'https://politiken.dk/rss/senestenyt.rss',
'https://borsen.dk/rss/'
],
'de': [
'http://www.spiegel.de/index.rss',
'https://www.faz.net/rss/aktuell/'
],
'en': [
'http://feeds.washingtonpost.com/rss/rss_powerpost',
'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'
],
'es': [
'http://ep00.epimg.net/rss/elpais/portada.xml',
'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml'
],
'fi': [
'https://www.iltalehti.fi/rss/uutiset.xml',
'https://www.uusisuomi.fi/raha/feed'
],
'fr': [
'https://www.lemonde.fr/rss/une.xml',
'http://www.lefigaro.fr/rss/figaro_flash-actu.xml'
],
'hu': [
'https://nepszava.hu/feed',
'https://www.vg.hu/feed/'
],
'it': [
'https://www.fanpage.it/feed/',
'http://www.ansa.it/campania/notizie/campania_rss.xml'
],
'nl': [
'https://www.telegraaf.nl/rss',
'https://www.ad.nl/nieuws/rss.xml'
],
'no': [
'https://www.vg.no/rss/feed/forsiden/?format=rss',
'https://www.aftenposten.no/rss'
],
'pl': [
'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml',
'https://www.rp.pl/rss/1019'
],
'pt': [
'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml',
'http://feeds.jn.pt/JN-Nacional'
],
'ro': [
'https://evz.ro/rss.xml',
'https://adevarul.ro/rss/'
],
'ru': [
'https://www.mk.ru/rss/index.xml',
'https://iz.ru/xml/rss/all.xml'
],
'sv': [
'https://www.di.se/rss',
'https://www.arbetarbladet.se/feed'
],
'uk': [
'https://ukurier.gov.ua/uk/feed/',
'http://day.kyiv.ua/uk/news-rss.xml'
],
'vi': [
'https://vnexpress.net/rss/tin-moi-nhat.rss',
'https://www.tienphong.vn/rss/ho-chi-minh-288.rss'
]
}
def text_from_html(html):
return bs(html, "lxml").text
if __name__ == '__main__':
if isdir(VALIDATION_SET_DIR):
user_input = input("Validation set directory already exists, should delete it and re-fetch the data? Y/N\n")
if user_input.lower() != 'y':
print("Nothing to do.")
exit(0)
else:
print("Deleting old validate set dir", VALIDATION_SET_DIR)
rmtree(VALIDATION_SET_DIR)
print("Creating new directory", VALIDATION_SET_DIR)
mkdir(VALIDATION_SET_DIR)
# for lang in ['vi']:
for lang in languages_names:
print(lang)
if lang not in rss_sources:
print("\tSkipping", lang, "as there are no sources.")
continue
with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f:
for source in rss_sources[lang]:
feed = feedparser.parse(source)
items = feed.entries
for item in items:
title = text_from_html(item['title'])
summary = text_from_html(item['summary'])
validation_text = sanitize_text(title) + ' ' + sanitize_text(summary)
if len(validation_text) > 200:
validation_text = validation_text[:200]
f.write(validation_text.encode("UTF-8"))
f.write('\n'.encode("UTF-8"))
# print('\t', title, ' -> ', summary, ' -> ', validation_text)
print("\tfound", len(items), "feeds in", source)
| 2.484375 | 2 |
train.py | liujiachang/Graph-WaveNet | 0 | 12795323 | <reponame>liujiachang/Graph-WaveNet
import torch
import numpy as np
import argparse
import time
import util
import matplotlib.pyplot as plt
from engine import trainer
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cuda:0', help='')
parser.add_argument('--data', type=str, default='data/METR-LA', help='data path')
parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path')
parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type')
parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph convolution layer')
parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj')
parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj')
parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive adj')
parser.add_argument('--seq_length', type=int, default=12, help='')
parser.add_argument('--nhid', type=int, default=32, help='')
parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension')
parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')
parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate')
parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate')
parser.add_argument('--epochs', type=int, default=50, help='')
parser.add_argument('--print_every', type=int, default=50, help='')
# parser.add_argument('--seed',type=int,default=99,help='random seed')
parser.add_argument('--save', type=str, default='./garage/metr', help='save path')
parser.add_argument('--expid', type=int, default=1, help='experiment id')
args = parser.parse_args()
def main():
# set seed
# torch.manual_seed(args.seed)
# np.random.seed(args.seed)
# load data
device = torch.device(args.device)
sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype)
dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)
scaler = dataloader['scaler']
supports = [torch.tensor(i).to(device) for i in adj_mx]
print(args)
if args.randomadj:
adjinit = None
else:
adjinit = supports[0]
if args.aptonly:
supports = None
engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,
args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,
adjinit)
print("start training...")
his_loss = []
val_time = []
train_time = []
for i in range(1, args.epochs + 1):
# if i % 10 == 0:
# lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))
# for g in engine.optimizer.param_groups:
# g['lr'] = lr
train_loss = []
train_mape = []
train_rmse = []
t1 = time.time()
dataloader['train_loader'].shuffle()
for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()):
trainx = torch.Tensor(x).to(device)
trainx = trainx.transpose(1, 3)
trainy = torch.Tensor(y).to(device)
trainy = trainy.transpose(1, 3)
metrics = engine.train(trainx, trainy[:, 0, :, :])
train_loss.append(metrics[0])
train_mape.append(metrics[1])
train_rmse.append(metrics[2])
if iter % args.print_every == 0:
log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'
print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1]))
t2 = time.time()
train_time.append(t2 - t1)
# validation
valid_loss = []
valid_mape = []
valid_rmse = []
s1 = time.time()
for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):
testx = torch.Tensor(x).to(device)
testx = testx.transpose(1, 3)
testy = torch.Tensor(y).to(device)
testy = testy.transpose(1, 3)
metrics = engine.eval(testx, testy[:, 0, :, :])
valid_loss.append(metrics[0])
valid_mape.append(metrics[1])
valid_rmse.append(metrics[2])
s2 = time.time()
log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'
print(log.format(i, (s2 - s1)))
val_time.append(s2 - s1)
mtrain_loss = np.mean(train_loss)
mtrain_mape = np.mean(train_mape)
mtrain_rmse = np.mean(train_rmse)
mvalid_loss = np.mean(valid_loss)
mvalid_mape = np.mean(valid_mape)
mvalid_rmse = np.mean(valid_rmse)
his_loss.append(mvalid_loss)
log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'
print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1)))
torch.save(engine.model.state_dict(),
args.save + "_epoch_" + str(i) + "_" + str(round(mvalid_loss, 2)) + ".pth")
print("Average Training Time: {:.4f} secs/epoch".format(np.mean(train_time)))
print("Average Inference Time: {:.4f} secs".format(np.mean(val_time)))
# testing
bestid = np.argmin(his_loss)
engine.model.load_state_dict(
torch.load(args.save + "_epoch_" + str(bestid + 1) + "_" + str(round(his_loss[bestid], 2)) + ".pth"))
outputs = []
realy = torch.Tensor(dataloader['y_test']).to(device)
realy = realy.transpose(1, 3)[:, 0, :, :]
for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):
testx = torch.Tensor(x).to(device)
testx = testx.transpose(1, 3)
with torch.no_grad():
preds = engine.model(testx).transpose(1, 3)
outputs.append(preds.squeeze())
yhat = torch.cat(outputs, dim=0)
yhat = yhat[:realy.size(0), ...]
print("Training finished")
print("The valid loss on best model is", str(round(his_loss[bestid], 4)))
amae = []
amape = []
armse = []
for i in range(12):
pred = scaler.inverse_transform(yhat[:, :, i])
real = realy[:, :, i]
metrics = util.metric(pred, real)
log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))
amae.append(metrics[0])
amape.append(metrics[1])
armse.append(metrics[2])
log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'
print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))
torch.save(engine.model.state_dict(),
args.save + "_exp" + str(args.expid) + "_best_" + str(round(his_loss[bestid], 2)) + ".pth")
if __name__ == "__main__":
t1 = time.time()
main()
t2 = time.time()
print("Total time spent: {:.4f}".format(t2 - t1))
| 2.109375 | 2 |
modules/tankshapes/base.py | bullseyestudio/guns-game | 0 | 12795324 | <filename>modules/tankshapes/base.py
""" Base tank shape -- all other shapes derive from this
Minimal requirements:
- typeID
- position (relative to tank origin)
- size (fixed for most shapes)
- layer (fixed for most shapes)
- anchor rectangle (what portion of the shape must be supported by the lower layer;
most shapes will define their entire extent as their anchor rectangle)
"""
class base_shape(object):
type = 0
position = (0,0)
size = (0,0)
anchor = (0,0)
layer = 0
def __init__(self, type, position, size, layer=0, anchor=None):
# TODO: Don't forget to do some kind of validation!
self.type = type
self.position = position
self.size = size
self.layer = layer
if anchor:
self.anchor = anchor
else:
self.anchor = (0,0) + size
def absanchor(self):
""" Returns the anchor rectangle relative to the tank origin (i.e. NOT relative to the shape) """
return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3])
def __repr__(self):
return '<shape(type={self.type})>'.format(self=self)
def __str__(self):
return '{0}(type={self.type},position={self.position},size={self.size},layer={self.layer},anchor={self.anchor})'.format(
type(self),
self=self)
| 3.25 | 3 |
news/migrations/0001_initial.py | SIBSIND/PHPMYADMINWEBSITE | 31 | 12795325 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markupfield.fields
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Planet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(unique=True)),
('title', models.CharField(max_length=100)),
('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
],
options={
'ordering': ['-date'],
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(unique_for_date=b'date')),
('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
('body', markupfield.fields.MarkupField(rendered_field=True)),
('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])),
('_body_rendered', models.TextField(editable=False)),
('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date'],
},
),
]
| 1.992188 | 2 |
chapters/10/src/biglittle/domain/loan.py | PacktPublishing/-Learn-MongoDB-4.0 | 13 | 12795326 | <reponame>PacktPublishing/-Learn-MongoDB-4.0
"""
biglittle.domain.loan
Description: module which interacts with the "loans" collection
"""
import pymongo
import decimal
from config.config import Config
from pymongo.cursor import CursorType
from decimal import Decimal
from bson.decimal128 import Decimal128
from utils.utils import Utils
from biglittle.domain.base import Base
from biglittle.entity.loan import Payment, LoanInfo, Loan
class LoanService(Base) :
# defaults
collectName = 'biglittle.loans'
"""
Generates a proposal
Formula:
M = P * ( J / (1 - (1 + J)**-N))
M == monthly payment
P == principal
J == effective rate
N == total number of payments
@param float principal
@param int numPayments
@param float annualRate
@param string currency
@param string borrowerKey
@param string lenderKey
@param string lenderName
@param string lenderBusiness
@return dict
"""
def generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness) :
# calc effective rate and monthly payment
        effective_rate = annualRate / 100 / 12
monthly_payment = principal * ( effective_rate / (1 - (1 + effective_rate) ** -numPayments ))
# create LoanInfo and Loan documents
loanInfo = {
'principal' : principal,
'numPayments' : numPayments,
'annualRate' : annualRate,
'effectiveRate' : effective_rate * 1000,
'currency' : currency,
'monthlyPymt' : monthly_payment
}
loan = {
'borrowerKey' : borrowerKey,
'lenderKey' : lenderKey,
'lenderName' : lenderName,
'lenderBusiness' : lenderBusiness,
'overpayment' : 0.00,
'loanInfo' : loanInfo,
'payments' : []
}
return loan
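    # Worked example of the formula in the docstring above (illustrative numbers,
    # not taken from real data): for principal P = 10000, N = 24 monthly payments
    # and an annual rate of 12%, the effective rate is J = 12 / 100 / 12 = 0.01,
    # so the monthly payment is
    #   M = 10000 * (0.01 / (1 - (1 + 0.01) ** -24)) ≈ 470.73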
"""
Generates a series of simulated proposals
@param float principal
@param int numPayments
@param list lenders : list of lender keys
@return dict
"""
def generateMany(self, principal, numPayments, currency, borrowerKey, lenders) :
proposals = {}
for item in lenders :
# pick an annual rate at random
import random
annualRate = random.randint(1000,20000) / 1000
# add a new proposal
doc = self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'], item['name'], item['business'])
proposals.update({ item['key'] : doc })
return proposals
"""
Generates loan key
@param biglittle.entity.loan.Loan loan
@return string loanKey
"""
def generateLoanKey(self, loan) :
from time import gmtime, strftime
date = strftime('%Y%m%d', gmtime())
loan['loanKey'] = loan['borrowerKey'] + '_' + loan['lenderKey'] + '_' + date
return loan
"""
Saves loan document
@param biglittle.entity.loan.Loan loan
@return bool True if success else False
"""
def save(self, loan) :
# generate loanKey
loan = self.generateLoanKey(loan)
# convert values to NumberDecimal
loan.convertDecimalToBson()
# save
return self.collection.insert(loan)
"""
Retrieves amount due for given borrower
@param string borrowerKey
@return Decimal amtDue
"""
def fetchAmtDueForBorrower(self, borrowerKey) :
amtDue = Decimal(0.00)
loan = self.collection.find_one({"borrowerKey":borrowerKey})
if loan :
loanInfo = loan.getLoanInfo()
amtDue = loanInfo.get('monthlyPymt').to_decimal()
return amtDue
"""
Retrieves loan for given borrower
Converts all BSON Decimal128 financial data fields to Decimal
@param string borrowerKey
@return biglittle.entity.users.User instance
"""
def fetchLoanByBorrowerKey(self, borrowerKey) :
loan = self.collection.find_one({"borrowerKey":borrowerKey})
loan.convertBsonToDecimal()
return loan
"""
Looks for next scheduled payment for this borrower
@param string borrowerKey
@param float amtPaid
@param biglittle.entity.loan.Loan loan
@return bool True if payment processed OK | False otherwise
"""
def processPayment(self, borrowerKey, amtPaid, loan) :
# init vars
config = Config()
utils = Utils()
result = False
loanInfo = loan.getLoanInfo()
amtDue = loanInfo.getMonthlyPayment()
overpayment = 0.00
# convert amount paid to decimal.Decimal for processing purposes
if not isinstance(amtPaid, Decimal) :
amtPaid = Decimal(amtPaid)
# find first payment where nothing has been paid
for doc in loan['payments'] :
if doc['amountPaid'] == 0 :
# if underpayment, add to "overpayment" but do no further processing
if amtPaid < amtDue :
overpayment = amtPaid
else :
overpayment = amtPaid - amtDue
# apply payment
doc['amountPaid'] = doc['amountDue']
doc['status'] = 'received'
                # record today's date as the received date
from time import gmtime, strftime
now = strftime('%Y-%m-%d', gmtime())
doc['recvdate'] = now
break
# update overpayment field
currentOver = loan.get('overpayment')
loan.set('overpayment', currentOver + overpayment)
# convert values to NumberDecimal
loan.convertDecimalToBson()
# update by replacement
filt = { 'borrowerKey' : borrowerKey }
result = self.collection.replace_one(filt,loan)
# send out pub/sub notifications
if result :
# convert values to Decimal
loan.convertBsonToDecimal()
# have publisher notify subscribers
self.publisher.trigger(self.publisher.EVENT_LOAN_UPDATE_BORROWER, {'loan':loan, 'amtPaid':amtPaid})
# done
return result
| 2.6875 | 3 |
examples/retrieve_folders.py | swimlane/python-office365 | 21 | 12795327 | <reponame>swimlane/python-office365<filename>examples/retrieve_folders.py
from office365api import Mail
from dotenv import load_dotenv
from os.path import join, dirname, normpath
from os import environ
dot_env_path = normpath(join(dirname(__file__), '../', '.env'))
load_dotenv(dot_env_path)
def simplest(auth):
mail = Mail(auth=auth)
c = mail.folders.get_count()
print('Folder count {0}'.format(c))
m = mail.folders.get_all_folders()
print('Folder names.')
for folder in m:
print(" {id} {name}".format(id=folder.Id, name=folder.DisplayName))
for folder in (f for f in m if f.ChildFolderCount > 0):
f_info = mail.folders.get_folder(folder_id=folder.Id)
print('Subfolders of {name}'.format(name=folder.DisplayName))
sf = mail.folders.get_sub_folders(folder.Id)
for f in sf:
print(" {id} {name}".format(id=f.Id, name=f.DisplayName))
if __name__ == '__main__':
authorization = (environ.get('OFFICE_USER'), environ.get('OFFICE_USER_PASSWORD'))
simplest(authorization)
| 3.015625 | 3 |
ExtractionModule/fileStructureIdentifier.py | k41nt/ECEN403-ML-for-Data-Organization | 0 | 12795328 | """
Title:: File Structure Identifier
Brief:: Implementation of the program that trains the model for identifying a known set of document classes
Author:: <NAME>
Date:: 04/02/2019
""" | 0.648438 | 1 |
notice.py | rookiesmile/yibanAutoSgin | 24 | 12795329 | # -*- coding: utf-8 -*-
"""
@Time : 2021/8/24 13:00
@Auth : apecode
@File :notice.py
@IDE :PyCharm
@Blog:https://liiuyangxiong.cn
"""
import json
import time
from email.mime.text import MIMEText
from email.header import Header
from smtplib import SMTP_SSL
import requests
import config
class Notice:
def __init__(self, admin: dict, account: dict):
self.admin = admin,
self.account = account
def send(self, content):
if self.account.get("notice") == "" or self.account.get("notice") == "local":
return Notice.saveLocal(content)
elif self.account.get("notice") == "mail":
if self.admin[0]["mail"]["sendMail"] == "" and self.admin[0]["mail"]["authCode"] == "":
print("未设置发送者邮箱信息,转为本地记录")
Notice.saveLocal(content)
else:
self.send_mail(content)
else:
self.sendPushPlus(content)
print(content)
def send_mail(self, message: str):
try:
host_server = self.admin[0]["mail"]["smtpServer"]
            # Sender's email address
sendMail = self.admin[0]["mail"]["sendMail"]
            # Email authorization code
authCode = self.admin[0]["mail"]["authCode"]
            # Recipient's email address
receiver = self.account.get("mail")
            # Email subject
mail_title = "易班 " + time.strftime("%Y-%m-%d", time.localtime(int(time.time()))) + " 签到情况"
            # Log in over SSL
smtp = SMTP_SSL(host_server)
smtp.ehlo(host_server)
smtp.login(sendMail, authCode)
msg = MIMEText(message, "html", 'utf-8')
msg["Subject"] = Header(mail_title, 'utf-8')
msg["From"] = sendMail
msg["To"] = receiver
smtp.sendmail(sendMail, receiver, msg.as_string())
smtp.quit()
return True
except Exception as e:
print(e)
return False
    # Send via PushPlus
def sendPushPlus(self, content: str):
url = 'https://www.pushplus.plus/send'
headers = {"Content-Type": "application/json"}
data = json.dumps({
"token": self.account.get("pushToken"),
"title": "易班签到通知",
"content": content,
"template": "txt"
})
response = requests.post(url=url, data=data, headers=headers).json()
if response['code'] == 200:
return Notice.log(f"{self.account.get('mobile')}\tPush Plus发送成功!\n")
else:
print("发送失败,转为本地记录")
Notice.saveLocal(content)
return Notice.log(f"{self.account.get('mobile')}\tPush Plus发送失败!原因: {response['msg']}\n")
@staticmethod
def log(message: str):
with open(file="data/logs.log", mode="a+", encoding="utf-8") as f:
f.write(message)
print(message)
@staticmethod
def saveLocal(message):
with open("data/result.log", mode="a+", encoding="utf-8") as w:
w.write(message)
| 2.796875 | 3 |
app/storage/storage_encryption.py | pricem14pc/eq-questionnaire-runner | 0 | 12795330 | <gh_stars>0
import hashlib
from typing import Optional, Union
from jwcrypto import jwe, jwk
from jwcrypto.common import base64url_encode
from structlog import get_logger
from app.utilities.json import json_dumps
from app.utilities.strings import to_bytes, to_str
logger = get_logger()
class StorageEncryption:
def __init__(
self, user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str]
) -> None:
if not user_id:
raise ValueError("user_id not provided")
if not user_ik:
raise ValueError("user_ik not provided")
if not pepper:
raise ValueError("pepper not provided")
self.key = self._generate_key(user_id, user_ik, pepper)
@staticmethod
def _generate_key(user_id: str, user_ik: str, pepper: str) -> jwk.JWK:
sha256 = hashlib.sha256()
sha256.update(to_str(user_id).encode("utf-8"))
sha256.update(to_str(user_ik).encode("utf-8"))
sha256.update(to_str(pepper).encode("utf-8"))
# we only need the first 32 characters for the CEK
cek = to_bytes(sha256.hexdigest()[:32])
password = {"<PASSWORD>": "<PASSWORD>", "k": base64url_encode(cek)}
return jwk.JWK(**password)
def encrypt_data(self, data: Union[str, dict]) -> str:
if isinstance(data, dict):
data = json_dumps(data)
protected_header = {"alg": "dir", "enc": "A256GCM", "kid": "1,1"}
jwe_token = jwe.JWE(
plaintext=data, protected=protected_header, recipient=self.key
)
serialized_token: str = jwe_token.serialize(compact=True)
return serialized_token
def decrypt_data(self, encrypted_token: str) -> bytes:
jwe_token = jwe.JWE(algs=["dir", "A256GCM"])
jwe_token.deserialize(encrypted_token, self.key)
payload: bytes = jwe_token.payload
return payload
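# Minimal usage sketch (not part of the original module). The user id, issuer
# key and pepper below are made-up values; encrypt_data returns a compact JWE
# string and decrypt_data returns the JSON payload as bytes.
if __name__ == "__main__":
    enc = StorageEncryption(user_id="user-1", user_ik="issuer-key", pepper="pepper")
    token = enc.encrypt_data({"answers": []})
    print(token)
    print(enc.decrypt_data(token))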
| 2.375 | 2 |
models/__init__.py | bdbaraban/mlb_tweets | 1 | 12795331 | <gh_stars>1-10
import json
from models.league import League
from models.standings import Standings
league = League()
league.reload()
standings = Standings()
standings.reload()
| 1.679688 | 2 |
aries_cloudagent/did/tests/test_did_key_bls12381g1.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 12795332 | from unittest import TestCase
from ...wallet.key_type import KeyType
from ...wallet.util import b58_to_bytes
from ..did_key import DIDKey, DID_KEY_RESOLVERS
from .test_dids import (
    DID_BLS12381G1_z3tEFALUKUzzCAvytMHX8X4SnsNsq6T5tC5Zb18oQEt1FqNcJXqJ3AA9umgzA9yoqPBeWA,
)
TEST_BLS12381G1_BASE58_KEY = (
"<KEY>"
)
TEST_BLS12381G1_FINGERPRINT = (
"<KEY>"
)
TEST_BLS12381G1_DID = f"did:key:{TEST_BLS12381G1_FINGERPRINT}"
TEST_BLS12381G1_KEY_ID = f"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}"
TEST_BLS12381G1_PREFIX_BYTES = b"".join(
[b"\xea\x01", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)]
)
class TestDIDKey(TestCase):
def test_bls12381g1_from_public_key(self):
key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)
did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1)
assert did_key.did == TEST_BLS12381G1_DID
def test_bls12381g1_from_public_key_b58(self):
did_key = DIDKey.from_public_key_b58(
TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1
)
assert did_key.did == TEST_BLS12381G1_DID
def test_bls12381g1_from_fingerprint(self):
did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT)
assert did_key.did == TEST_BLS12381G1_DID
assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY
def test_bls12381g1_from_did(self):
did_key = DIDKey.from_did(TEST_BLS12381G1_DID)
assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY
def test_bls12381g1_properties(self):
did_key = DIDKey.from_did(TEST_BLS12381G1_DID)
assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT
assert did_key.did == TEST_BLS12381G1_DID
assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY
assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)
assert did_key.key_type == KeyType.BLS12381G1
assert did_key.key_id == TEST_BLS12381G1_KEY_ID
assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES
def test_bls12381g1_diddoc(self):
did_key = DIDKey.from_did(TEST_BLS12381G1_DID)
resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1]
assert resolver(did_key) == did_key.did_doc
def test_bls12381g1_resolver(self):
did_key = DIDKey.from_did(TEST_BLS12381G1_DID)
resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1]
did_doc = resolver(did_key)
assert (
did_doc
== DID_BLS12381G1_z3tEFALUKUzzCAvytMHX8X4SnsNsq6T5tC5Zb18oQEt1FqNcJXqJ3AA9umgzA9yoqPBeWA
)
| 2.390625 | 2 |
tests/test_executable.py | cirosantilli/python-utils | 1 | 12795333 | #!/usr/bin/env python
"""
this calls test_executable_caller as it should be called for the test to work.
"""
import subprocess
if __name__ == '__main__':
process = subprocess.Popen(
['python', 'test_executable_caller.py','test_executable_callee.py'],
shell = False,
universal_newlines = True
)
exit_status = process.wait()
| 2.21875 | 2 |
quiz/tests/test_models.py | Palombredun/django_quiz | 0 | 12795334 | import datetime
from django.contrib.auth.models import User
from quiz.models import (
AnswerUser,
Category,
Grade,
Question,
QuestionScore,
Quiz,
Statistic,
SubCategory,
ThemeScore,
)
import pytest
### FIXTURES ###
@pytest.fixture
def category_m(db):
return Category.objects.create(category="m")
@pytest.fixture
def sub_category_n(db, category_m):
return SubCategory.objects.create(category=category_m, sub_category="n")
@pytest.fixture
def user_A(db):
return User.objects.create_user(username="A")
@pytest.fixture
def quiz_q(db, category_m, sub_category_n, user_A):
date = datetime.datetime.now()
return Quiz.objects.create(
title="title",
description="Long description",
creator=user_A,
category=category_m,
category_name="m",
sub_category=sub_category_n,
created=date,
random_order=False,
difficulty=1,
)
@pytest.fixture
def question_q(db, quiz_q):
return Question.objects.create(
quiz=quiz_q,
difficulty=1,
order=1,
figure=None,
content="question",
explanation=None,
theme1="t1",
theme2="t2",
theme3="t3",
)
@pytest.fixture
def answerUser(db, question_q, user_A):
a = AnswerUser.objects.create(correct=True)
a.save()
a.question.add(question_q)
a.user.add(user_A)
return a
@pytest.fixture
def stats_s(db, quiz_q):
return Statistic.objects.create(
quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5
)
@pytest.fixture
def grade_g(db, stats_s):
return Grade.objects.create(grade=5, number=10, statistics=stats_s)
@pytest.fixture
def questionScore_qs(db, stats_s, question_q):
return QuestionScore.objects.create(
question=question_q, statistics=stats_s, score=5
)
@pytest.fixture
def themeScore_ts(db, stats_s, quiz_q):
return ThemeScore.objects.create(
theme="t1", score=5, statistics=stats_s, quiz=quiz_q
)
### TESTS ###
def test_category(category_m):
assert isinstance(category_m, Category)
assert category_m.category == "m"
assert str(category_m) == "m"
def test_sub_category(category_m, sub_category_n):
assert sub_category_n.sub_category == "n"
assert sub_category_n.category == category_m
assert isinstance(sub_category_n, SubCategory)
assert str(sub_category_n) == "n (m)"
def test_quiz(quiz_q, user_A, category_m, sub_category_n):
date = datetime.datetime.now()
assert quiz_q.title == "title"
assert quiz_q.description == "Long description"
assert quiz_q.creator == user_A
assert quiz_q.category == category_m
assert quiz_q.sub_category == sub_category_n
assert isinstance(quiz_q.created, datetime.datetime)
assert quiz_q.created.year == date.year
assert quiz_q.created.month == date.month
assert quiz_q.created.day == date.day
assert quiz_q.random_order == False
assert quiz_q.difficulty == 1
assert str(quiz_q) == "title"
def test_question(quiz_q, question_q):
assert question_q.quiz == quiz_q
assert question_q.difficulty == 1
assert question_q.order == 1
assert question_q.figure == None
assert question_q.content == "question"
assert question_q.explanation == None
assert question_q.theme1 == "t1"
assert question_q.theme2 == "t2"
assert question_q.theme3 == "t3"
assert str(question_q) == "question"
def test_answerUser(answerUser, question_q, user_A):
assert answerUser.correct == True
assert answerUser.question.get(pk=question_q.id) == question_q
assert answerUser.user.get(pk=user_A.id) == user_A
def test_statistic(stats_s, quiz_q):
assert stats_s.quiz == quiz_q
assert stats_s.number_participants == 10
assert stats_s.mean == 15
assert stats_s.easy == 5
assert stats_s.medium == 5
assert stats_s.difficult == 5
def test_grade(grade_g, stats_s):
assert grade_g.grade == 5
assert grade_g.number == 10
assert grade_g.statistics == stats_s
def test_questionScore(stats_s, question_q, questionScore_qs):
assert questionScore_qs.question == question_q
assert questionScore_qs.statistics == stats_s
assert questionScore_qs.score == 5
def test_themeScore(themeScore_ts, stats_s, quiz_q):
assert themeScore_ts.theme == "t1"
assert themeScore_ts.score == 5
assert themeScore_ts.statistics == stats_s
assert themeScore_ts.quiz == quiz_q
| 2.203125 | 2 |
ex_017.py | antonioravila/Exercicios-CEV-Python | 0 | 12795335 | '''
# Method 1 for computing the hypotenuse (without the math module)
co = float(input('comprimento do cateto oposto: '))
ca = float(input('comprimento do cateto adjacente: '))
h = ((co ** 2) + (ca ** 2)) ** (1/2)
print(f'a hipotenusa equivale a {h:.2f}')
'''
# Method 2 for computing the hypotenuse (with the math module)
import math
co = float(input('comprimento do cateto oposto: '))
ca = float(input('comprimento do cateto adjacente: '))
h = math.hypot(co, ca)
print(f'a hipotenusa equivale a {h:.2f}')
| 3.9375 | 4 |
src/backend/data/bofa.py | akmadian/openfinance | 1 | 12795336 | import csv
import json
from ofxtools.Parser import OFXTree
"""
statement schema:
{
"id":int,
"ref_no": int,
"date": string,
"account": int, account code,
"isincome": bool,
"countinbudget": bool,
"payee": string,
"notes": {
"bank": string,
"personal": string
},
"categories": [strings],
"totalamount": float,
"splits":[
{
"amount": float,
"categories": [],
"linked_transaction": int,
"countinbudget": bool
}
]
}
"""
def fetch_all_transactions():
cc = import_cc_statement()
acct = import_acct_statement()
return cc + acct
def convert_acct_fitid(id):
if "-" in id:
return id.split("-")[0]
else:
return id.split(".")[0]
def convert_date_to_ISO(datestring):
return "{}-{}-{}T{}:{}:{}Z".format(
datestring[:4],
datestring[4:6],
datestring[6:8],
datestring[8:10],
datestring[10:12],
datestring[12:14],
)
def import_acct_statement():
print("Importing BOFA Account Statement")
parser = OFXTree()
parser.parse("./data/stmt.qfx")
transactions_root = parser.find(".//BANKTRANLIST")[:]
transactions = []
for trans in transactions_root[2:]:
transactions.append({
"id": 0,
"ref_no": int(convert_acct_fitid(trans[3].text)),
"date": convert_date_to_ISO(trans[1].text),
"account": "BOFA_CHECKING",
"payee": trans[4].text,
"notes": {
"bank": "",
"personal": ""
},
"categories": [],
"totalamount": float(trans[2].text),
"splits": []
})
return transactions
def import_cc_statement():
print("Importing BOFA CC Statement")
parser = OFXTree()
parser.parse("./data/currentTransaction_1626.qfx")
transactions_root = parser.find(".//BANKTRANLIST")[:]
id = 0
transactions = []
for trans in transactions_root[2:]:
transactions.append({
"id": id,
"ref_no": int(trans[3].text),
"date": convert_date_to_ISO(trans[1].text),
"account": "BOFA_CASHREWARDS_CC",
"payee": trans[6].text,
"notes": {
"bank": "",
"personal": ""
},
"categories": [],
"totalamount": float(trans[2].text),
"splits": []
})
return transactions
def fetch_acct_info():
print("Updating Account Information")
accounts = []
parser = OFXTree()
parser.parse('./data/currentTransaction_1626.qfx')
accounts.append({
"name": "BOFA_CASHREWARDS_CC",
"id": 2,
"balance": parser.find(".//BALAMT").text,
"last_updated": convert_date_to_ISO(parser.find(".//DTASOF").text)
})
parser.parse('./data/stmt.qfx')
accounts.append({
"name": "BOFA_CHECKING",
"id": 0,
"balance": parser.find(".//BALAMT").text,
"last_updated": convert_date_to_ISO(parser.find(".//DTASOF").text)
})
return accounts
if __name__ == '__main__':
accts = fetch_acct_info()
for acct in accts:
print(acct)
| 2.75 | 3 |
code_transformer/configuration/great_transformer.py | SpirinEgor/code-transformer | 0 | 12795337 | <gh_stars>0
from code_transformer.configuration.configuration_utils import ModelConfiguration
class GreatTransformerConfig(ModelConfiguration):
def __init__(
self,
num_layers: int,
positional_encoding=None,
embed_dim=256,
num_heads=8,
ff_dim=1024,
dropout_rate=0.1,
is_encoder_decoder=False,
):
super(GreatTransformerConfig, self).__init__()
self.num_layers = num_layers
self.positional_encoding = positional_encoding
self.embed_dim = embed_dim
self.hidden_dim = embed_dim
self.attention_dim = embed_dim
self.bias_dim = embed_dim
self.num_heads = num_heads
self.ff_dim = ff_dim
self.dropout_rate = dropout_rate
self.is_encoder_decoder = is_encoder_decoder
class GreatEncoderConfig(ModelConfiguration):
def __init__(
self,
transformer_config: GreatTransformerConfig,
vocab_size=32000,
num_node_types=None,
subtokens_per_token=5,
num_languages=None,
):
super(GreatEncoderConfig, self).__init__()
self.transformer_config = transformer_config
self.vocab_size = vocab_size
self.num_node_types = num_node_types
self.subtokens_per_token = subtokens_per_token
self.num_languages = num_languages
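# Illustrative construction of the configuration objects defined above
# (all values are arbitrary and only meant to show the call signatures).
if __name__ == "__main__":
    transformer_cfg = GreatTransformerConfig(num_layers=4, embed_dim=128, num_heads=8)
    encoder_cfg = GreatEncoderConfig(transformer_cfg, vocab_size=32000, num_node_types=100)
    # hidden_dim, attention_dim and bias_dim all track embed_dim
    print(encoder_cfg.transformer_config.hidden_dim)  # 128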
| 2.015625 | 2 |
adminmgr/testmgr/testoutput.py | IamMayankThakur/test-bigdata | 9 | 12795338 | <reponame>IamMayankThakur/test-bigdata<filename>adminmgr/testmgr/testoutput.py
import os
from hdfs import InsecureClient
from .config import *
def test_task_3(output_paths):
if(output_paths[0] == None):
return [0]
path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH,
"Task3",
"0.txt")
path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH,
"Task3",
"1.txt")
x_1 = os.system("diff -wBZ "+path_to_correct_output_1 + " " + output_paths[0] + "> abc")
x_2 = os.system("diff -wBZ "+path_to_correct_output_2 + " " + output_paths[0] + "> abc")
if x_1 == 0 or x_2 == 0:
return [4]
return [0]
def test_task_1_2(output_paths, task_number):
if(output_paths[0] == None):
return [0]
path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH,
"Task"+task_number,
"0.txt")
'''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH,
"Task"+task_number,
"1.txt")
path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH,
"Task"+task_number,
"2.txt")
path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH,
"Task"+task_number,
"3.txt")'''
x_1 = os.system("diff -wBZ "+path_to_correct_output_1 + " " + output_paths[0] + "> abc")
if x_1 == 0:
return [4]
'''x_2 = os.system("diff -wBZ "+path_to_correct_output_2 + " " + output_paths[0] + "> abc")
if x_2 == 0:
return [3]
x_3 = os.system("diff -wBZ "+path_to_correct_output_3 + " " + output_paths[0] + "> abc")
if x_3 == 0:
return [2]
x_4 = os.system("diff -wBZ "+path_to_correct_output_4 + " " + output_paths[0] + "> abc")
# print("x_1 = " + str(x_1))
# print("x_2 = " + str(x_2))
# print("x_3 = " + str(x_3))
# print("x_4 = " + str(x_4))
if x_4 == 0:
return [1]'''
return [0]
def test(output_paths, task_number):
correctness = []
for output_path in output_paths:
scores = []
if output_path is None:
print("SKIPPING TEST CASE ")
continue
print(output_path)
test_case = output_paths.index(output_path) + 1
for i in range(4):
score = check_test_case(output_path,
(os.path.join(SETTERS_OUTPUT_BASE_PATH, "Task"+task_number,
str(i) + ".txt")), str(i))
if(score != 0):
scores.append(score)
break
if(len(scores) == 0):
correctness.append(0)
else:
correctness.append(scores[0])
print("CORRECTNESS")
print(correctness)
return correctness
def check_test_case(path_to_team_output, path_to_correct_output,
test_case_number):
print("team_output = " + path_to_team_output)
print("setters_output = " + path_to_correct_output)
score = 4 - int(test_case_number)
x = os.system("diff -wBZ "+path_to_team_output+" "+path_to_correct_output + " > abc")
print("x = "+str(x))
if x == 0:
print("TEST CASE " + str(test_case_number) + " PASSED")
return score
else:
return 0
| 2.28125 | 2 |
setup.py | kipparker/django-trello-webhooks | 11 | 12795339 | import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.3",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.migrations',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 1.242188 | 1 |
examples/lightfm_recs.py | ankane/pgvector-python | 10 | 12795340 | from lightfm import LightFM
from lightfm.datasets import fetch_movielens
from pgvector.sqlalchemy import Vector
from sqlalchemy import create_engine, text, Column, Float, Integer, String
from sqlalchemy.orm import declarative_base, Session
engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True)
with engine.connect() as conn:
conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector'))
conn.commit()
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
factors = Column(Vector(20))
class Item(Base):
__tablename__ = 'item'
id = Column(Integer, primary_key=True)
title = Column(String)
factors = Column(Vector(20))
bias = Column(Float)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
data = fetch_movielens(min_rating=5.0)
model = LightFM(loss='warp', no_components=20)
model.fit(data['train'], epochs=30)
user_biases, user_factors = model.get_user_representations()
item_biases, item_factors = model.get_item_representations()
users = [dict(id=i, factors=factors) for i, factors in enumerate(user_factors)]
items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)]
session = Session(engine)
session.bulk_insert_mappings(User, users)
session.bulk_insert_mappings(Item, items)
session.commit()
user = session.query(User).get(1)
# subtract item bias for negative inner product
items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all()
print('user-based recs:', [item.title for item in items])
item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first()
items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all()
print('item-based recs:', [item.title for item in items])
| 2.390625 | 2 |
src/SimpleSchemaGenerator/Plugins/PicklePlugin.py | davidbrownell/Common_SimpleSchemaGenerator | 0 | 12795341 | <filename>src/SimpleSchemaGenerator/Plugins/PicklePlugin.py
# ----------------------------------------------------------------------
# |
# | PicklePlugin.py
# |
# | <NAME> <<EMAIL>>
# | 2020-07-24 15:08:32
# |
# ----------------------------------------------------------------------
# |
# | Copyright <NAME> 2020-21
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Contains the Plugin object"""
import os
import pickle
import sys
import CommonEnvironment
from CommonEnvironment.Interface import staticderived, override, DerivedProperty
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from ..Plugin import Plugin as PluginBase, ParseFlag, Extension
# ----------------------------------------------------------------------
@staticderived
class Plugin(PluginBase):
# ----------------------------------------------------------------------
# | Public Properties
Name = DerivedProperty("Pickle")
Description = DerivedProperty("Pickles each element to a file")
Flags = DerivedProperty(ParseFlag.AllFlags)
# ----------------------------------------------------------------------
# | Public Methods
@staticmethod
@override
def IsValidEnvironment():
return True
# ----------------------------------------------------------------------
@staticmethod
@override
def GenerateCustomSettingsAndDefaults():
return []
# ----------------------------------------------------------------------
@staticmethod
@override
def GenerateOutputFilenames(context):
        # Return a single item (that will never be used), as empty lists
# aren't supported.
return ["{}.{}".format(context["output_name"], ext) for ext in ["pickle", "path"]]
# ----------------------------------------------------------------------
@staticmethod
@override
def Generate(
simple_schema_generator,
invoke_reason,
input_filenames,
output_filenames,
name,
elements,
include_indexes,
status_stream,
verbose_stream,
verbose,
**custom_settings
):
assert len(output_filenames) == 2, output_filenames
# Pickle
status_stream.write("Creating '{}'...".format(output_filenames[0]))
with status_stream.DoneManager() as status_dm:
with open(output_filenames[0], "wb") as f:
pickle.dump(elements, f)
# Path
status_stream.write("Creating '{}'...".format(output_filenames[1]))
with status_stream.DoneManager() as status_dm:
generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename)
assert os.path.isdir(generator_path), generator_path
generator_path = os.path.dirname(generator_path)
with open(output_filenames[1], "w") as f:
f.write(generator_path)
| 1.984375 | 2 |
2019/Lecture02/02Examples/Lecture02_Profiling02.py | cbchoi/SIT32004 | 1 | 12795342 | <gh_stars>1-10
# calculate 10!
import time
from functools import wraps
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.perf_counter()
result = fn(*args, **kwargs)
t2 = time.perf_counter()
print("@timefn: {} took {} seconds".format(fn.__name__, t2 - t1))
return result
return measure_time
def factorial(num):
if num == 1:
return 1
else:
print("Caluating " + str(num) + "!")
return factorial(num -1) * num
@timefn
def profiling_factorial():
value = 10
result = factorial(value)
print("10!= " + str(result))
if __name__ == "__main__":
profiling_factorial() | 3.28125 | 3 |
catkin_ws/build/simple_applications/cmake/simple_applications-genmsg-context.py | a-yildiz/ROS-Simple-Sample-Packages | 1 | 12795343 | <filename>catkin_ws/build/simple_applications/cmake/simple_applications-genmsg-context.py
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg"
services_str = "/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv"
pkg_name = "simple_applications"
dependencies_str = ""
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg"
PYTHON_EXECUTABLE = "/usr/bin/python3"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/noetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 1.179688 | 1 |
tail.py | doctoryes/python-morsels | 0 | 12795344 |
def tail(things, num_items):
if num_items <= 0:
return []
x = list(things)
    return x[max(len(x) - num_items, 0):]
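# Illustrative behaviour (assumed usage, not part of the original exercise):
if __name__ == "__main__":
    print(tail([1, 2, 3, 4, 5], 2))  # [4, 5]
    print(tail("hello", 0))          # []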
| 3.5625 | 4 |
xarray/tests/test_tutorial.py | apkrelling/xarray | 0 | 12795345 | <reponame>apkrelling/xarray
import os
import pytest
from xarray import DataArray, tutorial
from . import assert_identical, network
@network
class TestLoadDataset:
@pytest.fixture(autouse=True)
def setUp(self):
self.testfile = "tiny"
def test_download_from_github(self, tmp_path, monkeypatch):
monkeypatch.setenv("XDG_CACHE_DIR", os.fspath(tmp_path))
ds = tutorial.open_dataset(self.testfile).load()
tiny = DataArray(range(5), name="tiny").to_dataset()
assert_identical(ds, tiny)
def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch):
monkeypatch.setenv("XDG_CACHE_DIR", os.fspath(tmp_path))
ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load()
ds_cache = tutorial.open_dataset(self.testfile).load()
assert_identical(ds_cache, ds_nocache)
def test_download_rasterio_from_github_load_without_cache(
self, tmp_path, monkeypatch
):
monkeypatch.setenv("XDG_CACHE_DIR", os.fspath(tmp_path))
ds_nocache = tutorial.open_dataset("RGB.byte", cache=False).load()
ds_cache = tutorial.open_dataset("RGB.byte", cache=True).load()
assert_identical(ds_cache, ds_nocache)
| 2.1875 | 2 |
qb_to_dynaboard.py | Pinafore/qb | 122 | 12795346 | <gh_stars>100-1000
import argparse
import json
from pathlib import Path
DS_VERSION = "2018.04.18"
LOCAL_QANTA_PREFIX = "data/external/datasets/"
QANTA_TRAIN_DATASET_PATH = f"qanta.train.{DS_VERSION}.json"
QANTA_DEV_DATASET_PATH = f"qanta.dev.{DS_VERSION}.json"
QANTA_TEST_DATASET_PATH = f"qanta.test.{DS_VERSION}.json"
def main():
parser = argparse.ArgumentParser()
parser.add_argument('output_dir', type=str)
args = parser.parse_args()
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]:
with open(Path(LOCAL_QANTA_PREFIX) / path) as f:
data = json.load(f)
output = []
for q in data['questions']:
output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''})
with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f:
for r in output:
f.write(f'{json.dumps(r)}\n')
if __name__ == '__main__':
main() | 2.421875 | 2 |
CodeUp/1546.py | chae-heechan/Algorithm_Study | 0 | 12795347 | # Determine plus/minus/zero using a function
def f(key):
if key>0:
print("plus")
elif key==0:
print("zero")
else:
print("minus")
f(int(input())) | 3.890625 | 4 |
Python/PythonExercicios/ex014.py | isabellathome/College-Activities | 0 | 12795348 | <reponame>isabellathome/College-Activities
celsius = float(input('Informe a temperatura em graus ºC: '))
fah = ((celsius * 9) / 5) + 32
print('A temperatura de {}ºC corresponde a {}ºF'.format(celsius, fah))
| 3.84375 | 4 |
ecorelevesensor/models/animal.py | NaturalSolutions/ecoReleve-Server | 0 | 12795349 | <filename>ecorelevesensor/models/animal.py
from sqlalchemy import (
Column,
Index,
Integer,
Sequence,
String,
)
from ecorelevesensor.models import Base
class Animal(Base):
#TODO: Ajouter un autoincrément à la fin d'eRelevé
__tablename__ = 'T_Animal'
id = Column('PK_id', Integer, primary_key=True)
chip_code = Column(String(10))
__table_args__ = (
Index('idx_Tanimal_chipcode_pk', chip_code, id),
) | 2.40625 | 2 |
billforward/apis/profiles_api.py | billforward/bf-python | 2 | 12795350 | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ProfilesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_all_profiles(self, **kwargs):
"""
Returns a collection of all profiles. By default 10 values are returned. Records are returned in natural order
{\"nickname\":\"Get all profiles\",\"response\":\"getProfileAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_profiles(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_profiles_with_http_info(**kwargs)
else:
(data) = self.get_all_profiles_with_http_info(**kwargs)
return data
def get_all_profiles_with_http_info(self, **kwargs):
"""
Returns a collection of all profiles. By default 10 values are returned. Records are returned in natural order
{\"nickname\":\"Get all profiles\",\"response\":\"getProfileAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_profiles_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organizations', 'offset', 'records', 'order_by', 'order']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_profiles" % key
)
params[key] = val
del params['kwargs']
resource_path = '/profiles'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_profile(self, profile_id, **kwargs):
"""
Returns a single profile, specified by the ID parameter.
{\"nickname\":\"Retrieve an existing profile\",\"response\":\"getProfileByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile(profile_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str profile_id: ID of the Profile. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_profile_with_http_info(profile_id, **kwargs)
else:
(data) = self.get_profile_with_http_info(profile_id, **kwargs)
return data
def get_profile_with_http_info(self, profile_id, **kwargs):
"""
Returns a single profile, specified by the ID parameter.
{\"nickname\":\"Retrieve an existing profile\",\"response\":\"getProfileByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str profile_id: ID of the Profile. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['profile_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'profile_id' is set
if ('profile_id' not in params) or (params['profile_id'] is None):
raise ValueError("Missing the required parameter `profile_id` when calling `get_profile`")
resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json')
path_params = {}
if 'profile_id' in params:
path_params['profile-ID'] = params['profile_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
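# The two methods above follow the generated sync/async pattern: the plain wrapper
# returns the deserialized ProfilePagedMetadata, while the *_with_http_info variant
# exposes the underlying call. A minimal usage sketch (the ProfilesApi name, default
# ApiClient configuration and the 'PRO-123' ID are assumptions for illustration only):
#
#   from pprint import pprint
#   api = ProfilesApi()                      # assumes a default ApiClient is configured
#   profile = api.get_profile('PRO-123')     # synchronous call, returns ProfilePagedMetadata
#   pprint(profile)
#
#   def on_done(response):                   # asynchronous call via callback
#       pprint(response)
#   thread = api.get_profile('PRO-123', callback=on_done)
#   thread.join()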
def get_profile_by_account_id(self, account_id, **kwargs):
"""
Returns a collection of profiles, specified by the account-ID parameter. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve by account\",\"response\":\"getProfileByAccountID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_by_account_id(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The account-ID of the profile. (required)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_profile_by_account_id_with_http_info(account_id, **kwargs)
else:
(data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs)
return data
def get_profile_by_account_id_with_http_info(self, account_id, **kwargs):
"""
Returns a collection of profiles, specified by the account-ID parameter. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve by account\",\"response\":\"getProfileByAccountID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The account-ID of the profile. (required)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_profile_by_account_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_profile_by_account_id`")
resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['account-ID'] = params['account_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
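# Usage sketch for the paged account lookup above (hypothetical values; the ProfilesApi
# name, the 'ACC-001' account ID and the 'created' field are assumptions, not taken
# from this file):
#
#   api = ProfilesApi()
#   page = api.get_profile_by_account_id('ACC-001',
#                                        offset=0, records=25,
#                                        order_by='created', order='DESC')
#   # offset/records page through the result set; order_by/order control sorting.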
def get_profile_by_email_address(self, email, **kwargs):
"""
Returns a single profile, specified by the email parameter.
{\"nickname\":\"Retrieve by e-mail\",\"response\":\"getProfileByEmail.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_by_email_address(email, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email: The email address of the profile. (required)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired profiles should be returned.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_profile_by_email_address_with_http_info(email, **kwargs)
else:
(data) = self.get_profile_by_email_address_with_http_info(email, **kwargs)
return data
def get_profile_by_email_address_with_http_info(self, email, **kwargs):
"""
Returns a single profile, specified by the email parameter.
{\"nickname\":\"Retrieve by e-mail\",\"response\":\"getProfileByEmail.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email: The email address of the profile. (required)
:param list[str] organizations: A list of organizations used to restrict the scope of API calls.
:param int offset: The offset from the first profile to return.
:param int records: The maximum number of profiles to return.
:param str order_by: Specify a field used to order the result set.
:param str order: The direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired profiles should be returned.
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_profile_by_email_address" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email' is set
if ('email' not in params) or (params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `get_profile_by_email_address`")
resource_path = '/profiles/email/{email}'.replace('{format}', 'json')
path_params = {}
if 'email' in params:
path_params['email'] = params['email']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
if 'include_retired' in params:
query_params['include_retired'] = params['include_retired']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
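# Usage sketch for the e-mail lookup above (the address is a placeholder; passing
# include_retired=True widens the search to retired profiles):
#
#   page = api.get_profile_by_email_address('user@example.com', include_retired=True)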
def update_profile(self, request, **kwargs):
"""
Update a profile
{\"nickname\":\"Update a profile\",\"request\":\"updateProfileRequest.html\",\"response\":\"updateProfileResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_profile(request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param UpdateProfileRequest request: The profile object to be updated. (required)
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_profile_with_http_info(request, **kwargs)
else:
(data) = self.update_profile_with_http_info(request, **kwargs)
return data
def update_profile_with_http_info(self, request, **kwargs):
"""
Update a profile
{\"nickname\":\"Update a profile\",\"request\":\"updateProfileRequest.html\",\"response\":\"updateProfileResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_profile_with_http_info(request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param UpdateProfileRequest request: The profile object to be updated. (required)
:return: ProfilePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['request']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'request' is set
if ('request' not in params) or (params['request'] is None):
raise ValueError("Missing the required parameter `request` when calling `update_profile`")
resource_path = '/profiles'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request' in params:
body_params = params['request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProfilePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
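# Usage sketch for update_profile (a sketch only: the UpdateProfileRequest constructor
# and the field names shown here are assumptions, not confirmed by this file):
#
#   request = UpdateProfileRequest(profile_id='PRO-123', landline='+44 20 7946 0000')
#   updated = api.update_profile(request)    # issues a PUT to /profiles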
| 1.65625 | 2 |