repo_name (string, 7–94 chars) | repo_path (string, 4–237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10–680k chars) | apis (string, 2–840k chars) |
---|---|---|---|---|
J-asy/Emotion-FAN | basic_code/networks.py | 30c1e24a31b2a05c0810a17eb533096a7baaeeef | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
import cv2
import pdb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def norm_angle(angle):
norm_angle = sigmoid(10 * (abs(angle) / 0.7853975 - 1))
return norm_angle
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
###''' self-attention; relation-attention '''
class ResNet_AT(nn.Module):
def __init__(self, block, layers, num_classes=1000, end2end=True, at_type=''):
self.inplanes = 64
self.end2end = end2end
super(ResNet_AT, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(0.5)
self.dropout2 = nn.Dropout(0.6)
self.alpha = nn.Sequential(nn.Linear(512, 1),
nn.Sigmoid())
self.beta = nn.Sequential(nn.Linear(1024, 1),
nn.Sigmoid())
self.pred_fc1 = nn.Linear(512, 7)
self.pred_fc2 = nn.Linear(1024, 7)
self.at_type = at_type
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x='', phrase='train', AT_level='first_level',vectors='',vm='',alphas_from1='',index_matrix=''):
vs = []
alphas = []
assert phrase == 'train' or phrase == 'eval'
assert AT_level == 'first_level' or AT_level == 'second_level' or AT_level == 'pred'
if phrase == 'train':
num_pair = 3
for i in range(num_pair):
f = x[:, :, :, :, i] # x[128,3,224,224]
f = self.conv1(f)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
vs.append(f)
alphas.append(self.alpha(self.dropout(f)))
vs_stack = torch.stack(vs, dim=2)
alphas_stack = torch.stack(alphas, dim=2)
if self.at_type == 'self-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
if self.at_type == 'self_relation-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
betas = []
for i in range(len(vs)):
vs[i] = torch.cat([vs[i], vm1], dim=1)
betas.append(self.beta(self.dropout(vs[i])))
cascadeVs_stack = torch.stack(vs, dim=2)
betas_stack = torch.stack(betas, dim=2)
output = cascadeVs_stack.mul(betas_stack * alphas_stack).sum(2).div((betas_stack * alphas_stack).sum(2))
if self.at_type == 'self-attention':
vm1 = self.dropout(vm1)
pred_score = self.pred_fc1(vm1)
if self.at_type == 'self_relation-attention':
output = self.dropout2(output)
pred_score = self.pred_fc2(output)
return pred_score
if phrase == 'eval':
if AT_level == 'first_level':
f = self.conv1(x)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
alphas = self.alpha(self.dropout(f))
return f, alphas
if AT_level == 'second_level':
assert self.at_type == 'self_relation-attention'
vms = index_matrix.permute(1, 0).mm(vm) # [381, 21783] -> [21783,381] * [381,512] --> [21783, 512]
vs_cate = torch.cat([vectors, vms], dim=1)
betas = self.beta(self.dropout(vs_cate))
''' keywords: mean_fc ; weight_sourcefc; sum_alpha; weightmean_sourcefc '''
''' alpha * beta '''
weight_catefc = vs_cate.mul(alphas_from1) # [21570,512] * [21570,1] --->[21570,512]
alpha_beta = alphas_from1.mul(betas)
sum_alphabetas = index_matrix.mm(alpha_beta) # [380,21570] * [21570,1] -> [380,1]
weightmean_catefc = index_matrix.mm(weight_catefc).div(sum_alphabetas)
weightmean_catefc = self.dropout2(weightmean_catefc)
pred_score = self.pred_fc2(weightmean_catefc)
return pred_score
if AT_level == 'pred':
if self.at_type == 'self-attention':
pred_score = self.pred_fc1(self.dropout(vm))
return pred_score
''' self-attention; relation-attention '''
def resnet18_at(pretrained=False, **kwargs):
# Constructs a base ResNet-18 model.
model = ResNet_AT(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
| [((20, 11, 21, 43), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((30, 19, 30, 41), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(30, 34, 30, 40): 'planes'}, {}), '(planes)', True, 'import torch.nn as nn\n'), ((31, 20, 31, 29), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((33, 19, 33, 41), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(33, 34, 33, 40): 'planes'}, {}), '(planes)', True, 'import torch.nn as nn\n'), ((61, 21, 61, 75), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((62, 19, 62, 41), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(62, 34, 62, 40): 'planes'}, {}), '(planes)', True, 'import torch.nn as nn\n'), ((63, 21, 64, 53), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((65, 19, 65, 41), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(65, 34, 65, 40): 'planes'}, {}), '(planes)', True, 'import torch.nn as nn\n'), ((66, 21, 66, 77), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((67, 19, 67, 45), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(67, 34, 67, 44): 'planes * 4'}, {}), '(planes * 4)', True, 'import torch.nn as nn\n'), ((68, 20, 68, 29), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((101, 21, 102, 42), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((103, 19, 103, 37), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(103, 34, 103, 36): '64'}, {}), '(64)', True, 'import torch.nn as nn\n'), ((104, 20, 104, 29), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((105, 23, 105, 71), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (), '', True, 'import torch.nn as nn\n'), ((110, 23, 110, 46), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', ({(110, 44, 110, 45): '1'}, {}), '(1)', True, 'import torch.nn as nn\n'), ((111, 23, 111, 38), 'torch.nn.Dropout', 'nn.Dropout', ({(111, 34, 111, 37): '0.5'}, {}), '(0.5)', True, 'import torch.nn as nn\n'), ((112, 24, 112, 39), 'torch.nn.Dropout', 'nn.Dropout', ({(112, 35, 112, 38): '0.6'}, {}), '(0.6)', True, 'import torch.nn as nn\n'), ((119, 24, 119, 41), 'torch.nn.Linear', 'nn.Linear', ({(119, 34, 119, 37): '512', (119, 39, 119, 40): '7'}, {}), '(512, 7)', True, 'import torch.nn as nn\n'), ((120, 24, 120, 42), 'torch.nn.Linear', 'nn.Linear', ({(120, 34, 120, 38): '1024', (120, 40, 120, 41): '7'}, {}), '(1024, 7)', True, 'import torch.nn as nn\n'), ((146, 15, 146, 37), 'torch.nn.Sequential', 'nn.Sequential', ({(146, 29, 146, 36): '*layers'}, {}), '(*layers)', True, 'import torch.nn as nn\n'), ((10, 20, 10, 32), 'math.exp', 'math.exp', ({(10, 29, 10, 31): '(-x)'}, {}), '(-x)', False, 'import math\n'), ((113, 35, 113, 52), 'torch.nn.Linear', 'nn.Linear', ({(113, 45, 113, 48): '512', (113, 50, 113, 51): '1'}, {}), '(512, 1)', True, 'import torch.nn as nn\n'), ((114, 35, 114, 47), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((116, 34, 116, 52), 'torch.nn.Linear', 'nn.Linear', ({(116, 44, 116, 48): '1024', (116, 50, 116, 51): '1'}, {}), '(1024, 1)', True, 'import torch.nn as nn\n'), ((117, 34, 117, 46), 'torch.nn.Sigmoid', 'nn.Sigmoid', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((178, 23, 178, 45), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((179, 27, 179, 53), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((135, 16, 136, 67), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((137, 16, 137, 56), 
'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ({(137, 31, 137, 55): 'planes * block.expansion'}, {}), '(planes * block.expansion)', True, 'import torch.nn as nn\n'), ((190, 34, 190, 56), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((191, 30, 191, 55), 'torch.stack', 'torch.stack', (), '', False, 'import torch\n'), ((226, 26, 226, 58), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((126, 41, 126, 58), 'math.sqrt', 'math.sqrt', ({(126, 51, 126, 57): '(2.0 / n)'}, {}), '(2.0 / n)', False, 'import math\n'), ((187, 28, 187, 58), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n')] |
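The `apis` cell that closes each row (the two lines above) is a Python-literal list of tuples: a source span `(start_line, start_col, end_line, end_col)`, the fully qualified API name, the local call expression, the extracted arguments, the raw argument text, a flag, and the import statement the call relies on. The field meanings beyond the span and the qualified name are descriptive guesses, not documented here. A minimal parsing sketch, assuming each cell is a complete Python literal:

```python
import ast

def parse_apis_cell(cell: str):
    """Parse one `apis` cell into Python objects with ast.literal_eval."""
    return ast.literal_eval(cell)

# Tiny hand-made cell in the same shape as the rows above (hypothetical values).
example = ("[((20, 11, 21, 43), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, "
           "'import torch.nn as nn\\n')]")
for entry in parse_apis_cell(example):
    span, qualified_name = entry[0], entry[1]
    print(span, qualified_name)   # (20, 11, 21, 43) torch.nn.Conv2d
```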
symphonyrm/ndscheduler | ndscheduler/server/handlers/index.py | e9a56ef345b25916a2b53d1ea3349efb532d63ce | """Serves the single page app web ui."""
import json
import tornado.gen
from ndscheduler import settings
from ndscheduler import utils
from ndscheduler.server.handlers import base
class Handler(base.BaseHandler):
"""Index page request handler."""
@tornado.gen.coroutine
def get(self):
"""Serve up the single page app for scheduler dashboard."""
meta_info = utils.get_all_available_jobs()
self.render(settings.APP_INDEX_PAGE, jobs_meta_info=json.dumps(meta_info))
| [((18, 20, 18, 50), 'ndscheduler.utils.get_all_available_jobs', 'utils.get_all_available_jobs', ({}, {}), '()', False, 'from ndscheduler import utils\n'), ((19, 60, 19, 81), 'json.dumps', 'json.dumps', ({(19, 71, 19, 80): 'meta_info'}, {}), '(meta_info)', False, 'import json\n')] |
eoc21/biopython | Scripts/xbbtools/xbb_io.py | c0f8db8f55a506837c320459957a0ce99b0618b6 | #!/usr/bin/env python
# Created: Wed Jun 21 13:46:35 2000
# Last changed: Time-stamp: <00/12/02 14:18:23 thomas>
# [email protected], http://evolution.bmc.uu.se/~thomas
# File: xbb_io.py
import os, sys # os.system, sys.argv
sys.path.insert(0, '.')
sys.path.insert(0, os.path.expanduser('~thomas/cbs/python/biopython'))
from Bio.ParserSupport import *
from Bio import Fasta
class xbb_io:
    def __init__(self):
        ""
    def error(self, str):
        print str
    def read_fasta_file(self, file):
        genes = []
        iter = Fasta.Iterator(handle = open(file), parser = Fasta.RecordParser())
        while 1:
            rec = iter.next()
            if not rec: break
            genes.append((rec.sequence, rec.title))
        return genes
| [] |
kolyasalubov/Lv-677.PythonCore | HW6/Andrii_Haponov/cw_4.py | c9f9107c734a61e398154a90b8a3e249276c2704 | # Convert a Number to a String!
# We need a function that can transform a number into a string.
# What ways of achieving this do you know?
def number_to_string(num: int) -> str:
    str_num = str(num)
    return str_num
print(number_to_string(123))
print(type(number_to_string(123))) | [] |
explosion/healthsea | project/scripts/clausecat/evaluate_clausecat.py | 4481488ed9fc85b89844ee872d0a8412a33f0b15 | import spacy
from spacy.scorer import PRFScore
import typer
from pathlib import Path
from wasabi import Printer, table
import operator
import benepar
import clausecat_component
import clausecat_model
import clausecat_reader
import clause_segmentation
import clause_aggregation
msg = Printer()
def main(model_path: Path, eval_path: Path):
"""This script is used to evaluate the clausecat component"""
nlp = spacy.load(model_path)
reader = clausecat_reader.ClausecatCorpus(eval_path)
examples = reader(nlp)
clausecat = nlp.get_pipe("clausecat")
scorer = {
"POSITIVE": PRFScore(),
"NEGATIVE": PRFScore(),
"NEUTRAL": PRFScore(),
"ANAMNESIS": PRFScore(),
}
for i, example in enumerate(examples):
prediction = example.predicted
reference = example.reference
# Prediction
prediction = clausecat(prediction)
# Iterate through prediction and references
for pred_clause, ref_clause in zip(prediction._.clauses, reference._.clauses):
prediction_cats = pred_clause["cats"]
reference_cats = ref_clause["cats"]
prediction_class = max(prediction_cats.items(), key=operator.itemgetter(1))[
0
]
# Add to matrix
for label in prediction_cats:
if label != prediction_class:
prediction = 0
else:
prediction = 1
if prediction == 0 and reference_cats[label] != 0:
scorer[label].fn += 1
elif prediction == 1 and reference_cats[label] != 1:
scorer[label].fp += 1
elif prediction == 1 and reference_cats[label] == 1:
scorer[label].tp += 1
# Printing
textcat_data = []
avg_fscore = 0
avg_recall = 0
avg_precision = 0
for label in scorer:
textcat_data.append(
(
label,
round(scorer[label].fscore, 2),
round(scorer[label].recall, 2),
round(scorer[label].precision, 2),
)
)
avg_fscore += scorer[label].fscore
avg_recall += scorer[label].recall
avg_precision += scorer[label].precision
textcat_data.append(
(
"AVERAGE",
round(avg_fscore / len(scorer), 2),
round(avg_recall / len(scorer), 2),
round(avg_precision / len(scorer), 2),
)
)
header = ("Label", "F-Score", "Recall", "Precision")
print(table(textcat_data, header=header, divider=True))
if __name__ == "__main__":
typer.run(main)
| [((15, 6, 15, 15), 'wasabi.Printer', 'Printer', ({}, {}), '()', False, 'from wasabi import Printer, table\n'), ((21, 10, 21, 32), 'spacy.load', 'spacy.load', ({(21, 21, 21, 31): 'model_path'}, {}), '(model_path)', False, 'import spacy\n'), ((22, 13, 22, 56), 'clausecat_reader.ClausecatCorpus', 'clausecat_reader.ClausecatCorpus', ({(22, 46, 22, 55): 'eval_path'}, {}), '(eval_path)', False, 'import clausecat_reader\n'), ((99, 4, 99, 19), 'typer.run', 'typer.run', ({(99, 14, 99, 18): 'main'}, {}), '(main)', False, 'import typer\n'), ((28, 20, 28, 30), 'spacy.scorer.PRFScore', 'PRFScore', ({}, {}), '()', False, 'from spacy.scorer import PRFScore\n'), ((29, 20, 29, 30), 'spacy.scorer.PRFScore', 'PRFScore', ({}, {}), '()', False, 'from spacy.scorer import PRFScore\n'), ((30, 19, 30, 29), 'spacy.scorer.PRFScore', 'PRFScore', ({}, {}), '()', False, 'from spacy.scorer import PRFScore\n'), ((31, 21, 31, 31), 'spacy.scorer.PRFScore', 'PRFScore', ({}, {}), '()', False, 'from spacy.scorer import PRFScore\n'), ((95, 10, 95, 58), 'wasabi.table', 'table', (), '', False, 'from wasabi import Printer, table\n'), ((45, 64, 45, 86), 'operator.itemgetter', 'operator.itemgetter', ({(45, 84, 45, 85): '(1)'}, {}), '(1)', False, 'import operator\n')] |
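The evaluation loop above only increments the `tp`/`fp`/`fn` counters of each label's `PRFScore`; precision, recall, and F-score are then read off as derived properties when the table is printed. A minimal sketch of that accumulation, using spaCy's `PRFScore` exactly as the script imports it; the counts are made up for illustration:

```python
from spacy.scorer import PRFScore

score = PRFScore()
# Hypothetical counts for one label after iterating over the examples.
score.tp += 8   # predicted the label and the reference agrees
score.fp += 2   # predicted the label but the reference disagrees
score.fn += 4   # missed a label that the reference contains
print(round(score.precision, 2), round(score.recall, 2), round(score.fscore, 2))
# 0.8 0.67 0.73
```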
david-waugh/network-automation | utils/test.py | c85ab092cd9b76753c4d35f113126cfb663c1933 | import pathlib
print(pathlib.Path(__file__).parent.resolve())
while True:
    next_cmd = input("> ")
    print(eval(next_cmd))
| [((3, 6, 3, 28), 'pathlib.Path', 'pathlib.Path', ({(3, 19, 3, 27): '__file__'}, {}), '(__file__)', False, 'import pathlib\n')] |
IBM/deepsearch-nlp-annotator-api-example | nlp_annotator_api/server/app.py | 76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40 | import logging
import os
import aiohttp.web
from connexion import AioHttpApp
from nlp_annotator_api.config.config import conf
from nlp_annotator_api.config.logging import setup_logging
from nlp_annotator_api.server.middleware.statsd_middleware import StatsdMiddleware
from nlp_annotator_api.server.signals.statsd_client import statsd_client_factory
setup_logging()
access_log = logging.getLogger("nlp_annotator_api.access")
_file_dir = os.path.dirname(__file__)
app = AioHttpApp(
    __name__, specification_dir=os.path.join(_file_dir, "..", "resources", "schemas"),
    server_args=dict(
        client_max_size=8 * 1024**2
    )
)
app.add_api("openapi.yaml", pass_context_arg_name="request")
aiohttp_app: aiohttp.web.Application = app.app
aiohttp_app.cleanup_ctx.append(statsd_client_factory(conf.statsd))
aiohttp_app.middlewares.append(StatsdMiddleware())
if __name__ == "__main__":
    app.run(access_log=access_log)
| [((12, 0, 12, 15), 'nlp_annotator_api.config.logging.setup_logging', 'setup_logging', ({}, {}), '()', False, 'from nlp_annotator_api.config.logging import setup_logging\n'), ((14, 13, 14, 58), 'logging.getLogger', 'logging.getLogger', ({(14, 31, 14, 57): '"""nlp_annotator_api.access"""'}, {}), "('nlp_annotator_api.access')", False, 'import logging\n'), ((16, 12, 16, 37), 'os.path.dirname', 'os.path.dirname', ({(16, 28, 16, 36): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((29, 31, 29, 65), 'nlp_annotator_api.server.signals.statsd_client.statsd_client_factory', 'statsd_client_factory', ({(29, 53, 29, 64): 'conf.statsd'}, {}), '(conf.statsd)', False, 'from nlp_annotator_api.server.signals.statsd_client import statsd_client_factory\n'), ((31, 31, 31, 49), 'nlp_annotator_api.server.middleware.statsd_middleware.StatsdMiddleware', 'StatsdMiddleware', ({}, {}), '()', False, 'from nlp_annotator_api.server.middleware.statsd_middleware import StatsdMiddleware\n'), ((19, 32, 19, 85), 'os.path.join', 'os.path.join', ({(19, 45, 19, 54): '_file_dir', (19, 56, 19, 60): '""".."""', (19, 62, 19, 73): '"""resources"""', (19, 75, 19, 84): '"""schemas"""'}, {}), "(_file_dir, '..', 'resources', 'schemas')", False, 'import os\n')] |
dcleres/keras_cv_attention_models | keras_cv_attention_models/resnest/resnest.py | 264876673e369f23eff49b3b589b72f908a9625b | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from keras_cv_attention_models.aotnet import AotNet
from keras_cv_attention_models.download_and_load import reload_model_weights
from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias
PRETRAINED_DICT = {
"resnest101": {"imagenet": "63f9ebdcd32529cbc4b4fbbec3d1bb2f"},
"resnest200": {"imagenet": "8e211dcb089b588e18d36ba7cdf92ef0"},
"resnest269": {"imagenet": "4309ed1b0a8ae92f2b1143dc3512c5c7"},
"resnest50": {"imagenet": "eee7b20a229821f730ab205b6afeb369"},
}
def rsoftmax(inputs, groups):
if groups > 1:
nn = tf.reshape(inputs, [-1, 1, groups, inputs.shape[-1] // groups])
# nn = tf.transpose(nn, [0, 2, 1, 3])
nn = tf.nn.softmax(nn, axis=2)
nn = tf.reshape(nn, [-1, 1, 1, inputs.shape[-1]])
else:
nn = keras.layers.Activation("sigmoid")(inputs)
return nn
def split_attention_conv2d(inputs, filters, kernel_size=3, strides=1, downsample_first=False, groups=2, activation="relu", name=""):
h_axis, w_axis = [2, 3] if K.image_data_format() == "channels_first" else [1, 2]
in_channels = inputs.shape[-1]
conv_strides = strides if downsample_first else 1
if groups == 1:
logits = conv2d_no_bias(inputs, filters, kernel_size, strides=conv_strides, padding="same", name=name and name + "1_")
else:
# Using groups=2 is slow in `mixed_float16` policy
# logits = conv2d_no_bias(inputs, filters * groups, kernel_size, padding="same", groups=groups, name=name and name + "1_")
logits = []
splitted_inputs = tf.split(inputs, groups, axis=-1)
for ii in range(groups):
conv_name = name and name + "1_g{}_".format(ii + 1)
logits.append(conv2d_no_bias(splitted_inputs[ii], filters, kernel_size, strides=conv_strides, padding="same", name=conv_name))
logits = tf.concat(logits, axis=-1)
logits = batchnorm_with_activation(logits, activation=activation, name=name and name + "1_")
if groups > 1:
splited = tf.split(logits, groups, axis=-1)
gap = tf.reduce_sum(splited, axis=0)
else:
gap = logits
gap = tf.reduce_mean(gap, [h_axis, w_axis], keepdims=True)
reduction_factor = 4
inter_channels = max(in_channels * groups // reduction_factor, 32)
atten = keras.layers.Conv2D(inter_channels, kernel_size=1, name=name and name + "2_conv")(gap)
atten = batchnorm_with_activation(atten, activation=activation, name=name and name + "2_")
atten = keras.layers.Conv2D(filters * groups, kernel_size=1, name=name and name + "3_conv")(atten)
atten = rsoftmax(atten, groups)
out = keras.layers.Multiply()([atten, logits])
if groups > 1:
out = tf.split(out, groups, axis=-1)
out = tf.reduce_sum(out, axis=0)
if not downsample_first and strides > 1:
out = keras.layers.ZeroPadding2D(padding=1, name=name and name + "pool_pad")(out)
out = keras.layers.AveragePooling2D(3, strides=2, name=name and name + "pool")(out)
return out
def ResNest(input_shape=(224, 224, 3), stem_type="deep", attn_types="sa", bn_after_attn=False, shortcut_type="avg", pretrained="imagenet", **kwargs):
kwargs.pop("kwargs", None)
model = AotNet(**locals(), **kwargs)
reload_model_weights(model, pretrained_dict=PRETRAINED_DICT, sub_release="resnest", pretrained=pretrained)
return model
def ResNest50(input_shape=(224, 224, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 4, 6, 3], stem_width=64, model_name="resnest50", **locals(), **kwargs)
def ResNest101(input_shape=(256, 256, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 4, 23, 3], stem_width=128, model_name="resnest101", **locals(), **kwargs)
def ResNest200(input_shape=(320, 320, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 24, 36, 3], stem_width=128, model_name="resnest200", **locals(), **kwargs)
def ResNest269(input_shape=(416, 416, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 30, 48, 8], stem_width=128, model_name="resnest269", **locals(), **kwargs)
| [((42, 13, 42, 96), 'keras_cv_attention_models.attention_layers.batchnorm_with_activation', 'batchnorm_with_activation', (), '', False, 'from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias\n'), ((49, 10, 49, 62), 'tensorflow.reduce_mean', 'tf.reduce_mean', (), '', True, 'import tensorflow as tf\n'), ((54, 12, 54, 94), 'keras_cv_attention_models.attention_layers.batchnorm_with_activation', 'batchnorm_with_activation', (), '', False, 'from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias\n'), ((72, 4, 72, 110), 'keras_cv_attention_models.download_and_load.reload_model_weights', 'reload_model_weights', (), '', False, 'from keras_cv_attention_models.download_and_load import reload_model_weights\n'), ((18, 13, 18, 76), 'tensorflow.reshape', 'tf.reshape', ({(18, 24, 18, 30): 'inputs', (18, 32, 18, 75): '[-1, 1, groups, inputs.shape[-1] // groups]'}, {}), '(inputs, [-1, 1, groups, inputs.shape[-1] // groups])', True, 'import tensorflow as tf\n'), ((20, 13, 20, 38), 'tensorflow.nn.softmax', 'tf.nn.softmax', (), '', True, 'import tensorflow as tf\n'), ((21, 13, 21, 57), 'tensorflow.reshape', 'tf.reshape', ({(21, 24, 21, 26): 'nn', (21, 28, 21, 56): '[-1, 1, 1, inputs.shape[-1]]'}, {}), '(nn, [-1, 1, 1, inputs.shape[-1]])', True, 'import tensorflow as tf\n'), ((32, 17, 32, 126), 'keras_cv_attention_models.attention_layers.conv2d_no_bias', 'conv2d_no_bias', (), '', False, 'from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias\n'), ((37, 26, 37, 59), 'tensorflow.split', 'tf.split', (), '', True, 'import tensorflow as tf\n'), ((41, 17, 41, 43), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((45, 18, 45, 51), 'tensorflow.split', 'tf.split', (), '', True, 'import tensorflow as tf\n'), ((46, 14, 46, 44), 'tensorflow.reduce_sum', 'tf.reduce_sum', (), '', True, 'import tensorflow as tf\n'), ((53, 12, 53, 93), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (), '', False, 'from tensorflow import keras\n'), ((55, 12, 55, 95), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (), '', False, 'from tensorflow import keras\n'), ((57, 10, 57, 33), 'tensorflow.keras.layers.Multiply', 'keras.layers.Multiply', ({}, {}), '()', False, 'from tensorflow import keras\n'), ((60, 14, 60, 44), 'tensorflow.split', 'tf.split', (), '', True, 'import tensorflow as tf\n'), ((61, 14, 61, 40), 'tensorflow.reduce_sum', 'tf.reduce_sum', (), '', True, 'import tensorflow as tf\n'), ((23, 13, 23, 47), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', ({(23, 37, 23, 46): '"""sigmoid"""'}, {}), "('sigmoid')", False, 'from tensorflow import keras\n'), ((28, 31, 28, 52), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ({}, {}), '()', True, 'from tensorflow.keras import backend as K\n'), ((64, 14, 64, 84), 'tensorflow.keras.layers.ZeroPadding2D', 'keras.layers.ZeroPadding2D', (), '', False, 'from tensorflow import keras\n'), ((65, 14, 65, 86), 'tensorflow.keras.layers.AveragePooling2D', 'keras.layers.AveragePooling2D', (), '', False, 'from tensorflow import keras\n'), ((40, 26, 40, 137), 'keras_cv_attention_models.attention_layers.conv2d_no_bias', 'conv2d_no_bias', (), '', False, 'from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias\n')] |
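The cell above only defines the `ResNest*` constructors (thin wrappers around `AotNet` with ResNeSt's split-attention settings). A minimal usage sketch, assuming TensorFlow and the `keras_cv_attention_models` package are installed and that the constructors are importable from its `resnest` module; the random input is purely illustrative:

```python
import numpy as np
from keras_cv_attention_models.resnest import ResNest50

# Build the 224x224 variant with ImageNet weights (downloaded on first use).
model = ResNest50(input_shape=(224, 224, 3), pretrained="imagenet")

# Run a dummy batch through the classifier head (softmax over 1000 classes).
dummy = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
preds = model(dummy)
print(preds.shape)   # (1, 1000)
```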
brianherman/data-act-broker-backend | dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py | 80eb055b9d245046192f7ad4fd0be7d0e11d2dec | """replace FileRequest with FileGeneration
Revision ID: 8692ab1298e1
Revises: 4bbc47f2b48d
Create Date: 2018-10-24 14:54:39.278159
"""
# revision identifiers, used by Alembic.
revision = '8692ab1298e1'
down_revision = '4bbc47f2b48d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('file_generation',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('file_generation_id', sa.Integer(), nullable=False),
sa.Column('request_date', sa.Date(), nullable=False),
sa.Column('start_date', sa.Date(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=False),
sa.Column('agency_code', sa.Text(), nullable=False),
sa.Column('agency_type', sa.Enum('awarding', 'funding', name='generation_agency_types'), server_default='awarding', nullable=False),
sa.Column('file_type', sa.Enum('D1', 'D2', name='generation_file_types'), server_default='D1', nullable=False),
sa.Column('file_path', sa.Text(), nullable=True),
sa.Column('is_cached_file', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('file_generation_id')
)
op.create_index(op.f('ix_file_generation_agency_code'), 'file_generation', ['agency_code'], unique=False)
op.create_index(op.f('ix_file_generation_agency_type'), 'file_generation', ['agency_type'], unique=False)
op.create_index(op.f('ix_file_generation_end_date'), 'file_generation', ['end_date'], unique=False)
op.create_index(op.f('ix_file_generation_file_type'), 'file_generation', ['file_type'], unique=False)
op.create_index(op.f('ix_file_generation_request_date'), 'file_generation', ['request_date'], unique=False)
op.create_index(op.f('ix_file_generation_start_date'), 'file_generation', ['start_date'], unique=False)
op.add_column('job', sa.Column('file_generation_id', sa.Integer(), nullable=True))
op.create_foreign_key('fk_file_request_file_generation_id', 'job', 'file_generation', ['file_generation_id'], ['file_generation_id'], ondelete='SET NULL')
op.drop_column('job', 'from_cached')
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('job', sa.Column('from_cached', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.drop_constraint('fk_file_request_file_generation_id', 'job', type_='foreignkey')
op.drop_column('job', 'file_generation_id')
op.drop_index(op.f('ix_file_generation_start_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_request_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_file_type'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_end_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_agency_type'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_agency_code'), table_name='file_generation')
op.drop_table('file_generation')
op.execute("""
DROP TYPE generation_agency_types
""")
op.execute("""
DROP TYPE generation_file_types
""")
# ### end Alembic commands ###
| [((53, 4, 53, 158), 'alembic.op.create_foreign_key', 'op.create_foreign_key', (), '', False, 'from alembic import op\n'), ((54, 4, 54, 40), 'alembic.op.drop_column', 'op.drop_column', ({(54, 19, 54, 24): '"""job"""', (54, 26, 54, 39): '"""from_cached"""'}, {}), "('job', 'from_cached')", False, 'from alembic import op\n'), ((61, 4, 61, 87), 'alembic.op.drop_constraint', 'op.drop_constraint', (), '', False, 'from alembic import op\n'), ((62, 4, 62, 47), 'alembic.op.drop_column', 'op.drop_column', ({(62, 19, 62, 24): '"""job"""', (62, 26, 62, 46): '"""file_generation_id"""'}, {}), "('job', 'file_generation_id')", False, 'from alembic import op\n'), ((69, 4, 69, 36), 'alembic.op.drop_table', 'op.drop_table', ({(69, 18, 69, 35): '"""file_generation"""'}, {}), "('file_generation')", False, 'from alembic import op\n'), ((70, 4, 72, 8), 'alembic.op.execute', 'op.execute', ({(70, 15, 72, 7): '"""\n DROP TYPE generation_agency_types\n """'}, {}), '("""\n DROP TYPE generation_agency_types\n """)', False, 'from alembic import op\n'), ((73, 4, 75, 8), 'alembic.op.execute', 'op.execute', ({(73, 15, 75, 7): '"""\n DROP TYPE generation_file_types\n """'}, {}), '("""\n DROP TYPE generation_file_types\n """)', False, 'from alembic import op\n'), ((44, 4, 44, 49), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', ({(44, 28, 44, 48): '"""file_generation_id"""'}, {}), "('file_generation_id')", True, 'import sqlalchemy as sa\n'), ((46, 20, 46, 58), 'alembic.op.f', 'op.f', ({(46, 25, 46, 57): '"""ix_file_generation_agency_code"""'}, {}), "('ix_file_generation_agency_code')", False, 'from alembic import op\n'), ((47, 20, 47, 58), 'alembic.op.f', 'op.f', ({(47, 25, 47, 57): '"""ix_file_generation_agency_type"""'}, {}), "('ix_file_generation_agency_type')", False, 'from alembic import op\n'), ((48, 20, 48, 55), 'alembic.op.f', 'op.f', ({(48, 25, 48, 54): '"""ix_file_generation_end_date"""'}, {}), "('ix_file_generation_end_date')", False, 'from alembic import op\n'), ((49, 20, 49, 56), 'alembic.op.f', 'op.f', ({(49, 25, 49, 55): '"""ix_file_generation_file_type"""'}, {}), "('ix_file_generation_file_type')", False, 'from alembic import op\n'), ((50, 20, 50, 59), 'alembic.op.f', 'op.f', ({(50, 25, 50, 58): '"""ix_file_generation_request_date"""'}, {}), "('ix_file_generation_request_date')", False, 'from alembic import op\n'), ((51, 20, 51, 57), 'alembic.op.f', 'op.f', ({(51, 25, 51, 56): '"""ix_file_generation_start_date"""'}, {}), "('ix_file_generation_start_date')", False, 'from alembic import op\n'), ((63, 18, 63, 55), 'alembic.op.f', 'op.f', ({(63, 23, 63, 54): '"""ix_file_generation_start_date"""'}, {}), "('ix_file_generation_start_date')", False, 'from alembic import op\n'), ((64, 18, 64, 57), 'alembic.op.f', 'op.f', ({(64, 23, 64, 56): '"""ix_file_generation_request_date"""'}, {}), "('ix_file_generation_request_date')", False, 'from alembic import op\n'), ((65, 18, 65, 54), 'alembic.op.f', 'op.f', ({(65, 23, 65, 53): '"""ix_file_generation_file_type"""'}, {}), "('ix_file_generation_file_type')", False, 'from alembic import op\n'), ((66, 18, 66, 53), 'alembic.op.f', 'op.f', ({(66, 23, 66, 52): '"""ix_file_generation_end_date"""'}, {}), "('ix_file_generation_end_date')", False, 'from alembic import op\n'), ((67, 18, 67, 56), 'alembic.op.f', 'op.f', ({(67, 23, 67, 55): '"""ix_file_generation_agency_type"""'}, {}), "('ix_file_generation_agency_type')", False, 'from alembic import op\n'), ((68, 18, 68, 56), 'alembic.op.f', 'op.f', ({(68, 23, 68, 55): '"""ix_file_generation_agency_code"""'}, {}), 
"('ix_file_generation_agency_code')", False, 'from alembic import op\n'), ((33, 28, 33, 41), 'sqlalchemy.DateTime', 'sa.DateTime', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((34, 28, 34, 41), 'sqlalchemy.DateTime', 'sa.DateTime', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((35, 36, 35, 48), 'sqlalchemy.Integer', 'sa.Integer', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((36, 30, 36, 39), 'sqlalchemy.Date', 'sa.Date', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((37, 28, 37, 37), 'sqlalchemy.Date', 'sa.Date', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((38, 26, 38, 35), 'sqlalchemy.Date', 'sa.Date', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((39, 29, 39, 38), 'sqlalchemy.Text', 'sa.Text', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((40, 29, 40, 91), 'sqlalchemy.Enum', 'sa.Enum', (), '', True, 'import sqlalchemy as sa\n'), ((41, 27, 41, 76), 'sqlalchemy.Enum', 'sa.Enum', (), '', True, 'import sqlalchemy as sa\n'), ((42, 27, 42, 36), 'sqlalchemy.Text', 'sa.Text', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((43, 32, 43, 44), 'sqlalchemy.Boolean', 'sa.Boolean', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((52, 57, 52, 69), 'sqlalchemy.Integer', 'sa.Integer', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((60, 50, 60, 62), 'sqlalchemy.BOOLEAN', 'sa.BOOLEAN', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((60, 79, 60, 95), 'sqlalchemy.text', 'sa.text', ({(60, 87, 60, 94): '"""false"""'}, {}), "('false')", True, 'import sqlalchemy as sa\n')] |
Birfy/Endlinking | cluster.py | cc87a5528498e1733111d302437aeb1142b0a47f | import numpy as np
import random
import sys
chainlength = int(sys.argv[1])
dfname = sys.argv[2]
outfl = 'result.data'
cluster_size = int(sys.argv[3])
def readsize(dfname):
with open(dfname, 'r') as df:
lines = df.readlines()
for line in lines:
content = line.split()
if content and content[-1] == 'xhi':
return 2*float(content[1])
def readdata(dfname, chainlen):
X=[]
Xi=[]
with open(dfname, 'r') as df:
lines = df.readlines()
for line in lines:
content = line.split()
if len(content) == 9:
# print(content)
if (int(content[0]) % chainlen == 0 or int(content[0]) % chainlen == 1) and int(content[2]) != 3 and int(content[2]) != 4 :
X.append([float(content[i]) for i in range(3,6)])
Xi.append(int(content[0]))
return np.array(X), np.array(Xi)
def initmeans(n):
M=[]
for i in range(n):
M.append([size*(random.random()-0.5),size*(random.random()-0.5),size*(random.random()-0.5)])
return np.array(M)
def SetDistMat(X, means):
distmat_dtype = [('key',int), ('dist',float)]
distmat = np.empty((n,k),dtype=distmat_dtype)
for i in range(n):
distmat[i,:] = [(c[0], GetDist(X[i], c[1])) for c in enumerate(means)]
distmat[i,:] = np.sort(distmat[i,:], order='dist')
return distmat
def GetDist(x, c):
dist = np.linalg.norm(x-c-boxl*np.around((x-c)/boxl))
return dist
def Get_plst(assigned, distmat, full):
plst = []
for i in range(n):
if (i not in assigned):
j = 0
while j<k:
if (not full[distmat[i,j][0]]):
bestkey = distmat[i,j][0]
mindist = distmat[i,j][1]
break
else:
j += 1
for j in range(k-1,-1,-1):
if (not full[distmat[i,j][0]]):
maxdist = distmat[i,j][1]
break
plst.append((i, bestkey, maxdist-mindist))
plst.sort(key=lambda t:t[2])
return plst
def InitialAssignment(distmat):
clusters = {}
full = np.zeros(k,dtype=bool) # a boolean array that records which clusters are full
assigned = [] # a list of objects who has been assigned to a cluster
plst = Get_plst(assigned, distmat, full)
while (len(plst)):
temp = plst.pop()
try:
if (len(clusters[temp[1]])<cluster_size):
clusters[temp[1]].append(temp[0])
assigned.append(temp[0])
else:
full[temp[1]] = True
plst = Get_plst(assigned, distmat, full)
except KeyError:
clusters[temp[1]] = [temp[0]]
assigned.append(temp[0])
return clusters
def CalcMeans(X, oldmeans, clusters):
means = np.zeros((k,3))
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
means[key] += X[i]-boxl*np.around((X[i]-oldmeans[key])/boxl)
means[key] /= len(clusters[key])
means[key] -= boxl*np.around(means[key]/boxl)
return means
def SortObj(X, clusters, means, distmat):
objlst = [] # list of objects ordered in asceding delta of the current
# assignment and the best possible alternate assignment
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
currdist = GetDist(X[i],means[key])
mindist = distmat[i,0][1]
objlst.append((i, key, currdist-mindist))
objlst.sort(key=lambda t:t[2], reverse=True)
return objlst
def Transfer(obj, clufrom, cluto, clusters):
clusters[clufrom].remove(obj)
clusters[cluto].append(obj)
return clusters
def WriteResult(file, X, means, clusters):
with open(file, 'w') as fl:
# keys = sorted(clusters.keys())
# i = 1
# for key in keys:
# for obj in clusters[key]:
# fl.write("%d\t%d\t%f\t%f\t%f\t%d\n"\
# %(obj,Xi[obj], X[obj][0], X[obj][1], X[obj][2], key))
# i = i + 1
for c in enumerate(means):
fl.write("%d\t%f\t%f\t%f"%(c[0], c[1][0], c[1][1], c[1][2]))
for obj in clusters[c[0]]:
fl.write("\t%d"%(Xi[obj]))
fl.write('\n')
# i = i + 1
return
# This function will perform statistical analysis to the clustering results
def ClusterStat(X, means, clusters):
# Average distance between means
means_avg = 0.
for i in range(k-1):
for j in range(i+1,k):
means_avg += GetDist(means[i], means[j])
means_avg /= (k*(k-1)/2.)
# Average distance between obj and mean in a cluster
obj2mean_avg = np.zeros(k)
# Variance of the distances between obj and mean in a cluster
obj2mean_var = np.zeros(k)
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
obj2mean = GetDist(X[i], means[key])
obj2mean_avg[key] += obj2mean
obj2mean_var[key] += obj2mean*obj2mean
obj2mean_avg[key] /= len(clusters[key])
obj2mean_var[key] /= len(clusters[key])
obj2mean_var[key] = np.sqrt(obj2mean_var[key])
# Average within cluster distances between objects
winclu_avg = np.zeros(k)
# Average of within cluster distances of all clusters
winclu_grandavg = 0.
for key in keys:
for i in clusters[key]:
x = X[i]
for j in clusters[key]:
if j>i:
winclu_avg[key] += GetDist(x, X[j])
s = len(clusters[key])
winclu_avg[key] /= (s*(s-1)/2)
winclu_grandavg += winclu_avg[key]
winclu_grandavg /= k
# write the summary
print("average distance among means: %f"%means_avg)
#print("average distance from objects to the mean of a cluster:")
#for i in range(k):
# print("cluster %i: %f"%(i, obj2mean_avg[i]))
#print("variance of distances from objects to the mean of a cluster:")
#for i in range(k):
# print("cluster %i: %f"%(i, obj2mean_var[i]))
#print("within-cluster average distances:")
#for i in range(k):
# print("cluster %i: %f"%(i, winclu_avg[i]))
print("grand average of within-cluster average distances: %f"%winclu_grandavg)
return
X, Xi = readdata(dfname, chainlength)
size = readsize(dfname)
boxl = np.array([size, size, size])
n = len(X)
k = int(len(X)/cluster_size)
# Set up the database of objects
# X = readdata(dfname, chainlength)
# Choose initial means with K-means
means = initmeans(k)
# Set up initial clusters
distmat = SetDistMat(X, means)
clusters = InitialAssignment(distmat)
## debug code
#keys = sorted(clusters.keys())
#for key in keys:
# print("cluster %i:"%key)
# print(clusters[key])
## end of debug
# Iteration step
for iter in range(100):
active = 0 # indicate the number of transfers in the current iteration
tranlst = (-1)*np.ones(k, dtype='int') # set up transfer list for each cluster
# Compute the cluster means
oldmeans = means.copy()
means = CalcMeans(X, oldmeans, clusters)
# Get statistics about the clustering
#ClusterStat(X, means, clusters)
## debug code
#print("old means:")
#print(oldmeans)
#print("new means:")
#print(means)
## end of debug
# For each object, compute the distances to the cluster means
distmat = SetDistMat(X, means)
# Sort objects based on the delta of the current assignment and the best
# possible alternate assignment
objlst = SortObj(X, clusters, means, distmat)
##debug code
#print(objlst)
##return
#end of debug
# For each element by prioty:
while (len(objlst)):
(i, key, temp) = objlst.pop()
obj2key = GetDist(X[i], means[key])
transferred = False #record if any transfering has occured to i
if (key == distmat[i,0][0]):
##debug
#print("%i is already the opt cluster for obj %i. no transfer"%(clu, i))
##end of debug
continue
# For each other clusters by element gain:
else:
for j in range(k):
clu = distmat[i,j][0] # the key of another cluster
objgain = obj2key - distmat[i,j][1] # gain by transfering i from cluster key to clu
if (clu==key): # already in the cluster
continue
if (len(clusters[clu]) < cluster_size):
active += 1
transferred = True
clusters = Transfer(i, key, clu, clusters)
##debug
#print("cluster %i not full. transfer obj %i from cluster %i to it."%(clu, i, key))
##end of debug
break
elif (tranlst[clu] != -1): # if the tranlst of another cluster is not empty
# distance between the obj in the tranlst and the current cluster
tran2key = GetDist(X[tranlst[clu]], means[key])
tran2clu = GetDist(X[tranlst[clu]], means[clu])
# gain by transfering the obj in tranlst from cluster clu to key
trangain = tran2clu - tran2key
if (objgain + trangain > 0): # transfer if the sum of gains are positive, ie net gain
active += 2
transferred = True
clusters = Transfer(i, key, clu, clusters)
clusters = Transfer(tranlst[clu], clu, key, clusters)
##debug
#print("obj %i is transfered from cluster %i to %i"%(i, key, clu))
#print("obj %i is transfered from cluster %i to %i"%(tranlst[clu], clu, key))
#print("objgain: %f, trangain: %f"%(objgain, trangain))
##end of debug
tranlst[clu] = -1 # reset the tranlst to empty
break
if (not transferred):
tranlst[key] = i
##debug
#print("add obj %i in cluster %i to the transfer list"%(i, key))
##end of debug
# nothing is transferred during this iteration, return the clustering result
if (not active):
break
#debug code
print("number of transfers in iter %i: %i\n"%(iter+1, active))
#end of debug
print("K-means clustering converged in %d iterations!\n"%(iter+1))
# Output the clustering results
WriteResult(outfl, X, means, clusters)
ClusterStat(X, means, clusters)
# print(X)
| [((186, 7, 186, 35), 'numpy.array', 'np.array', ({(186, 16, 186, 34): '[size, size, size]'}, {}), '([size, size, size])', True, 'import numpy as np\n'), ((36, 11, 36, 22), 'numpy.array', 'np.array', ({(36, 20, 36, 21): 'M'}, {}), '(M)', True, 'import numpy as np\n'), ((40, 14, 40, 49), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((72, 11, 72, 33), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((90, 12, 90, 27), 'numpy.zeros', 'np.zeros', ({(90, 21, 90, 26): '(k, 3)'}, {}), '((k, 3))', True, 'import numpy as np\n'), ((142, 19, 142, 30), 'numpy.zeros', 'np.zeros', ({(142, 28, 142, 29): 'k'}, {}), '(k)', True, 'import numpy as np\n'), ((144, 19, 144, 30), 'numpy.zeros', 'np.zeros', ({(144, 28, 144, 29): 'k'}, {}), '(k)', True, 'import numpy as np\n'), ((155, 17, 155, 28), 'numpy.zeros', 'np.zeros', ({(155, 26, 155, 27): 'k'}, {}), '(k)', True, 'import numpy as np\n'), ((30, 11, 30, 22), 'numpy.array', 'np.array', ({(30, 20, 30, 21): 'X'}, {}), '(X)', True, 'import numpy as np\n'), ((30, 24, 30, 36), 'numpy.array', 'np.array', ({(30, 33, 30, 35): 'Xi'}, {}), '(Xi)', True, 'import numpy as np\n'), ((43, 23, 43, 58), 'numpy.sort', 'np.sort', (), '', True, 'import numpy as np\n'), ((153, 28, 153, 54), 'numpy.sqrt', 'np.sqrt', ({(153, 36, 153, 53): 'obj2mean_var[key]'}, {}), '(obj2mean_var[key])', True, 'import numpy as np\n'), ((208, 19, 208, 42), 'numpy.ones', 'np.ones', (), '', True, 'import numpy as np\n'), ((96, 27, 96, 53), 'numpy.around', 'np.around', ({(96, 37, 96, 52): '(means[key] / boxl)'}, {}), '(means[key] / boxl)', True, 'import numpy as np\n'), ((47, 35, 47, 56), 'numpy.around', 'np.around', ({(47, 45, 47, 55): '(x - c) / boxl'}, {}), '((x - c) / boxl)', True, 'import numpy as np\n'), ((94, 36, 94, 72), 'numpy.around', 'np.around', ({(94, 46, 94, 71): '((X[i] - oldmeans[key]) / boxl)'}, {}), '((X[i] - oldmeans[key]) / boxl)', True, 'import numpy as np\n'), ((35, 24, 35, 39), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((35, 51, 35, 66), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((35, 78, 35, 93), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n')] |
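`GetDist` and `CalcMeans` in the clustering script above apply the minimum-image convention: displacements are shifted by whole box lengths (`boxl * np.around(delta / boxl)`) so that points near opposite faces of the periodic box are measured as close neighbours. A small self-contained sketch with a hypothetical 10x10x10 box:

```python
import numpy as np

boxl = np.array([10.0, 10.0, 10.0])   # hypothetical box edge lengths
x = np.array([4.9, 0.0, 0.0])
c = np.array([-4.9, 0.0, 0.0])

plain = np.linalg.norm(x - c)                                          # ~9.8, ignores periodicity
minimum_image = np.linalg.norm(x - c - boxl * np.around((x - c) / boxl))
print(plain, minimum_image)                                            # ~9.8 ~0.2
```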
isunchy/cuboid_abstraction | util/hierarchical_primitive/cube_inclusion.py | afda6ca8516c2f5e5e7292b3b22a059a4f6c84ec | import numpy as np
import quaternion
sample_points = np.array([[-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2
,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0],
[-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0
.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.8,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.6,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.4,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0],
[-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0
.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0,-1.0,-0.8,-0.6,-0.4,-0.2,0.0,0.2,0.4,0.6,0.8,1.0]
], dtype=np.float32) # [3, n]
sample_points = np.transpose(sample_points) # [n, 3]
def cube_inclusion(cube_param_1, cube_param_2):
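    """Assign every child cube to the parent cube that best contains it.
    Each cube is described by a per-axis scale 'z', an orientation quaternion 'q'
    and a translation 't'. Points sampled on the [-1, 1] cube are mapped into each
    child cube's frame, then into each candidate parent's frame, and the mean
    squared distance of the points lying outside the parent's extents is used as
    the containment cost. Returns an array of length n_cube_1 holding, for each
    child cube, the index of the closest parent cube.
    """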
n_cube_1 = cube_param_1['z'].shape[0] # child
n_cube_2 = cube_param_2['z'].shape[0] # parent
assert(n_cube_1 > n_cube_2)
assert(cube_param_1['q'].shape[0] == cube_param_1['t'].shape[0] == n_cube_1)
assert(cube_param_2['q'].shape[0] == cube_param_2['t'].shape[0] == n_cube_2)
n_point = sample_points.shape[0]
cube_cube_distance = np.zeros([n_cube_1, n_cube_2])
for i in range(n_cube_1):
z1, q1, t1 = [cube_param_1[v][i] for v in ['z', 'q', 't']]
for j in range(n_cube_2):
z2, q2, t2 = [cube_param_2[v][j] for v in ['z', 'q', 't']]
points = sample_points * z1
rot1 = np.quaternion(q1[0], q1[1], q1[2], q1[3])
rot1 = quaternion.as_rotation_matrix(rot1)
points = np.transpose(np.matmul(rot1, np.transpose(points)))
points += t1
points -= t2
rot2 = np.quaternion(q2[0], q2[1], q2[2], q2[3]).conjugate()
rot2 = quaternion.as_rotation_matrix(rot2)
points = np.transpose(np.matmul(rot2, np.transpose(points)))
distance = np.mean(np.sum(np.maximum(abs(points) - z2, 0)**2, axis=1))
cube_cube_distance[i, j] = distance
index = np.argmin(cube_cube_distance, axis=1)
return index
def generate_sample_cube_points(resolution=11):
    """Write a regular grid of sample points covering [-1, 1] on each axis to sample_points.txt."""
    sample_points = np.zeros([resolution, resolution, resolution, 3], dtype=np.float32)
    location_template = np.linspace(-1.0, 1.0, num=resolution)
    for i in range(resolution):
        for j in range(resolution):
            for k in range(resolution):
                sample_points[i, j, k, 0] = location_template[i]
                sample_points[i, j, k, 1] = location_template[j]
                sample_points[i, j, k, 2] = location_template[k]
    np.savetxt('sample_points.txt', np.transpose(np.reshape(sample_points, [-1, 3])),
               fmt='%1.1f', delimiter=',')
if __name__ == '__main__':
# generate_sample_cube_points()
z1 = np.array([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])
q1 = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
t1 = np.array([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.4, 0.4, 0.4]])
cube_param_1 = {'z': z1, 'q': q1, 't': t1}
z2 = np.array([[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]])
q2 = np.array([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
t2 = np.array([[0.2, 0.2, 0.2], [0.3, 0.3, 0.3]])
cube_param_2 = {'z': z2, 'q': q2, 't': t2}
index = cube_inclusion(cube_param_1, cube_param_2)
print(index)
assert((index == np.array([0, 0, 1])).all())
| [((5, 16, 8, 46), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((9, 16, 9, 43), 'numpy.transpose', 'np.transpose', ({(9, 29, 9, 42): 'sample_points'}, {}), '(sample_points)', True, 'import numpy as np\n'), ((19, 23, 19, 53), 'numpy.zeros', 'np.zeros', ({(19, 32, 19, 52): '[n_cube_1, n_cube_2]'}, {}), '([n_cube_1, n_cube_2])', True, 'import numpy as np\n'), ((35, 10, 35, 47), 'numpy.argmin', 'np.argmin', (), '', True, 'import numpy as np\n'), ((40, 18, 40, 85), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((41, 22, 41, 52), 'numpy.linspace', 'np.linspace', (), '', True, 'import numpy as np\n'), ((54, 7, 54, 68), 'numpy.array', 'np.array', ({(54, 16, 54, 67): '[[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.1, 0.1, 0.1]]'}, {}), '([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])', True, 'import numpy as np\n'), ((55, 7, 55, 83), 'numpy.array', 'np.array', ({(55, 16, 55, 82): '[[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]'}, {}), '([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])', True, 'import numpy as np\n'), ((56, 7, 56, 68), 'numpy.array', 'np.array', ({(56, 16, 56, 67): '[[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.4, 0.4, 0.4]]'}, {}), '([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1], [0.4, 0.4, 0.4]])', True, 'import numpy as np\n'), ((58, 7, 58, 51), 'numpy.array', 'np.array', ({(58, 16, 58, 50): '[[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]]'}, {}), '([[0.1, 0.1, 0.1], [0.2, 0.2, 0.2]])', True, 'import numpy as np\n'), ((59, 7, 59, 61), 'numpy.array', 'np.array', ({(59, 16, 59, 60): '[[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]'}, {}), '([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])', True, 'import numpy as np\n'), ((60, 7, 60, 51), 'numpy.array', 'np.array', ({(60, 16, 60, 50): '[[0.2, 0.2, 0.2], [0.3, 0.3, 0.3]]'}, {}), '([[0.2, 0.2, 0.2], [0.3, 0.3, 0.3]])', True, 'import numpy as np\n'), ((25, 13, 25, 54), 'numpy.quaternion', 'np.quaternion', ({(25, 27, 25, 32): 'q1[0]', (25, 34, 25, 39): 'q1[1]', (25, 41, 25, 46): 'q1[2]', (25, 48, 25, 53): 'q1[3]'}, {}), '(q1[0], q1[1], q1[2], q1[3])', True, 'import numpy as np\n'), ((26, 13, 26, 48), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', ({(26, 43, 26, 47): 'rot1'}, {}), '(rot1)', False, 'import quaternion\n'), ((31, 13, 31, 48), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', ({(31, 43, 31, 47): 'rot2'}, {}), '(rot2)', False, 'import quaternion\n'), ((48, 47, 48, 81), 'numpy.reshape', 'np.reshape', ({(48, 58, 48, 71): 'sample_points', (48, 73, 48, 80): '[-1, 3]'}, {}), '(sample_points, [-1, 3])', True, 'import numpy as np\n'), ((64, 19, 64, 38), 'numpy.array', 'np.array', ({(64, 28, 64, 37): '[0, 0, 1]'}, {}), '([0, 0, 1])', True, 'import numpy as np\n'), ((27, 44, 27, 64), 'numpy.transpose', 'np.transpose', ({(27, 57, 27, 63): 'points'}, {}), '(points)', True, 'import numpy as np\n'), ((30, 13, 30, 54), 'numpy.quaternion', 'np.quaternion', ({(30, 27, 30, 32): 'q2[0]', (30, 34, 30, 39): 'q2[1]', (30, 41, 30, 46): 'q2[2]', (30, 48, 30, 53): 'q2[3]'}, {}), '(q2[0], q2[1], q2[2], q2[3])', True, 'import numpy as np\n'), ((32, 44, 32, 64), 'numpy.transpose', 'np.transpose', ({(32, 57, 32, 63): 'points'}, {}), '(points)', True, 'import numpy as np\n')] |
kayduemre/ituro | ituro/accounts/tests.py | eb5bb0655c2d85eed212d28c1d154006c57a4f03 | from django.test import TestCase
from django.utils import timezone
from accounts.models import CustomUser, CustomUserManager
class UserCreateTestCase(TestCase):
def test_create_user_correctly(self):
"Creating users correctly"
new_user = CustomUser.objects.create(
email="[email protected]",
name="Participant Name",
phone="09876543210",
school="Some University",
            is_staff=False,
            is_active=True,
date_joined=timezone.now())
self.assertTrue(isinstance(new_user, CustomUser))
self.assertEqual(new_user.get_full_name(), "Participant Name")
self.assertEqual(new_user.get_short_name(), "Participant Name")
| [((17, 24, 17, 38), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n')] |
oleksost/continuum | continuum/datasets/dtd.py | 682d66540bfbfa171ac73281ed2989f9338e88bf | import os
from typing import List
import numpy as np
from torchvision import datasets as torchdata
from continuum.datasets import ImageFolderDataset
from continuum import download
from continuum.tasks import TaskType
class DTD(ImageFolderDataset):
"""Describable Textures Dataset (DTD)
Reference:
* Describing Textures in the Wild
M. Cimpoi and S. Maji and I. Kokkinos and S. Mohamed and and A. Vedaldi
CVPR 2014
"""
url = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
def __init__(self, data_path: str, train: bool = True, download: bool = True, split: int = 1):
super().__init__(data_path=data_path, train=train, download=download, data_type=TaskType.IMAGE_PATH)
if not (1 <= int(split) <= 10):
raise ValueError(f"Available splits are [1, ..., 10], not {split}")
self.split = split
def _download(self):
archive_path = os.path.join(self.data_path, "dtd-r1.0.1.tar.gz")
if not os.path.exists(archive_path):
print("Downloading DTD dataset...")
download.download(self.url, self.data_path)
if not os.path.exists(os.path.join(self.data_path, "dtd")):
print("Uncompressing images...")
download.untar(archive_path)
def get_data(self):
x, y, t = self._format(torchdata.ImageFolder(os.path.join(self.data_path, "dtd", "images")).imgs)
if self.train:
index_files = [
os.path.join(self.data_path, "dtd", "labels", f"train{str(self.split)}.txt"),
os.path.join(self.data_path, "dtd", "labels", f"val{str(self.split)}.txt")
]
else:
index_files = [
os.path.join(self.data_path, "dtd", "labels", f"test{str(self.split)}.txt")
]
valid_paths = set()
for index_file in index_files:
with open(index_file) as f:
valid_paths.update(
map(lambda p: os.path.join(self.data_path, "dtd", "images", p.strip()),
f.readlines()
)
)
valid_paths = np.array(list(valid_paths))
indexes = np.isin(x, valid_paths)
return x[indexes], y[indexes], None
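# Illustrative usage sketch, not part of the original module: "./data" below is a
# placeholder download location chosen only for this example.
if __name__ == "__main__":
    # Fetch (downloading if necessary) the train portion of split 1 and report its size.
    dtd_train = DTD("./data", train=True, download=True, split=1)
    x, y, _ = dtd_train.get_data()
    print(f"DTD split 1 train set: {len(x)} images, {len(np.unique(y))} classes")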
| [((30, 23, 30, 72), 'os.path.join', 'os.path.join', ({(30, 36, 30, 50): 'self.data_path', (30, 52, 30, 71): '"""dtd-r1.0.1.tar.gz"""'}, {}), "(self.data_path, 'dtd-r1.0.1.tar.gz')", False, 'import os\n'), ((60, 18, 60, 41), 'numpy.isin', 'np.isin', ({(60, 26, 60, 27): 'x', (60, 29, 60, 40): 'valid_paths'}, {}), '(x, valid_paths)', True, 'import numpy as np\n'), ((31, 15, 31, 43), 'os.path.exists', 'os.path.exists', ({(31, 30, 31, 42): 'archive_path'}, {}), '(archive_path)', False, 'import os\n'), ((33, 12, 33, 55), 'continuum.download.download', 'download.download', ({(33, 30, 33, 38): 'self.url', (33, 40, 33, 54): 'self.data_path'}, {}), '(self.url, self.data_path)', False, 'from continuum import download\n'), ((36, 12, 36, 40), 'continuum.download.untar', 'download.untar', ({(36, 27, 36, 39): 'archive_path'}, {}), '(archive_path)', False, 'from continuum import download\n'), ((34, 30, 34, 65), 'os.path.join', 'os.path.join', ({(34, 43, 34, 57): 'self.data_path', (34, 59, 34, 64): '"""dtd"""'}, {}), "(self.data_path, 'dtd')", False, 'import os\n'), ((39, 53, 39, 98), 'os.path.join', 'os.path.join', ({(39, 66, 39, 80): 'self.data_path', (39, 82, 39, 87): '"""dtd"""', (39, 89, 39, 97): '"""images"""'}, {}), "(self.data_path, 'dtd', 'images')", False, 'import os\n')] |
Doometnick/MaxiMin-2048 | src/tests/testdata.py | f1d795ec07fffe1aa239c105cf522d2c3bc9b011 | from board import Direction
# Tuples of input, action, expected output.
moving_tests = [
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.UP,
[[8,0,2,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.DOWN,
[[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[8,0,2,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.LEFT,
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,2,0,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.RIGHT,
[[0,0,0,0],
[0,0,0,4],
[0,0,0,0],
[0,0,4,2]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.RIGHT,
[[0,0,8,8],
[0,0,16,4],
[0,0,32,32],
[16,8,2,4]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.LEFT,
[[8,8,0,0],
[16,4,0,0],
[32,32,0,0],
[16,8,2,4]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.UP,
[[4,4,4,8],
[8,16,8,16],
[32,8,2,4],
[16,0,0,0]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.DOWN,
[[4,0,0,0],
[8,4,4,8],
[32,16,8,16],
[16,8,2,4]]
)
] | [] |
HansBug/pji | test/utils/test_value.py | 449d171cea0c03f4c302da886988f36f70e34ee6 | import pytest
from pji.utils import ValueProxy
@pytest.mark.unittest
class TestUtilsValue:
def test_value_proxy_init(self):
value = ValueProxy()
assert value.value is None
value = ValueProxy(233)
assert value.value == 233
def test_value_proxy_set(self):
value = ValueProxy()
value.value = 233
assert value.value == 233
value.value = -27
assert value.value == -27
| [((9, 16, 9, 28), 'pji.utils.ValueProxy', 'ValueProxy', ({}, {}), '()', False, 'from pji.utils import ValueProxy\n'), ((12, 16, 12, 31), 'pji.utils.ValueProxy', 'ValueProxy', ({(12, 27, 12, 30): '233'}, {}), '(233)', False, 'from pji.utils import ValueProxy\n'), ((16, 16, 16, 28), 'pji.utils.ValueProxy', 'ValueProxy', ({}, {}), '()', False, 'from pji.utils import ValueProxy\n')] |
Ebenazer-2002/library-management | intro.py | 8c1ededc7167d2221a3947abfeec4773da39dca9 | #Intro Page
from tkinter import *
from PIL import Image, ImageTk
import cv2
#----------------------------Start Function--------------------------#
def start(event):
label1.destroy()
import log
win.destroy()
log.main()
#------------------------Main Window---------------------------------#
def main_window():
global win
global label1
win = Tk()
win.title('Library Management System')
win.iconbitmap("images/main_icon.ico")
win.bind('<Key>', start) # start function on pressing any key
win.state('zoomed')
# opens video
cap = cv2.VideoCapture("images/vid.MP4")
global n
n = 0
#-----------------------------------------------------------------
# defining show function
def show():
global n # frame count
n = n+1
if n <= 30:
rest, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image).resize((1600, 850))
imgtk = ImageTk.PhotoImage(image=img)
label1.imgtk = imgtk
label1.configure(image=imgtk)
win.after(10, show)
else:
label1.destroy()
frm = Frame(win, bg='black')
frm.place(relx=0, rely=0, relwidth=1, relheight=1)
label = Label(frm, text='Press any Key to continue',
bg='black', fg='white')
label.place(relx=0.45, rely=0.5)
#-----------------------------------------------------------------
label1 = Label(win)
label1.place(relx=0, rely=0, relheight=1, relwidth=1)
show()
win.mainloop()
#-----------------------------------------------------------------
main_window()
| [((12, 4, 12, 14), 'log.main', 'log.main', ({}, {}), '()', False, 'import log\n'), ((26, 10, 26, 44), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(26, 27, 26, 43): '"""images/vid.MP4"""'}, {}), "('images/vid.MP4')", False, 'import cv2\n'), ((37, 23, 37, 62), 'cv2.cvtColor', 'cv2.cvtColor', ({(37, 36, 37, 41): 'frame', (37, 43, 37, 61): 'cv2.COLOR_BGR2RGBA'}, {}), '(frame, cv2.COLOR_BGR2RGBA)', False, 'import cv2\n'), ((39, 20, 39, 49), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (), '', False, 'from PIL import Image, ImageTk\n'), ((38, 18, 38, 43), 'PIL.Image.fromarray', 'Image.fromarray', ({(38, 34, 38, 42): 'cv2image'}, {}), '(cv2image)', False, 'from PIL import Image, ImageTk\n')] |
hugh9876/04-multivariate-analysis | notebooks/week4_help.py | 0541962842df8844aa323c368f8a4e44999c2d7f | """
This module provides helper functions to support exercises during AM1
with outliers, robust regression and template regression in the CORE
data analytics workshop series, week 4.
"""
import numpy as np
import pandas as pd
import math
from collections import namedtuple
def recovery_sulphur_dataframe_with_outliers(outlier_probability):
"""Return dataframe representing recovery as a function of sulphur.
Parameters:
----------
outlier_probability:
This floating point parameter should range between 0 and 1
and is probability of an observation being an outlier.
Returns:
-------
Pandas dataframe:
A dataframe is returned with two series, the first being observed
recovery, and the second being sulphur %. The data may be sampled
from the true underlying relationship, plus gaussian noise, or
may be an outlier value taken from a non-gaussian distribution.
The proportion of outliers to non-outliers will depend on
the outlier_probability parameter.
"""
# Check that the outlier_probability is an ordinary number.
assert isinstance(outlier_probability, (float, int))
# As it's a probability, ensure that it ranges between 0 and 1.
assert outlier_probability >= 0.0
assert outlier_probability <= 1.0
# If no exceptions have been thrown then we likely have a valid input.
# Get 50 pairs of sulphur features and recovery labels
sulphur_percent = _draw_sulphur_observations(50)
recovery_percent = _observe_recovery(sulphur_percent,
outlier_probability)
return pd.DataFrame({'metal_recovery_percent': recovery_percent,
'feed_sulphur_percent': sulphur_percent})
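# Illustrative usage sketch, not part of the original module: the _demo_* helper
# below simply exercises the function above with a 10% outlier probability.
def _demo_recovery_dataframe():
    """Return summary statistics for a 50-row recovery/sulphur dataframe."""
    recovery_df = recovery_sulphur_dataframe_with_outliers(0.1)
    # Two columns: metal_recovery_percent and feed_sulphur_percent.
    return recovery_df.describe()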
def _initialise_randomstate(seed):
""" Use RandomState object with seed set."""
return np.random.RandomState(seed)
def _draw_sulphur_observations(count):
rs = _initialise_randomstate(7)
# draw "count" sulphur observations from a uniform distribution of
# sulphur percentages between 0.15% and 1.35%
sulphur_percent = rs.uniform(0.15, 1.35, count)
return sulphur_percent
def _draw_dilithium_observations(count):
rs = _initialise_randomstate(8)
return rs.uniform(25, 35, count)
def _draw_kryptonite_observations(count):
rs = _initialise_randomstate(9)
return rs.uniform(20, 25, count)
def _draw_unobtainium_observations(count):
rs = _initialise_randomstate(10)
return rs.uniform(0, 7, count)
def _draw_quartz_observations(count):
rs = _initialise_randomstate(11)
return rs.uniform(25, 35, count)
def _observe_recovery(sulphur_percent, outlier_probability):
"""Returns an array of metal recoveries.
This method returns an array of metal recoveries given both
an array of sulphur percentages and the probability of an
outlier being observed.
"""
recovery_percent = np.zeros_like(sulphur_percent)
is_outlier = _is_outlier(outlier_probability, len(sulphur_percent))
for index in range(0, len(recovery_percent)):
if is_outlier[index]:
            recovery_percent[index] = _return_outlier_model_of_recovery(sulphur_percent[index])
        else:
            recovery_percent[index] = _noise_free_model_of_recovery(sulphur_percent[index])
return recovery_percent
def _noise_free_model_of_recovery(sulphur):
"""This method returns a metal recovery for a given sulphur %."""
return 74.81 - 6.81/sulphur
def _return_outlier_model_of_recovery(sulphur):
return (74.81 - 6.81/sulphur)/3
def _is_outlier(outlier_probability, how_many):
"""Return true/false numpy array
"""
rs = _initialise_randomstate(5)
uniformly_distributed = rs.uniform(0, 1, how_many)
is_outlier = np.zeros_like(uniformly_distributed)
for index in range(0, len(is_outlier)):
        is_outlier[index] = uniformly_distributed[index] > (1 - outlier_probability)
return is_outlier
def add_gaussian_noise(noise_free_input, mean, sigma):
"""Adds gaussian noise to vector, given mean and sigma
"""
bins = len(noise_free_input)
noise = np.random.normal(mean, sigma, bins)
return noise_free_input + noise
def gaussian_fwhm_pdf(X, height, x_position, fwhm):
"""Returns guassian probability distribution function, given FWHM
This computes a gaussian probability density function (pdf) given a
Full Width at Half Maximum (FWHM) instead of standard deviation, and
scales it by the height parameters. If the height is one, then the
area of the guassian will also be unity, as required for a pdf, and
for preserving area when used as an impulse response function in
convolution operations.
Note, this returns the function, it does not sample from the
distribution.
"""
return gaussian_pdf(X, height, x_position, fwhm / (2 * math.sqrt(2 * math.log(2))))
def gaussian_pdf(X, area, x_position, standard_deviation):
"""Returns gaussian probability distribution function multiplied by area.
This computes a gaussian with unit area and multiplies it
by the area parameter. It is translated to be centered
on x_position and has the width specified by standard_deviation.
    Unit area gaussians are used as probability distribution functions,
and are also important in convolutions, as area of the convolution
of two functions is the product of their areas. If it is important
for the convolution to preserve area of a function when convolved
with a gaussian then that gaussian needs to have unit area. Preserving
area also implies conservation of energy in many physical models.
    It can be shown that the integral of the gaussian function is unity
    when the gaussian's height is scaled as a function of standard_deviation
    as:
    height_scaling = 1/(standard_deviation*sqrt(2*pi))
    So this function multiplies the height of the gaussian by this factor and
    then multiplies this result by the area parameter that is passed in.
    If the area parameter is 1, then the area of this gaussian will also
    be 1 for all standard deviations, otherwise the area will be set by the
    area parameter. The relationship between height and area, and the scaling
    of height by the second parameter below, will be made clearer by
    also studying the gaussian function.
"""
return gaussian(X, area / (standard_deviation * math.sqrt(2 * math.pi)), x_position,
standard_deviation)
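# Illustrative sketch, not part of the original module: the _demo_* helper below
# numerically confirms that gaussian_pdf integrates to the requested area, as
# described in its docstring.
def _demo_gaussian_pdf_area():
    """Integrate a gaussian_pdf curve and compare the result with its area parameter."""
    x_values = np.linspace(-50.0, 50.0, 10001)
    curve = gaussian_pdf(x_values, area=2.5, x_position=0.0, standard_deviation=1.5)
    # np.trapz gives ~2.5, matching the area parameter passed in above.
    return np.trapz(curve, x_values)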
def gaussian(X, height, x_position, standard_deviation):
"""Return standard gaussian function
This is the unnormalised gaussian function
f(x)=height*exp(-(x-x_position)^2/(2*standard_deviation^2))
Parameters
----------
height:
This is the maximum of the gaussian peak.
This function does not normalise to constant area, the caller
must do this if this is what they want.
x_position:
This is the x position of the centre of the gaussian. If the
        gaussian is being used to apply the impulse response of an
instrument applied to an XRD reflection, then this will be the
two-theta position of the peak.
standard_deviation:
        The standard deviation of the gaussian curve.
If this function is being applied in spectroscopy, optics or
electrical engineering, it is common for gaussians to be
defined in terms of Full Width at Half Maximum (FWHM), which
is the width of the peak when the height drops to half
of the peak height, specified by the height parameter. If
the x-axis represents frequency, and the function height
is proportional to energy or power, then this will be the
gaussian's bandwidth, that is, the width between the -3db points.
To convert from FWHM to standard deviation use the relationship:
FWHM = 2*sqrt(2*log(2)) * standard_deviation
Returns
-------
double:
Evaluated gaussian function.
"""
return height * math.e**(-(X - x_position)**2 / 2 / standard_deviation**2)
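# Illustrative sketch, not part of the original module: the _demo_* helper below is
# a quick numerical check of the FWHM relationship quoted in the docstrings above.
def _demo_fwhm_relationship():
    """Show that the curve falls to half its peak height at x_position +/- FWHM/2."""
    fwhm = 3.0
    sigma = fwhm / (2 * math.sqrt(2 * math.log(2)))
    peak = gaussian(10.0, height=1.0, x_position=10.0, standard_deviation=sigma)
    half_width_point = gaussian(10.0 + fwhm / 2, height=1.0, x_position=10.0,
                                standard_deviation=sigma)
    # peak is 1.0 and half_width_point is ~0.5, matching FWHM = 2*sqrt(2*log(2))*sigma.
    return peak, half_width_point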
class MultichannelXAxis:
"""Set up an X axis for isntrument
This object is set up with three inputs, min_x is the minimum value
on the axis. In the example I've chosen 5. The max_x
value is the highest value on the x axis, and spacing is
the x spacing between channels. In the example I've chosen
a max_x of 90 and spacing of 0.2. The unit is two-theta
degrees, and this unit (and the axis values) come from the
world of x-ray diffraction (XRD). We're describing the x-axis
of a low resolution XRD instrument.
The object's as_vector method can return the x_axis as an array
of numbers using numpy's linspace method, which we've already used
for plotting and other purposes.
"""
def __init__(self, min_x, max_x, spacing):
self._min = min_x
self._max = max_x
self._spacing = spacing
self._channel_count = \
round((self.max - self.min) / self.spacing + 1)
self._label = "r'$2\theta$ (degrees)"
@property
def min(self):
"""Return minimum two-theta for diffractogram x-axis."""
return self._min
@property
def max(self):
"""Return maximum two-theta for diffractogram x-axis."""
return self._max
@property
def spacing(self):
"""Return channel spacing in two-theta for diffractogram x-axis."""
return self._spacing
@property
def channel_count(self):
"""Return the count of channels in this diffractogram."""
return self._channel_count
@property
def label(self):
"""Return the x-axis label, for use with plot and report generation."""
return self._label
@property
def as_vector(self):
"""Return a numpy vector containing two-theta values for each channel."""
x_axis_vector = np.linspace(self.min, self.max, self.channel_count)
return x_axis_vector
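# Illustrative usage sketch, not part of the original module: the _demo_* helper
# below builds the 5-90 degree axis with 0.2 degree spacing described in the class
# docstring above.
def _demo_multichannel_x_axis():
    """Build the example diffractogram axis and report its channel count."""
    x_axis = MultichannelXAxis(5, 90, 0.2)
    # (90 - 5) / 0.2 + 1 = 426 channels, starting at 5.0, 5.2, 5.4, ...
    return x_axis.channel_count, x_axis.as_vector[:3]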
def _apply_convolution_kernals(x_axis_vector, intensity, two_theta_angle,
instrument_broadening_fwhm,
reflection_broadening_fwhm):
"""Apply gaussian kernel for instrument broadening only."""
def _add_gaussian_fwhms(fwhm1, fwhm2):
sigma_fwhm_conversion_constant = 2*math.sqrt(2*math.log(2))
sigma_1 = fwhm1/sigma_fwhm_conversion_constant
sigma_2 = fwhm2/sigma_fwhm_conversion_constant
#squares of std_dev (ie sigma^2 which is variance) are additive
sigma_summed = math.sqrt(sigma_1*sigma_1 + sigma_2*sigma_2)
return sigma_summed*sigma_fwhm_conversion_constant
fwhm = _add_gaussian_fwhms (instrument_broadening_fwhm,
reflection_broadening_fwhm)
return gaussian_fwhm_pdf(x_axis_vector, intensity, two_theta_angle,
fwhm)
def create_templates_matrix():
"""Create templates for four test pure components.
This creates templates for quartz, dilithium, kryptonite and
unobtainium, in that order. The templates are returned
in an array where the first column is quartz, and the last is
unobtainium. If you plot them, you'll see gently varying
squiggly lines.
"""
# Create a templates matrix containing space for four templates, plus
# a column of ones.
x_axis = MultichannelXAxis(5, 90, 0.2)
template_count = 4
templates_matrix = np.zeros((x_axis.channel_count, template_count+1))
# set 4 two-theta units of instrument broadening
instrument_broadening = 4
# create a tuple for each reflection, and add it to a list. The loop
# then grabs each reflection from the list and then adds it to the
# template. The first value in the tuple is intensity, the second
# two-theta angle and the third is how much broadening to apply.
Reflection = namedtuple('Reflection', ('intensity', 'two_theta', 'broadening'))
quartz_reflections = []
quartz_reflections.append (Reflection(intensity=10.0, two_theta=25.0, broadening=3.0))
quartz_reflections.append (Reflection(13.0, 38.0, 6.0))
quartz_reflections.append (Reflection(10.0, 43.0, 2.0))
quartz_reflections.append (Reflection(25.0, 60, 2.0))
dilithium_reflections = []
dilithium_reflections.append (Reflection(25.0, 80, 1.0))
kryptonite_reflections = []
#kryptonite_reflections.append (Reflection(intensity=12.0, two_theta=25.0, broadening=9.0))
kryptonite_reflections.append (Reflection(17.0, 12.0, 1.0))
kryptonite_reflections.append (Reflection(19.0, 43.0, 12.0))
#kryptonite_reflections.append (Reflection(4.0, 70, 2.0))
#kryptonite_reflections.append (Reflection(32.0, 74, 2.0))
unobtainium_reflections = []
#unobtainium_reflections.append (Reflection(intensity=4.0, two_theta=25.0, broadening=12.0))
unobtainium_reflections.append (Reflection(5.0, 18.0, 6.0))
unobtainium_reflections.append (Reflection(1.0, 23.0, 1.0))
unobtainium_reflections.append (Reflection(5.0, 31.0, 2.0))
unobtainium_reflections.append (Reflection(3.0, 55.0, 6.0))
unobtainium_reflections.append (Reflection(7.0, 58.0, 1.0))
#unobtainium_reflections.append (Reflection(5.0, 80, 2.0))
phases=[]
# create four phases
phases.append(quartz_reflections)
phases.append(dilithium_reflections)
phases.append(kryptonite_reflections)
phases.append(unobtainium_reflections)
for phase_idx in range(0, template_count):
for a_reflection in phases[phase_idx]:
contribution_of_this_reflection = \
_apply_convolution_kernals(
x_axis.as_vector,
a_reflection.intensity,
a_reflection.two_theta,
instrument_broadening,
a_reflection.broadening)
templates_matrix[:, phase_idx] += \
contribution_of_this_reflection
# set the last column to be all ones
templates_matrix[:, template_count] = \
np.ones(x_axis.channel_count)
return templates_matrix
def create_composition_dataframe(observations_count):
"""Create a dataframe of observations of drilling samples
Returns:
Pandas DataFrame with observations_count observations.
The dataframe has four columns representing the amount
of quartz, dilithium, kryptonite and unobtainium present.
These values are drawn from uniform distributions."""
unobtainium = _draw_unobtainium_observations (observations_count)
dilithium = _draw_dilithium_observations(observations_count)
kryptonite = _draw_kryptonite_observations(observations_count)
quartz = _draw_quartz_observations(observations_count)
# Create clusters by imposing a relationship between quartz
# and dilithium.
for observation_idx in range(0, observations_count):
if quartz[observation_idx] > 30:
dilithium[observation_idx] = 5
if dilithium[observation_idx] > 30:
quartz[observation_idx] = 5
return pd.DataFrame({'Quartz': quartz,
'Dilithium': dilithium,
'Kryptonite': kryptonite,
'Unobtainium': unobtainium})
def create_observations(compositions_dataframe, templates):
"""Create a new array containing synthetic observations"""
observations_count = len(compositions_dataframe)
channels_count = len(templates[:,0])
observations_matrix = np.zeros((channels_count, observations_count))
for observation_idx in range (0, observations_count):
observations_matrix[:, observation_idx] = \
templates[:,0]*compositions_dataframe['Quartz'][observation_idx] + \
templates[:,1]*compositions_dataframe['Dilithium'][observation_idx] + \
templates[:,2]*compositions_dataframe['Kryptonite'][observation_idx] + \
templates[:,3]*compositions_dataframe['Unobtainium'][observation_idx]
# add gaussian noise. If you have time, try increasing this and watch
# prediction performance fall over.
observations_matrix[:, observation_idx] = \
add_gaussian_noise(observations_matrix[:, observation_idx], 10, 3)
return observations_matrix
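# Illustrative end-to-end sketch, not part of the original module: the _demo_*
# helper below combines the template and composition helpers above into one
# synthetic observation matrix.
def _demo_synthetic_observations():
    """Generate templates, 10 synthetic compositions and the matching observations."""
    templates = create_templates_matrix()            # shape (channels, 5)
    compositions = create_composition_dataframe(10)  # 10 synthetic drill samples
    observations = create_observations(compositions, templates)
    # One column per sample, one row per two-theta channel.
    return templates.shape, observations.shape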
| [((43, 11, 44, 49), 'pandas.DataFrame', 'pd.DataFrame', ({(43, 24, 44, 48): "{'metal_recovery_percent': recovery_percent, 'feed_sulphur_percent':\n sulphur_percent}"}, {}), "({'metal_recovery_percent': recovery_percent,\n 'feed_sulphur_percent': sulphur_percent})", True, 'import pandas as pd\n'), ((48, 11, 48, 38), 'numpy.random.RandomState', 'np.random.RandomState', ({(48, 33, 48, 37): 'seed'}, {}), '(seed)', True, 'import numpy as np\n'), ((80, 23, 80, 53), 'numpy.zeros_like', 'np.zeros_like', ({(80, 37, 80, 52): 'sulphur_percent'}, {}), '(sulphur_percent)', True, 'import numpy as np\n'), ((101, 17, 101, 53), 'numpy.zeros_like', 'np.zeros_like', ({(101, 31, 101, 52): 'uniformly_distributed'}, {}), '(uniformly_distributed)', True, 'import numpy as np\n'), ((110, 12, 110, 47), 'numpy.random.normal', 'np.random.normal', ({(110, 29, 110, 33): 'mean', (110, 35, 110, 40): 'sigma', (110, 42, 110, 46): 'bins'}, {}), '(mean, sigma, bins)', True, 'import numpy as np\n'), ((298, 23, 298, 73), 'numpy.zeros', 'np.zeros', ({(298, 32, 298, 72): '(x_axis.channel_count, template_count + 1)'}, {}), '((x_axis.channel_count, template_count + 1))', True, 'import numpy as np\n'), ((306, 17, 306, 83), 'collections.namedtuple', 'namedtuple', ({(306, 28, 306, 40): '"""Reflection"""', (306, 42, 306, 82): "('intensity', 'two_theta', 'broadening')"}, {}), "('Reflection', ('intensity', 'two_theta', 'broadening'))", False, 'from collections import namedtuple\n'), ((353, 8, 353, 37), 'numpy.ones', 'np.ones', ({(353, 16, 353, 36): 'x_axis.channel_count'}, {}), '(x_axis.channel_count)', True, 'import numpy as np\n'), ((381, 11, 384, 52), 'pandas.DataFrame', 'pd.DataFrame', ({(381, 24, 384, 51): "{'Quartz': quartz, 'Dilithium': dilithium, 'Kryptonite': kryptonite,\n 'Unobtainium': unobtainium}"}, {}), "({'Quartz': quartz, 'Dilithium': dilithium, 'Kryptonite':\n kryptonite, 'Unobtainium': unobtainium})", True, 'import pandas as pd\n'), ((391, 26, 391, 72), 'numpy.zeros', 'np.zeros', ({(391, 35, 391, 71): '(channels_count, observations_count)'}, {}), '((channels_count, observations_count))', True, 'import numpy as np\n'), ((264, 24, 264, 75), 'numpy.linspace', 'np.linspace', ({(264, 36, 264, 44): 'self.min', (264, 46, 264, 54): 'self.max', (264, 56, 264, 74): 'self.channel_count'}, {}), '(self.min, self.max, self.channel_count)', True, 'import numpy as np\n'), ((277, 23, 277, 67), 'math.sqrt', 'math.sqrt', ({(277, 33, 277, 66): 'sigma_1 * sigma_1 + sigma_2 * sigma_2'}, {}), '(sigma_1 * sigma_1 + sigma_2 * sigma_2)', False, 'import math\n'), ((161, 52, 161, 74), 'math.sqrt', 'math.sqrt', ({(161, 62, 161, 73): '(2 * math.pi)'}, {}), '(2 * math.pi)', False, 'import math\n'), ((273, 55, 273, 66), 'math.log', 'math.log', ({(273, 64, 273, 65): '(2)'}, {}), '(2)', False, 'import math\n'), ((128, 73, 128, 84), 'math.log', 'math.log', ({(128, 82, 128, 83): '(2)'}, {}), '(2)', False, 'import math\n')] |
0xOmarA/RadixLib | tests/actions/test_mutable_token_action.py | 85d75a47d4c4df4c1a319b74857ae2c513933623 | from radixlib.actions import CreateTokenDefinition
from typing import Dict, Any
import unittest
class TestMutableTokenAction(unittest.TestCase):
""" Unit tests for the CreateTokenDefinition action of mutable tokens """
ActionDict: Dict[str, Any] = {
"token_properties": {
"name": "MutableTest",
"description": "An amazing new token with great utility!",
"icon_url": "https://www.google.com/",
"url": "https://www.google.com/",
"symbol": "mutable",
"is_supply_mutable": True,
"granularity": "1",
"owner": {
"address": "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
}
},
"token_supply": {
"value": "0",
"token_identifier": {
"rri": "mutable_tr1q06dd0ut3qmyp4pqkvmeu2dvkwg5f7vm8yeslwvpkt9qcl5vqu"
}
},
"type": "CreateTokenDefinition"
}
def test_from_dict(self):
""" Tests the derivation of the mainnet wallet addresses from the public key """
# The action loaded from the dictionary
creation: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)
# Asserting that the CreateTokenDefinition object understood the content of the dictionary
self.assertEqual(creation.name, self.ActionDict['token_properties']['name'])
self.assertEqual(creation.description, self.ActionDict['token_properties']['description'])
self.assertEqual(creation.icon_url, self.ActionDict['token_properties']['icon_url'])
self.assertEqual(creation.url, self.ActionDict['token_properties']['url'])
self.assertEqual(creation.symbol, self.ActionDict['token_properties']['symbol'])
self.assertEqual(creation.is_supply_mutable, self.ActionDict['token_properties']['is_supply_mutable'])
self.assertEqual(creation.granularity, int(self.ActionDict['token_properties']['granularity']))
self.assertEqual(creation.owner.address, self.ActionDict['token_properties']['owner']['address'])
self.assertEqual(creation.token_supply, int(self.ActionDict['token_supply']['value']))
self.assertEqual(creation.token_rri, self.ActionDict['token_supply']['token_identifier']['rri'])
self.assertEqual(creation.to_account, None)
def test_to_dict(self):
""" Tests the conversion of the token account to a dictionary """
# The account loaded from the dictionary
account: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)
self.assertEqual(account.to_dict(), self.ActionDict) | [((34, 42, 34, 90), 'radixlib.actions.CreateTokenDefinition.from_dict', 'CreateTokenDefinition.from_dict', ({(34, 74, 34, 89): 'self.ActionDict'}, {}), '(self.ActionDict)', False, 'from radixlib.actions import CreateTokenDefinition\n'), ((54, 41, 54, 89), 'radixlib.actions.CreateTokenDefinition.from_dict', 'CreateTokenDefinition.from_dict', ({(54, 73, 54, 88): 'self.ActionDict'}, {}), '(self.ActionDict)', False, 'from radixlib.actions import CreateTokenDefinition\n')] |
Trimatix/carica | src/tests/testModules/loadCfg_typeCasting/allowsCastFailKeeping/primativeTypes.py | 074be16bdf50541eb3ba92ca42d0ad901cc51bd0 | floatVar = 1.0
listVar = [3, "hello"]
dictVar = {
"myField": "value"
}
aotVar = [dictVar, dictVar]
intVar = 1 | [] |
arosen93/HT-ASE | quacc/recipes/xtb/__init__.py | a76542e7a2bc5bf6e7382d8f1387374eb2abc713 | """Recipes for xTB"""
| [] |
StephanErb/pants | src/python/pants/backend/native/subsystems/xcode_cli_tools.py | a368267b6b4cf50138ba567f582409ed31bf5db9 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.native.config.environment import Assembler, CCompiler, CppCompiler, Linker
from pants.engine.rules import rule
from pants.engine.selectors import Select
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import is_readable_dir
from pants.util.memo import memoized_method, memoized_property
MIN_OSX_SUPPORTED_VERSION = '10.11'
MIN_OSX_VERSION_ARG = '-mmacosx-version-min={}'.format(MIN_OSX_SUPPORTED_VERSION)
class XCodeCLITools(Subsystem):
"""Subsystem to detect and provide the XCode command line developer tools.
This subsystem exists to give a useful error message if the tools aren't
installed, and because the install location may not be on the PATH when Pants
is invoked.
"""
options_scope = 'xcode-cli-tools'
_REQUIRED_FILES = {
'bin': [
'as',
'cc',
'c++',
'clang',
'clang++',
'ld',
'lipo',
],
# Any of the entries that would be here are not directly below the 'include' or 'lib' dirs, and
# we haven't yet encountered an invalid XCode/CLI tools installation which has the include dirs,
# but incorrect files. These would need to be updated if such an issue arises.
'include': [],
'lib': [],
}
INSTALL_PREFIXES_DEFAULT = [
# Prefer files from this installation directory, if available. This doesn't appear to be
# populated with e.g. header files on travis.
'/usr',
# Populated by the XCode CLI tools.
'/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr',
# Populated by the XCode app. These are derived from using the -v or -H switches invoking the
# osx clang compiler.
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/9.1.0',
'/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr',
]
class XCodeToolsUnavailable(Exception):
"""Thrown if the XCode CLI tools could not be located."""
class XCodeToolsInvalid(Exception):
"""Thrown if a method within this subsystem requests a nonexistent tool."""
@classmethod
def register_options(cls, register):
super(XCodeCLITools, cls).register_options(register)
register('--install-prefixes', type=list, default=cls.INSTALL_PREFIXES_DEFAULT,
fingerprint=True, advanced=True,
help='Locations to search for resources from the XCode CLI tools, including a '
'compiler, linker, header files, and some libraries. '
'Under this directory should be some selection of these subdirectories: {}.'
.format(cls._REQUIRED_FILES.keys()))
@memoized_property
def _all_existing_install_prefixes(self):
return [pfx for pfx in self.get_options().install_prefixes if is_readable_dir(pfx)]
# NB: We use @memoized_method in this file for methods which may raise.
@memoized_method
def _get_existing_subdirs(self, subdir_name):
maybe_subdirs = [os.path.join(pfx, subdir_name) for pfx in self._all_existing_install_prefixes]
existing_dirs = [existing_dir for existing_dir in maybe_subdirs if is_readable_dir(existing_dir)]
required_files_for_dir = self._REQUIRED_FILES.get(subdir_name)
if required_files_for_dir:
for fname in required_files_for_dir:
found = False
for subdir in existing_dirs:
full_path = os.path.join(subdir, fname)
if os.path.isfile(full_path):
found = True
continue
if not found:
raise self.XCodeToolsUnavailable(
"File '{fname}' in subdirectory '{subdir_name}' does not exist at any of the specified "
"prefixes. This file is required to build native code on this platform. You may need "
"to install the XCode command line developer tools from the Mac App Store.\n\n"
"If the XCode tools are installed and you are still seeing this message, please file "
"an issue at https://github.com/pantsbuild/pants/issues/new describing your "
"OSX environment and which file could not be found.\n"
"The existing install prefixes were: {pfxs}. These can be extended with "
"--{scope}-install-prefixes."
.format(fname=fname,
subdir_name=subdir_name,
pfxs=self._all_existing_install_prefixes,
scope=self.get_options_scope_equivalent_flag_component()))
return existing_dirs
@memoized_method
def path_entries(self):
return self._get_existing_subdirs('bin')
@memoized_method
def lib_dirs(self):
return self._get_existing_subdirs('lib')
@memoized_method
def include_dirs(self):
base_inc_dirs = self._get_existing_subdirs('include')
all_inc_dirs = base_inc_dirs
for d in base_inc_dirs:
# TODO: figure out what this directory does and why it's not already found by this compiler.
secure_inc_dir = os.path.join(d, 'secure')
if is_readable_dir(secure_inc_dir):
all_inc_dirs.append(secure_inc_dir)
return all_inc_dirs
@memoized_method
def assembler(self):
return Assembler(
path_entries=self.path_entries(),
exe_filename='as',
library_dirs=[])
@memoized_method
def linker(self):
return Linker(
path_entries=self.path_entries(),
exe_filename='ld',
library_dirs=[],
linking_library_dirs=[],
extra_args=[MIN_OSX_VERSION_ARG])
@memoized_method
def c_compiler(self):
return CCompiler(
path_entries=self.path_entries(),
exe_filename='clang',
library_dirs=self.lib_dirs(),
include_dirs=self.include_dirs(),
extra_args=[MIN_OSX_VERSION_ARG])
@memoized_method
def cpp_compiler(self):
return CppCompiler(
path_entries=self.path_entries(),
exe_filename='clang++',
library_dirs=self.lib_dirs(),
include_dirs=self.include_dirs(),
extra_args=[MIN_OSX_VERSION_ARG])
@rule(Assembler, [Select(XCodeCLITools)])
def get_assembler(xcode_cli_tools):
return xcode_cli_tools.assembler()
@rule(Linker, [Select(XCodeCLITools)])
def get_ld(xcode_cli_tools):
return xcode_cli_tools.linker()
@rule(CCompiler, [Select(XCodeCLITools)])
def get_clang(xcode_cli_tools):
return xcode_cli_tools.c_compiler()
@rule(CppCompiler, [Select(XCodeCLITools)])
def get_clang_plusplus(xcode_cli_tools):
return xcode_cli_tools.cpp_compiler()
def create_xcode_cli_tools_rules():
return [
get_assembler,
get_ld,
get_clang,
get_clang_plusplus,
]
| [((173, 18, 173, 39), 'pants.engine.selectors.Select', 'Select', ({(173, 25, 173, 38): 'XCodeCLITools'}, {}), '(XCodeCLITools)', False, 'from pants.engine.selectors import Select\n'), ((178, 15, 178, 36), 'pants.engine.selectors.Select', 'Select', ({(178, 22, 178, 35): 'XCodeCLITools'}, {}), '(XCodeCLITools)', False, 'from pants.engine.selectors import Select\n'), ((183, 18, 183, 39), 'pants.engine.selectors.Select', 'Select', ({(183, 25, 183, 38): 'XCodeCLITools'}, {}), '(XCodeCLITools)', False, 'from pants.engine.selectors import Select\n'), ((188, 20, 188, 41), 'pants.engine.selectors.Select', 'Select', ({(188, 27, 188, 40): 'XCodeCLITools'}, {}), '(XCodeCLITools)', False, 'from pants.engine.selectors import Select\n'), ((87, 21, 87, 51), 'os.path.join', 'os.path.join', ({(87, 34, 87, 37): 'pfx', (87, 39, 87, 50): 'subdir_name'}, {}), '(pfx, subdir_name)', False, 'import os\n'), ((132, 23, 132, 48), 'os.path.join', 'os.path.join', ({(132, 36, 132, 37): 'd', (132, 39, 132, 47): '"""secure"""'}, {}), "(d, 'secure')", False, 'import os\n'), ((133, 9, 133, 40), 'pants.util.dirutil.is_readable_dir', 'is_readable_dir', ({(133, 25, 133, 39): 'secure_inc_dir'}, {}), '(secure_inc_dir)', False, 'from pants.util.dirutil import is_readable_dir\n'), ((82, 66, 82, 86), 'pants.util.dirutil.is_readable_dir', 'is_readable_dir', ({(82, 82, 82, 85): 'pfx'}, {}), '(pfx)', False, 'from pants.util.dirutil import is_readable_dir\n'), ((88, 71, 88, 100), 'pants.util.dirutil.is_readable_dir', 'is_readable_dir', ({(88, 87, 88, 99): 'existing_dir'}, {}), '(existing_dir)', False, 'from pants.util.dirutil import is_readable_dir\n'), ((95, 22, 95, 49), 'os.path.join', 'os.path.join', ({(95, 35, 95, 41): 'subdir', (95, 43, 95, 48): 'fname'}, {}), '(subdir, fname)', False, 'import os\n'), ((96, 13, 96, 38), 'os.path.isfile', 'os.path.isfile', ({(96, 28, 96, 37): 'full_path'}, {}), '(full_path)', False, 'import os\n')] |
yzhaobom/improver | improver_tests/regrid/test_RegridWithLandSeaMask.py | 47f9e103c63f890bfbb24d5e08d9d01d041514f7 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the RegridWithLandSeaMask class"""
# set up a special data set and corresponding land-sea mask info
# set up target grid and its land-sea mask info
# it is designed to cover different scenarios for regridding with land-sea
# the regridding reference results are manually checked for different methods
# not using "set_up_variable_cube" because of different spacing at lat/lon
import numpy as np
from improver.regrid.bilinear import basic_indexes
from improver.regrid.grid import calculate_input_grid_spacing, latlon_from_cube
from improver.regrid.landsea import RegridLandSea
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
def modify_cube_coordinate_value(cube, coord_x, coord_y):
"""modify x(longitude) & y(latitude) andcoordinates for a cube"""
cube.coord(axis="x").points = coord_x
cube.coord(axis="x").bounds = None
cube.coord(axis="x").guess_bounds()
cube.coord(axis="y").points = coord_y
cube.coord(axis="y").bounds = None
cube.coord(axis="y").guess_bounds()
return cube
def define_source_target_grid_data():
""" define cube_in, cube_in_mask,cube_out_mask using assumed data """
# source (input) grid
in_lats = np.linspace(0, 15, 4)
in_lons = np.linspace(0, 40, 5)
# target (output) grid
out_lats = np.linspace(0, 14, 8)
out_lons = np.linspace(5, 35, 11)
# assume a set of nwp data
data = np.arange(20).reshape(4, 5).astype(np.float32)
# input grid mask info
    in_mask = np.empty((4, 5), dtype=int)
in_mask[:, :] = 1
in_mask[0, 2] = 0
in_mask[2, 2:4] = 0
in_mask[3, 2:4] = 0
# output grid mask info
    out_mask = np.empty((8, 11), dtype=int)
out_mask[:, :] = 1
out_mask[0, 4:7] = 0
out_mask[1, 5] = 0
out_mask[5:9, 4:10] = 0
out_mask[6, 6] = 1
out_mask[7, 6] = 1
out_mask[1, 0] = 0
# create cube with default spacing
cube_in = set_up_variable_cube(data, "air_temperature", "Celsius")
cube_in_mask = set_up_variable_cube(in_mask, "Land_Binary_Mask", "1")
cube_out_mask = set_up_variable_cube(out_mask, "Land_Binary_Mask", "1")
# modify cube coordinates to the designed value
cube_in = modify_cube_coordinate_value(cube_in, in_lons, in_lats)
cube_in_mask = modify_cube_coordinate_value(cube_in_mask, in_lons, in_lats)
cube_out_mask = modify_cube_coordinate_value(cube_out_mask, out_lons, out_lats)
return cube_in, cube_out_mask, cube_in_mask
def define_source_target_grid_data_same_domain():
""" define cube_in, cube_in_mask,cube_out_mask, assume the same domain """
# source (input) grid
in_lats = np.linspace(0, 15, 4)
in_lons = np.linspace(0, 40, 5)
# target (output) grid
out_lats = np.linspace(0, 15, 7)
out_lons = np.linspace(5, 40, 9)
# assume a set of nwp data
data = np.arange(20).reshape(4, 5).astype(np.float32)
# input grid mask info
    in_mask = np.empty((4, 5), dtype=int)
in_mask[:, :] = 1
in_mask[0, 2] = 0
in_mask[2, 2:4] = 0
in_mask[3, 2:4] = 0
# output grid mask info
    out_mask = np.empty((7, 9), dtype=int)
out_mask[:, :] = 1
out_mask[0, 3:6] = 0
out_mask[1, 4] = 0
out_mask[4:9, 4:8] = 0
out_mask[6, 6] = 1
out_mask[1, 0] = 0
# create cube with default spacing
cube_in = set_up_variable_cube(data, "air_temperature", "Celsius")
cube_in_mask = set_up_variable_cube(in_mask, "Land_Binary_Mask", "1")
cube_out_mask = set_up_variable_cube(out_mask, "Land_Binary_Mask", "1")
# modify cube coordinates to the designed value
cube_in = modify_cube_coordinate_value(cube_in, in_lons, in_lats)
cube_in_mask = modify_cube_coordinate_value(cube_in_mask, in_lons, in_lats)
cube_out_mask = modify_cube_coordinate_value(cube_out_mask, out_lons, out_lats)
return cube_in, cube_out_mask, cube_in_mask
def test_basic_indexes():
"""Test basic_indexes for identical source and target domain case """
cube_in, cube_out_mask, _ = define_source_target_grid_data_same_domain()
in_latlons = latlon_from_cube(cube_in)
out_latlons = latlon_from_cube(cube_out_mask)
in_lons_size = cube_in.coord(axis="x").shape[0]
lat_spacing, lon_spacing = calculate_input_grid_spacing(cube_in)
indexes = basic_indexes(
out_latlons, in_latlons, in_lons_size, lat_spacing, lon_spacing
)
test_results = indexes[58:63, :]
expected_results = np.array(
[
[12, 17, 18, 13],
[12, 17, 18, 13],
[13, 18, 19, 14],
[13, 18, 19, 14],
[13, 18, 19, 14],
]
)
np.testing.assert_array_equal(test_results, expected_results)
def test_regrid_nearest_2():
"""Test nearest neighbour regridding option 'nearest-2'"""
cube_in, cube_out_mask, _ = define_source_target_grid_data()
regrid_nearest = RegridLandSea(regrid_mode="nearest-2",)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[15, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18],
]
)
np.testing.assert_allclose(regrid_nearest.data, expected_results, atol=1e-3)
def test_regrid_bilinear_2():
"""Test bilinear regridding option 'bilinear-2'"""
cube_in, cube_out_mask, _ = define_source_target_grid_data()
regrid_bilinear = RegridLandSea(regrid_mode="bilinear-2",)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0.5, 0.8, 1.1, 1.4, 1.7, 2.0, 2.3, 2.6, 2.9, 3.2, 3.5],
[2.5, 2.8, 3.1, 3.4, 3.7, 4.0, 4.3, 4.6, 4.9, 5.2, 5.5],
[4.5, 4.8, 5.1, 5.4, 5.7, 6.0, 6.3, 6.6, 6.9, 7.2, 7.5],
[6.5, 6.8, 7.1, 7.4, 7.7, 8.0, 8.3, 8.6, 8.9, 9.2, 9.5],
[8.5, 8.8, 9.1, 9.4, 9.7, 10.0, 10.3, 10.6, 10.9, 11.2, 11.5],
[10.5, 10.8, 11.1, 11.4, 11.7, 12.0, 12.3, 12.6, 12.9, 13.2, 13.5],
[12.5, 12.8, 13.1, 13.4, 13.7, 14.0, 14.3, 14.6, 14.9, 15.2, 15.5],
[14.5, 14.8, 15.1, 15.4, 15.7, 16.0, 16.3, 16.6, 16.9, 17.2, 17.5],
]
)
np.testing.assert_allclose(regrid_bilinear.data, expected_results, atol=1e-3)
def test_regrid_nearest_with_mask_2():
"""Test nearest-with-mask-2 regridding"""
cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data()
regrid_nearest_with_mask = RegridLandSea(
regrid_mode="nearest-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[0, 1, 1, 1, 7, 2, 7, 3, 3, 3, 3],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9],
[10, 11, 11, 11, 7, 7, 7, 8, 8, 8, 14],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 14],
[10, 11, 11, 11, 12, 12, 7, 13, 13, 13, 14],
[15, 16, 16, 16, 17, 17, 7, 18, 18, 18, 19],
]
)
np.testing.assert_allclose(
regrid_nearest_with_mask.data, expected_results, atol=1e-3
)
# consider constant field
cube_in.data = np.repeat(1.0, 20).reshape(4, 5).astype(np.float32)
regrid_nearest_with_mask = RegridLandSea(
regrid_mode="nearest-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.repeat(1.0, 88).reshape(8, 11).astype(np.float32)
np.testing.assert_allclose(
regrid_nearest_with_mask.data, expected_results, atol=1e-3
)
def test_regrid_bilinear_with_mask_2():
"""Test bilinear-with-mask-2 regridding """
cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data()
regrid_bilinear_with_mask = RegridLandSea(
regrid_mode="bilinear-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0.5, 0.8, 1.40096, 3.2916, 2.0, 2.0, 2.0, 4.94333, 3.25586, 3.2, 3.5],
[2.5, 2.8, 3.1, 3.4, 5.48911, 2.76267, 6.32926, 4.6, 4.9, 5.2, 5.5],
[4.5, 4.8, 5.1, 5.4, 5.7, 7.0154, 6.3, 6.6, 6.9, 7.2, 7.5],
[6.5, 6.8, 7.1, 7.4, 7.7, 7.0, 7.19033, 7.6681, 7.6618, 9.2, 9.5],
[
8.5,
8.8,
9.1,
9.4,
8.10633,
7.0,
7.0,
7.62915,
7.21672,
9.11434,
10.52363,
],
[
10.5,
10.8,
11.00012,
11.01183,
13.15439,
12.0,
12.3,
12.6,
12.9,
13.71286,
15.74504,
],
[
12.5,
12.8,
12.23411,
13.25881,
14.14155,
14.0,
8.07328,
14.6,
14.9,
14.96332,
16.3334,
],
[
14.5,
14.8,
15.0997,
14.22659,
15.50905,
16.0,
9.8733,
16.6,
16.9,
16.91114,
17.03773,
],
]
)
np.testing.assert_allclose(
regrid_bilinear_with_mask.data, expected_results, atol=1e-3
)
# consider constant field
cube_in.data = np.repeat(1.0, 20).reshape(4, 5).astype(np.float32)
regrid_bilinear_with_mask = RegridLandSea(
regrid_mode="bilinear-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.repeat(1.0, 88).reshape(8, 11).astype(np.float32)
np.testing.assert_allclose(
regrid_bilinear_with_mask.data, expected_results, atol=1e-3
)
| [((60, 14, 60, 35), 'numpy.linspace', 'np.linspace', ({(60, 26, 60, 27): '0', (60, 29, 60, 31): '15', (60, 33, 60, 34): '4'}, {}), '(0, 15, 4)', True, 'import numpy as np\n'), ((61, 14, 61, 35), 'numpy.linspace', 'np.linspace', ({(61, 26, 61, 27): '0', (61, 29, 61, 31): '40', (61, 33, 61, 34): '5'}, {}), '(0, 40, 5)', True, 'import numpy as np\n'), ((64, 15, 64, 36), 'numpy.linspace', 'np.linspace', ({(64, 27, 64, 28): '0', (64, 30, 64, 32): '14', (64, 34, 64, 35): '8'}, {}), '(0, 14, 8)', True, 'import numpy as np\n'), ((65, 15, 65, 37), 'numpy.linspace', 'np.linspace', ({(65, 27, 65, 28): '5', (65, 30, 65, 32): '35', (65, 34, 65, 36): '11'}, {}), '(5, 35, 11)', True, 'import numpy as np\n'), ((71, 14, 71, 44), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((78, 15, 78, 46), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((89, 14, 89, 70), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', ({(89, 35, 89, 39): 'data', (89, 41, 89, 58): '"""air_temperature"""', (89, 60, 89, 69): '"""Celsius"""'}, {}), "(data, 'air_temperature', 'Celsius')", False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((90, 19, 90, 73), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', ({(90, 40, 90, 47): 'in_mask', (90, 49, 90, 67): '"""Land_Binary_Mask"""', (90, 69, 90, 72): '"""1"""'}, {}), "(in_mask, 'Land_Binary_Mask', '1')", False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((91, 20, 91, 75), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', ({(91, 41, 91, 49): 'out_mask', (91, 51, 91, 69): '"""Land_Binary_Mask"""', (91, 71, 91, 74): '"""1"""'}, {}), "(out_mask, 'Land_Binary_Mask', '1')", False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((104, 14, 104, 35), 'numpy.linspace', 'np.linspace', ({(104, 26, 104, 27): '0', (104, 29, 104, 31): '15', (104, 33, 104, 34): '4'}, {}), '(0, 15, 4)', True, 'import numpy as np\n'), ((105, 14, 105, 35), 'numpy.linspace', 'np.linspace', ({(105, 26, 105, 27): '0', (105, 29, 105, 31): '40', (105, 33, 105, 34): '5'}, {}), '(0, 40, 5)', True, 'import numpy as np\n'), ((108, 15, 108, 36), 'numpy.linspace', 'np.linspace', ({(108, 27, 108, 28): '0', (108, 30, 108, 32): '15', (108, 34, 108, 35): '7'}, {}), '(0, 15, 7)', True, 'import numpy as np\n'), ((109, 15, 109, 36), 'numpy.linspace', 'np.linspace', ({(109, 27, 109, 28): '5', (109, 30, 109, 32): '40', (109, 34, 109, 35): '9'}, {}), '(5, 40, 9)', True, 'import numpy as np\n'), ((115, 14, 115, 44), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((122, 15, 122, 45), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((131, 14, 131, 70), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', ({(131, 35, 131, 39): 'data', (131, 41, 131, 58): '"""air_temperature"""', (131, 60, 131, 69): '"""Celsius"""'}, {}), "(data, 'air_temperature', 'Celsius')", False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((132, 19, 132, 73), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', ({(132, 40, 132, 47): 'in_mask', (132, 49, 132, 67): '"""Land_Binary_Mask"""', (132, 69, 132, 72): '"""1"""'}, {}), "(in_mask, 'Land_Binary_Mask', '1')", False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((133, 20, 133, 75), 
'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', ({(133, 41, 133, 49): 'out_mask', (133, 51, 133, 69): '"""Land_Binary_Mask"""', (133, 71, 133, 74): '"""1"""'}, {}), "(out_mask, 'Land_Binary_Mask', '1')", False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((146, 17, 146, 42), 'improver.regrid.grid.latlon_from_cube', 'latlon_from_cube', ({(146, 34, 146, 41): 'cube_in'}, {}), '(cube_in)', False, 'from improver.regrid.grid import calculate_input_grid_spacing, latlon_from_cube\n'), ((147, 18, 147, 49), 'improver.regrid.grid.latlon_from_cube', 'latlon_from_cube', ({(147, 35, 147, 48): 'cube_out_mask'}, {}), '(cube_out_mask)', False, 'from improver.regrid.grid import calculate_input_grid_spacing, latlon_from_cube\n'), ((149, 31, 149, 68), 'improver.regrid.grid.calculate_input_grid_spacing', 'calculate_input_grid_spacing', ({(149, 60, 149, 67): 'cube_in'}, {}), '(cube_in)', False, 'from improver.regrid.grid import calculate_input_grid_spacing, latlon_from_cube\n'), ((150, 14, 152, 5), 'improver.regrid.bilinear.basic_indexes', 'basic_indexes', ({(151, 8, 151, 19): 'out_latlons', (151, 21, 151, 31): 'in_latlons', (151, 33, 151, 45): 'in_lons_size', (151, 47, 151, 58): 'lat_spacing', (151, 60, 151, 71): 'lon_spacing'}, {}), '(out_latlons, in_latlons, in_lons_size, lat_spacing, lon_spacing)', False, 'from improver.regrid.bilinear import basic_indexes\n'), ((154, 23, 162, 5), 'numpy.array', 'np.array', ({(155, 8, 161, 9): '[[12, 17, 18, 13], [12, 17, 18, 13], [13, 18, 19, 14], [13, 18, 19, 14], [\n 13, 18, 19, 14]]'}, {}), '([[12, 17, 18, 13], [12, 17, 18, 13], [13, 18, 19, 14], [13, 18, 19,\n 14], [13, 18, 19, 14]])', True, 'import numpy as np\n'), ((163, 4, 163, 65), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', ({(163, 34, 163, 46): 'test_results', (163, 48, 163, 64): 'expected_results'}, {}), '(test_results, expected_results)', True, 'import numpy as np\n'), ((171, 23, 182, 5), 'numpy.array', 'np.array', ({(172, 8, 181, 9): '[[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], [0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], [5, \n 6, 6, 6, 7, 7, 7, 8, 8, 8, 8], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8], [10, \n 11, 11, 11, 12, 12, 12, 13, 13, 13, 13], [10, 11, 11, 11, 12, 12, 12, \n 13, 13, 13, 13], [10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13], [15, 16,\n 16, 16, 17, 17, 17, 18, 18, 18, 18]]'}, {}), '([[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], [0, 1, 1, 1, 2, 2, 2, 3, 3, 3,\n 3], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8\n ], [10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13], [10, 11, 11, 11, 12, \n 12, 12, 13, 13, 13, 13], [10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],\n [15, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18]])', True, 'import numpy as np\n'), ((183, 4, 183, 80), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (), '', True, 'import numpy as np\n'), ((192, 23, 203, 5), 'numpy.array', 'np.array', ({(193, 8, 202, 9): '[[0.5, 0.8, 1.1, 1.4, 1.7, 2.0, 2.3, 2.6, 2.9, 3.2, 3.5], [2.5, 2.8, 3.1, \n 3.4, 3.7, 4.0, 4.3, 4.6, 4.9, 5.2, 5.5], [4.5, 4.8, 5.1, 5.4, 5.7, 6.0,\n 6.3, 6.6, 6.9, 7.2, 7.5], [6.5, 6.8, 7.1, 7.4, 7.7, 8.0, 8.3, 8.6, 8.9,\n 9.2, 9.5], [8.5, 8.8, 9.1, 9.4, 9.7, 10.0, 10.3, 10.6, 10.9, 11.2, 11.5\n ], [10.5, 10.8, 11.1, 11.4, 11.7, 12.0, 12.3, 12.6, 12.9, 13.2, 13.5],\n [12.5, 12.8, 13.1, 13.4, 13.7, 14.0, 14.3, 14.6, 14.9, 15.2, 15.5], [\n 14.5, 14.8, 15.1, 15.4, 15.7, 16.0, 16.3, 16.6, 16.9, 17.2, 17.5]]'}, {}), '([[0.5, 0.8, 1.1, 1.4, 1.7, 2.0, 2.3, 2.6, 2.9, 3.2, 3.5], [2.5, \n 2.8, 3.1, 
3.4, 3.7, 4.0, 4.3, 4.6, 4.9, 5.2, 5.5], [4.5, 4.8, 5.1, 5.4,\n 5.7, 6.0, 6.3, 6.6, 6.9, 7.2, 7.5], [6.5, 6.8, 7.1, 7.4, 7.7, 8.0, 8.3,\n 8.6, 8.9, 9.2, 9.5], [8.5, 8.8, 9.1, 9.4, 9.7, 10.0, 10.3, 10.6, 10.9, \n 11.2, 11.5], [10.5, 10.8, 11.1, 11.4, 11.7, 12.0, 12.3, 12.6, 12.9, \n 13.2, 13.5], [12.5, 12.8, 13.1, 13.4, 13.7, 14.0, 14.3, 14.6, 14.9, \n 15.2, 15.5], [14.5, 14.8, 15.1, 15.4, 15.7, 16.0, 16.3, 16.6, 16.9, \n 17.2, 17.5]])', True, 'import numpy as np\n'), ((205, 4, 205, 81), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (), '', True, 'import numpy as np\n'), ((218, 23, 229, 5), 'numpy.array', 'np.array', ({(219, 8, 228, 9): '[[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], [0, 1, 1, 1, 7, 2, 7, 3, 3, 3, 3], [5, \n 6, 6, 6, 7, 7, 7, 8, 8, 8, 8], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9], [10, \n 11, 11, 11, 7, 7, 7, 8, 8, 8, 14], [10, 11, 11, 11, 12, 12, 12, 13, 13,\n 13, 14], [10, 11, 11, 11, 12, 12, 7, 13, 13, 13, 14], [15, 16, 16, 16, \n 17, 17, 7, 18, 18, 18, 19]]'}, {}), '([[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3], [0, 1, 1, 1, 7, 2, 7, 3, 3, 3,\n 3], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8], [5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9\n ], [10, 11, 11, 11, 7, 7, 7, 8, 8, 8, 14], [10, 11, 11, 11, 12, 12, 12,\n 13, 13, 13, 14], [10, 11, 11, 11, 12, 12, 7, 13, 13, 13, 14], [15, 16, \n 16, 16, 17, 17, 7, 18, 18, 18, 19]])', True, 'import numpy as np\n'), ((231, 4, 233, 5), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (), '', True, 'import numpy as np\n'), ((244, 4, 246, 5), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (), '', True, 'import numpy as np\n'), ((259, 23, 318, 5), 'numpy.array', 'np.array', ({(260, 8, 317, 9): '[[0.5, 0.8, 1.40096, 3.2916, 2.0, 2.0, 2.0, 4.94333, 3.25586, 3.2, 3.5], [\n 2.5, 2.8, 3.1, 3.4, 5.48911, 2.76267, 6.32926, 4.6, 4.9, 5.2, 5.5], [\n 4.5, 4.8, 5.1, 5.4, 5.7, 7.0154, 6.3, 6.6, 6.9, 7.2, 7.5], [6.5, 6.8, \n 7.1, 7.4, 7.7, 7.0, 7.19033, 7.6681, 7.6618, 9.2, 9.5], [8.5, 8.8, 9.1,\n 9.4, 8.10633, 7.0, 7.0, 7.62915, 7.21672, 9.11434, 10.52363], [10.5, \n 10.8, 11.00012, 11.01183, 13.15439, 12.0, 12.3, 12.6, 12.9, 13.71286, \n 15.74504], [12.5, 12.8, 12.23411, 13.25881, 14.14155, 14.0, 8.07328, \n 14.6, 14.9, 14.96332, 16.3334], [14.5, 14.8, 15.0997, 14.22659, \n 15.50905, 16.0, 9.8733, 16.6, 16.9, 16.91114, 17.03773]]'}, {}), '([[0.5, 0.8, 1.40096, 3.2916, 2.0, 2.0, 2.0, 4.94333, 3.25586, 3.2,\n 3.5], [2.5, 2.8, 3.1, 3.4, 5.48911, 2.76267, 6.32926, 4.6, 4.9, 5.2, \n 5.5], [4.5, 4.8, 5.1, 5.4, 5.7, 7.0154, 6.3, 6.6, 6.9, 7.2, 7.5], [6.5,\n 6.8, 7.1, 7.4, 7.7, 7.0, 7.19033, 7.6681, 7.6618, 9.2, 9.5], [8.5, 8.8,\n 9.1, 9.4, 8.10633, 7.0, 7.0, 7.62915, 7.21672, 9.11434, 10.52363], [\n 10.5, 10.8, 11.00012, 11.01183, 13.15439, 12.0, 12.3, 12.6, 12.9, \n 13.71286, 15.74504], [12.5, 12.8, 12.23411, 13.25881, 14.14155, 14.0, \n 8.07328, 14.6, 14.9, 14.96332, 16.3334], [14.5, 14.8, 15.0997, 14.22659,\n 15.50905, 16.0, 9.8733, 16.6, 16.9, 16.91114, 17.03773]])', True, 'import numpy as np\n'), ((320, 4, 322, 5), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (), '', True, 'import numpy as np\n'), ((334, 4, 336, 5), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (), '', True, 'import numpy as np\n'), ((170, 21, 170, 60), 'improver.regrid.landsea.RegridLandSea', 'RegridLandSea', (), '', False, 'from improver.regrid.landsea import RegridLandSea\n'), ((190, 22, 190, 62), 'improver.regrid.landsea.RegridLandSea', 'RegridLandSea', (), '', False, 'from improver.regrid.landsea import RegridLandSea\n'), ((212, 31, 216, 5), 
'improver.regrid.landsea.RegridLandSea', 'RegridLandSea', (), '', False, 'from improver.regrid.landsea import RegridLandSea\n'), ((237, 31, 241, 5), 'improver.regrid.landsea.RegridLandSea', 'RegridLandSea', (), '', False, 'from improver.regrid.landsea import RegridLandSea\n'), ((253, 32, 257, 5), 'improver.regrid.landsea.RegridLandSea', 'RegridLandSea', (), '', False, 'from improver.regrid.landsea import RegridLandSea\n'), ((326, 32, 330, 5), 'improver.regrid.landsea.RegridLandSea', 'RegridLandSea', (), '', False, 'from improver.regrid.landsea import RegridLandSea\n'), ((68, 11, 68, 24), 'numpy.arange', 'np.arange', ({(68, 21, 68, 23): '20'}, {}), '(20)', True, 'import numpy as np\n'), ((112, 11, 112, 24), 'numpy.arange', 'np.arange', ({(112, 21, 112, 23): '20'}, {}), '(20)', True, 'import numpy as np\n'), ((236, 19, 236, 37), 'numpy.repeat', 'np.repeat', ({(236, 29, 236, 32): '1.0', (236, 34, 236, 36): '20'}, {}), '(1.0, 20)', True, 'import numpy as np\n'), ((243, 23, 243, 41), 'numpy.repeat', 'np.repeat', ({(243, 33, 243, 36): '1.0', (243, 38, 243, 40): '88'}, {}), '(1.0, 88)', True, 'import numpy as np\n'), ((325, 19, 325, 37), 'numpy.repeat', 'np.repeat', ({(325, 29, 325, 32): '1.0', (325, 34, 325, 36): '20'}, {}), '(1.0, 20)', True, 'import numpy as np\n'), ((332, 23, 332, 41), 'numpy.repeat', 'np.repeat', ({(332, 33, 332, 36): '1.0', (332, 38, 332, 40): '88'}, {}), '(1.0, 88)', True, 'import numpy as np\n')] |
garnaat/details | setup.py | 07f2fc7f27b29a6ddcda918abf6ae0882450319e | #!/usr/bin/env python
from setuptools import setup, find_packages
import os
requires = [
]
setup(
name='details',
version=open(os.path.join('details', '_version')).read(),
description='Tools for processing AWS detailed billing reports',
long_description=open('README.md').read(),
author='Mitch Garnaat',
author_email='[email protected]',
url='https://github.com/scopely-devops/details',
packages=find_packages(exclude=['tests*']),
package_dir={'details': 'details'},
install_requires=requires,
license=open("LICENSE").read(),
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'
),
)
| [((19, 13, 19, 46), 'setuptools.find_packages', 'find_packages', (), '', False, 'from setuptools import setup, find_packages\n'), ((13, 17, 13, 52), 'os.path.join', 'os.path.join', ({(13, 30, 13, 39): '"""details"""', (13, 41, 13, 51): '"""_version"""'}, {}), "('details', '_version')", False, 'import os\n')] |
YannickDieter/beam_telescope_analysis | beam_telescope_analysis/testing/test_kalman.py | 0c678ad991a9ef42178b2eeaf58059d387362f2a | ''' Script to check the correctness of the analysis. The analysis is done on raw data and all results are compared to a recorded analysis.
'''
import os
import unittest
import numpy as np
from beam_telescope_analysis import track_analysis
from beam_telescope_analysis.tools import test_tools
class TestTrackAnalysis(unittest.TestCase):
@classmethod
def setUpClass(cls):
# virtual X server for plots under headless LINUX travis testing is needed
if os.getenv('TRAVIS', False) and os.getenv('TRAVIS_OS_NAME', False) == 'linux':
from xvfbwrapper import Xvfb # virtual X server for plots under headless LINUX travis testing is needed
cls.vdisplay = Xvfb()
cls.vdisplay.start()
@classmethod
def tearDownClass(cls): # Remove created files
pass
# os.remove(os.path.join(cls.output_folder, 'Tracks_merged.pdf'))
def test_kalman(self):
# pixel size of sensor
pixel_size = np.array([(18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (250., 50.)])
pixel_resolution = pixel_size / np.sqrt(12)
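        # material budget of each plane: sensor thickness divided by its radiation length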
material_budget = np.array([100., 100., 100., 100., 100., 100., 250.]) / np.array([125390., 125390., 125390., 125390., 125390., 125390., 93700.])
prealignment = {'z': [0., 29900., 60300., 82100., 118700., 160700., 197800.]}
kwargs = {'track_hits': np.array([[[-1229.22372954, 2828.19616302, 0., pixel_resolution[0][0], pixel_resolution[0][1], 0.],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], # [-1254.51224282, 2827.4291421, 29900.],
[-1285.6117892, 2822.34536687, 60300., pixel_resolution[2][0], pixel_resolution[2][1], 0.],
[-1311.31083616, 2823.56121414, 82100., pixel_resolution[3][0], pixel_resolution[3][1], 0.],
[-1335.8529645, 2828.43359043, 118700., pixel_resolution[4][0], pixel_resolution[4][1], 0.],
[-1357.81872222, 2840.86947964, 160700., pixel_resolution[5][0], pixel_resolution[5][1], 0.],
[-1396.35698339, 2843.76799577, 197800., pixel_resolution[6][0], pixel_resolution[6][1], 0.]]]),
'dut_fit_selection': 61,
'z_positions': [[0., 29900, 60300, 82100, 118700, 160700, 197800]],
'alignment': [prealignment],
'use_prealignment': True,
'pixel_size': pixel_size,
'n_pixels': ((576, 1152), (576, 1152), (576, 1152), (576, 1152), (576, 1152), (576, 1152), (80, 336)),
'beam_energy': 2500.,
'material_budget': material_budget,
'add_scattering_plane': False}
        # expected result array: (state estimates, chi, x errors, y errors)
result = [[[-1.23045812e+03, 2.82684464e+03, 0.00000000e+00, -9.54188957e-04, -5.78722777e-05, 9.99999543e-01],
[-1.25900270e+03, 2.82511339e+03, 2.99000000e+04, -9.54667558e-04, -5.79013065e-05, 9.99999543e-01],
[-1.28705254e+03, 2.82443254e+03, 6.03000000e+04, -9.22691847e-04, -2.23966180e-05, 9.99999574e-01],
[-1.30575083e+03, 2.82550588e+03, 8.21000000e+04, -8.57719095e-04, 4.92360053e-05, 9.99999631e-01],
[-1.33339390e+03, 2.83014572e+03, 1.18700000e+05, -7.55274948e-04, 1.26771487e-04, 9.99999707e-01],
[-1.36192826e+03, 2.83782855e+03, 1.60700000e+05, -6.79389377e-04, 1.82924497e-04, 9.99999752e-01],
[-1.38713361e+03, 2.84461505e+03, 1.97800000e+05, -6.79389377e-04, 1.82924497e-04, 9.99999752e-01]],
[79.59176738400244],
[3.62429044, 3.2884327, 3.21655702, 3.1539946, 3.23671172, 4.66501707, 8.62909928],
[3.62429044, 3.2884327, 3.21655702, 3.1539946, 3.23671172, 4.66501707, 8.62909928]]
        for i in range(4):  # test each return (state estimates, chi, x errors, y errors) separately
test = test_tools._call_function_with_args(function=track_analysis._fit_tracks_kalman_loop,
**kwargs)[0][i]
data_equal = np.allclose(test, result[i])
self.assertTrue(data_equal)
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
suite = unittest.TestLoader().loadTestsFromTestCase(TestTrackAnalysis)
unittest.TextTestRunner(verbosity=2).run(suite)
| [((72, 4, 72, 128), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((30, 21, 30, 128), 'numpy.array', 'np.array', ({(30, 30, 30, 127): '[(18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (\n 18.5, 18.5), (250.0, 50.0)]'}, {}), '([(18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, \n 18.5), (18.5, 18.5), (250.0, 50.0)])', True, 'import numpy as np\n'), ((18, 11, 18, 37), 'os.getenv', 'os.getenv', ({(18, 21, 18, 29): '"""TRAVIS"""', (18, 31, 18, 36): '(False)'}, {}), "('TRAVIS', False)", False, 'import os\n'), ((20, 27, 20, 33), 'xvfbwrapper.Xvfb', 'Xvfb', ({}, {}), '()', False, 'from xvfbwrapper import Xvfb\n'), ((31, 40, 31, 51), 'numpy.sqrt', 'np.sqrt', ({(31, 48, 31, 50): '(12)'}, {}), '(12)', True, 'import numpy as np\n'), ((32, 26, 32, 78), 'numpy.array', 'np.array', ({(32, 35, 32, 77): '[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 250.0]'}, {}), '([100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 250.0])', True, 'import numpy as np\n'), ((32, 81, 32, 153), 'numpy.array', 'np.array', ({(32, 90, 32, 152): '[125390.0, 125390.0, 125390.0, 125390.0, 125390.0, 125390.0, 93700.0]'}, {}), '([125390.0, 125390.0, 125390.0, 125390.0, 125390.0, 125390.0, 93700.0])', True, 'import numpy as np\n'), ((34, 32, 40, 138), 'numpy.array', 'np.array', ({(34, 41, 40, 137): '[[[-1229.22372954, 2828.19616302, 0.0, pixel_resolution[0][0],\n pixel_resolution[0][1], 0.0], [np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan], [-1285.6117892, 2822.34536687, 60300.0, pixel_resolution[2][0],\n pixel_resolution[2][1], 0.0], [-1311.31083616, 2823.56121414, 82100.0,\n pixel_resolution[3][0], pixel_resolution[3][1], 0.0], [-1335.8529645, \n 2828.43359043, 118700.0, pixel_resolution[4][0], pixel_resolution[4][1],\n 0.0], [-1357.81872222, 2840.86947964, 160700.0, pixel_resolution[5][0],\n pixel_resolution[5][1], 0.0], [-1396.35698339, 2843.76799577, 197800.0,\n pixel_resolution[6][0], pixel_resolution[6][1], 0.0]]]'}, {}), '([[[-1229.22372954, 2828.19616302, 0.0, pixel_resolution[0][0],\n pixel_resolution[0][1], 0.0], [np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan], [-1285.6117892, 2822.34536687, 60300.0, pixel_resolution[2][0],\n pixel_resolution[2][1], 0.0], [-1311.31083616, 2823.56121414, 82100.0,\n pixel_resolution[3][0], pixel_resolution[3][1], 0.0], [-1335.8529645, \n 2828.43359043, 118700.0, pixel_resolution[4][0], pixel_resolution[4][1],\n 0.0], [-1357.81872222, 2840.86947964, 160700.0, pixel_resolution[5][0],\n pixel_resolution[5][1], 0.0], [-1396.35698339, 2843.76799577, 197800.0,\n pixel_resolution[6][0], pixel_resolution[6][1], 0.0]]])', True, 'import numpy as np\n'), ((66, 25, 66, 53), 'numpy.allclose', 'np.allclose', ({(66, 37, 66, 41): 'test', (66, 43, 66, 52): 'result[i]'}, {}), '(test, result[i])', True, 'import numpy as np\n'), ((73, 12, 73, 33), 'unittest.TestLoader', 'unittest.TestLoader', ({}, {}), '()', False, 'import unittest\n'), ((74, 4, 74, 40), 'unittest.TextTestRunner', 'unittest.TextTestRunner', (), '', False, 'import unittest\n'), ((18, 42, 18, 76), 'os.getenv', 'os.getenv', ({(18, 52, 18, 68): '"""TRAVIS_OS_NAME"""', (18, 70, 18, 75): '(False)'}, {}), "('TRAVIS_OS_NAME', False)", False, 'import os\n'), ((64, 19, 65, 64), 'beam_telescope_analysis.tools.test_tools._call_function_with_args', 'test_tools._call_function_with_args', (), '', False, 'from beam_telescope_analysis.tools import test_tools\n')] |
asnramos/asv | test/test_workflow.py | 8a0979b532d06c7c352826e2acf0dd872922260e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import json
from os.path import join, isfile
import pytest
from asv import util
from . import tools
def test_run_publish(capfd, basic_conf_2):
tmpdir, local, conf, machine_file = basic_conf_2
tmpdir = util.long_path(tmpdir)
conf.matrix = {
"req": dict(conf.matrix),
"env": {"SOME_TEST_VAR": ["1"]},
}
# Tests a typical complete run/publish workflow
ret = tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--quick', '--show-stderr', '--profile',
'-a', 'warmup_time=0',
'--durations=5',
_machine_file=machine_file)
assert ret is None
text, err = capfd.readouterr()
assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5
assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2
assert 'asv: benchmark timed out (timeout 0.1s)' in text
assert 'total duration' in text
tools.run_asv_with_conf(conf, 'publish')
assert isfile(join(tmpdir, 'html', 'index.html'))
assert isfile(join(tmpdir, 'html', 'index.json'))
assert isfile(join(tmpdir, 'html', 'asv.js'))
assert isfile(join(tmpdir, 'html', 'asv.css'))
# Check parameterized test json data format
filename = glob.glob(join(tmpdir, 'html', 'graphs', 'arch-x86_64',
'asv_dummy_test_package_1',
'asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1],
'branch-master',
'cpu-Blazingly fast',
'env-SOME_TEST_VAR-1',
'machine-orangutan',
'os-GNU_Linux', 'python-*', 'ram-128GB',
'params_examples.time_skip.json'))[0]
with open(filename, 'r') as fp:
data = json.load(fp)
assert len(data) == 2
assert isinstance(data[0][0], int) # revision
assert len(data[0][1]) == 3
assert len(data[1][1]) == 3
assert isinstance(data[0][1][0], float)
assert isinstance(data[0][1][1], float)
assert data[0][1][2] is None
# Check that the skip options work
capfd.readouterr()
tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--quick', '--skip-existing-successful',
'--bench=time_secondary.track_value',
'--skip-existing-failed',
_machine_file=join(tmpdir, 'asv-machine.json'))
tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--bench=time_secondary.track_value',
'--quick', '--skip-existing-commits',
_machine_file=join(tmpdir, 'asv-machine.json'))
text, err = capfd.readouterr()
assert 'Running benchmarks.' not in text
# Check EXISTING and --environment work
python = "{0[0]}.{0[1]}".format(sys.version_info)
env_type = tools.get_default_environment_type(conf, python)
env_spec = ("-E", env_type + ":" + python)
tools.run_asv_with_conf(conf, 'run', "EXISTING", '--quick',
'--bench=time_secondary.track_value',
*env_spec,
_machine_file=machine_file)
# Remove the benchmarks.json file and check publish fails
os.remove(join(tmpdir, "results_workflow", "benchmarks.json"))
with pytest.raises(util.UserError):
tools.run_asv_with_conf(conf, 'publish')
| [((18, 13, 18, 35), 'asv.util.long_path', 'util.long_path', ({(18, 28, 18, 34): 'tmpdir'}, {}), '(tmpdir)', False, 'from asv import util\n'), ((41, 18, 41, 52), 'os.path.join', 'join', ({(41, 23, 41, 29): 'tmpdir', (41, 31, 41, 37): '"""html"""', (41, 39, 41, 51): '"""index.html"""'}, {}), "(tmpdir, 'html', 'index.html')", False, 'from os.path import join, isfile\n'), ((42, 18, 42, 52), 'os.path.join', 'join', ({(42, 23, 42, 29): 'tmpdir', (42, 31, 42, 37): '"""html"""', (42, 39, 42, 51): '"""index.json"""'}, {}), "(tmpdir, 'html', 'index.json')", False, 'from os.path import join, isfile\n'), ((43, 18, 43, 48), 'os.path.join', 'join', ({(43, 23, 43, 29): 'tmpdir', (43, 31, 43, 37): '"""html"""', (43, 39, 43, 47): '"""asv.js"""'}, {}), "(tmpdir, 'html', 'asv.js')", False, 'from os.path import join, isfile\n'), ((44, 18, 44, 49), 'os.path.join', 'join', ({(44, 23, 44, 29): 'tmpdir', (44, 31, 44, 37): '"""html"""', (44, 39, 44, 48): '"""asv.css"""'}, {}), "(tmpdir, 'html', 'asv.css')", False, 'from os.path import join, isfile\n'), ((57, 15, 57, 28), 'json.load', 'json.load', ({(57, 25, 57, 27): 'fp'}, {}), '(fp)', False, 'import json\n'), ((91, 14, 91, 65), 'os.path.join', 'join', ({(91, 19, 91, 25): 'tmpdir', (91, 27, 91, 45): '"""results_workflow"""', (91, 47, 91, 64): '"""benchmarks.json"""'}, {}), "(tmpdir, 'results_workflow', 'benchmarks.json')", False, 'from os.path import join, isfile\n'), ((93, 9, 93, 38), 'pytest.raises', 'pytest.raises', ({(93, 23, 93, 37): 'util.UserError'}, {}), '(util.UserError)', False, 'import pytest\n'), ((47, 25, 55, 63), 'os.path.join', 'join', ({(47, 30, 47, 36): 'tmpdir', (47, 38, 47, 44): '"""html"""', (47, 46, 47, 54): '"""graphs"""', (47, 56, 47, 69): '"""arch-x86_64"""', (48, 30, 48, 56): '"""asv_dummy_test_package_1"""', (49, 30, 49, 84): "('asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1])", (50, 30, 50, 45): '"""branch-master"""', (51, 30, 51, 50): '"""cpu-Blazingly fast"""', (52, 30, 52, 51): '"""env-SOME_TEST_VAR-1"""', (53, 30, 53, 49): '"""machine-orangutan"""', (54, 30, 54, 44): '"""os-GNU_Linux"""', (54, 46, 54, 56): '"""python-*"""', (54, 58, 54, 69): '"""ram-128GB"""', (55, 30, 55, 62): '"""params_examples.time_skip.json"""'}, {}), "(tmpdir, 'html', 'graphs', 'arch-x86_64', 'asv_dummy_test_package_1', \n 'asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1], 'branch-master',\n 'cpu-Blazingly fast', 'env-SOME_TEST_VAR-1', 'machine-orangutan',\n 'os-GNU_Linux', 'python-*', 'ram-128GB', 'params_examples.time_skip.json')", False, 'from os.path import join, isfile\n'), ((72, 42, 72, 74), 'os.path.join', 'join', ({(72, 47, 72, 53): 'tmpdir', (72, 55, 72, 73): '"""asv-machine.json"""'}, {}), "(tmpdir, 'asv-machine.json')", False, 'from os.path import join, isfile\n'), ((76, 42, 76, 74), 'os.path.join', 'join', ({(76, 47, 76, 53): 'tmpdir', (76, 55, 76, 73): '"""asv-machine.json"""'}, {}), "(tmpdir, 'asv-machine.json')", False, 'from os.path import join, isfile\n'), ((34, 26, 34, 71), 'os.path.join', 'join', ({(34, 31, 34, 37): 'tmpdir', (34, 39, 34, 57): '"""results_workflow"""', (34, 59, 34, 70): '"""orangutan"""'}, {}), "(tmpdir, 'results_workflow', 'orangutan')", False, 'from os.path import join, isfile\n'), ((35, 26, 35, 58), 'os.path.join', 'join', ({(35, 31, 35, 37): 'tmpdir', (35, 39, 35, 57): '"""results_workflow"""'}, {}), "(tmpdir, 'results_workflow')", False, 'from os.path import join, isfile\n')] |
Metro1998/P-DQN | trainer.py | 6ab2ac6991d2685f10887c16f854ebba6144b306 | # @author Metro
# @time 2021/11/24
import os.path
import gym
from agents.pdqn import P_DQN
from utilities.memory import ReplayBuffer
from utilities.utilities import *
from utilities.route_generator import generate_routefile
class Train_and_Evaluate(object):
def __init__(self, config):
# Environment
generate_routefile(seed=config.seed, demand=config.demand)
self.env = gym.make(config.environment)
# Agent
self.agent = P_DQN(config, self.env)
# Memory
self.replay_memory_size = config.hyperparameters['replay_memory_size']
self.batch_size = config.hyperparameters['batch_size']
self.updates_per_step = config.hyperparameters['updates_per_step']
self.memory = ReplayBuffer(self.replay_memory_size)
self.total_steps = 0
self.total_updates = 0
self.save_freq = config.save_freq
self.file_to_save = config.file_to_save
self.maximum_episodes = config.hyperparameters['maximum_episodes']
self.train = config.train
self.evaluate = config.evaluate
self.evaluate_internal = config.evaluate_internal
self.agent_to_color_dictionary = config.agent_to_color_dictionary
self.standard_deviation_results = config.standard_deviation_results
self.colors = ['red', 'blue', 'green', 'orange', 'yellow', 'purple']
self.color_idx = 0
self.rolling_score_window = config.rolling_score_window
self.runs_per_agent = config.runs_per_agent
self.agent_name = config.agent_name
self.ceil = config.ceil
# Training Loop
def train_agent(self):
"""
        Run the full training loop, periodically saving model checkpoints and score plots.
        :return: None
"""
rolling_scores_for_diff_runs = []
file_to_save_actor = os.path.join(self.file_to_save, 'actor/')
file_to_save_actor_param = os.path.join(self.file_to_save, 'actor_param/')
file_to_save_runs = os.path.join(self.file_to_save, 'runs_1/')
file_to_save_rolling_scores = os.path.join(self.file_to_save, 'rolling_scores/')
os.makedirs(file_to_save_actor, exist_ok=True)
os.makedirs(file_to_save_actor_param, exist_ok=True)
os.makedirs(file_to_save_runs, exist_ok=True)
os.makedirs(file_to_save_rolling_scores, exist_ok=True)
for run in range(self.runs_per_agent):
game_full_episodes_scores = []
game_full_episodes_rolling_scores = []
for i_episode in range(self.maximum_episodes):
if self.save_freq > 0 and i_episode % self.save_freq == 0:
actor_path = os.path.join(file_to_save_actor, 'episode{}'.format(i_episode))
actor_param_path = os.path.join(file_to_save_actor_param, 'episode{}'.format(i_episode))
self.agent.save_models(actor_path, actor_param_path)
episode_score = []
episode_steps = 0
done = 0
                state = self.env.reset()  # initial observation for this episode
while not done:
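                    # P-DQN step: pick a discrete action plus its continuous parameters;
                    # fall back to uniform random exploration until the replay buffer can serve a batch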
if len(self.memory) > self.batch_size:
action, action_params = self.agent.select_action(state, self.train)
if self.ceil:
action_params = np.ceil(action_params).squeeze(0)
action_for_env = [action, int(action_params[action])]
for i in range(self.updates_per_step):
self.agent.update(self.memory)
self.total_updates += 1
else:
action_params = np.random.randint(low=10, high=31, size=8)
action = np.random.randint(7, size=1)[0]
action_for_env = [action, action_params[action]]
next_state, reward, done, info = self.env.step(action_for_env)
print(reward)
episode_steps += 1
episode_score.append(info)
self.total_steps += 1
self.memory.push(state, action, action_params, reward, next_state, done)
state = next_state
episode_score_so_far = np.mean(episode_score)
game_full_episodes_scores.append(episode_score_so_far)
game_full_episodes_rolling_scores.append(
np.mean(game_full_episodes_scores[-1 * self.rolling_score_window:]))
print("Episode: {}, total steps:{}, episode steps:{}, scores:{}".format(
i_episode, self.total_steps, episode_steps, episode_score_so_far))
self.env.close()
file_path_for_pic = os.path.join(file_to_save_runs, 'episode{}_run{}.jpg'.format(i_episode, run))
visualize_results_per_run(agent_results=game_full_episodes_scores,
agent_name=self.agent_name,
save_freq=1,
file_path_for_pic=file_path_for_pic)
rolling_scores_for_diff_runs.append(game_full_episodes_rolling_scores)
file_path_for_pic = os.path.join(file_to_save_rolling_scores, 'rolling_scores.jpg')
visualize_overall_agent_results(agent_results=rolling_scores_for_diff_runs,
agent_name=self.agent_name,
show_mean_and_std_range=True,
agent_to_color_dictionary=self.agent_to_color_dictionary,
standard_deviation_results=1,
file_path_for_pic=file_path_for_pic
)
| [((16, 8, 16, 66), 'utilities.route_generator.generate_routefile', 'generate_routefile', (), '', False, 'from utilities.route_generator import generate_routefile\n'), ((17, 19, 17, 47), 'gym.make', 'gym.make', ({(17, 28, 17, 46): 'config.environment'}, {}), '(config.environment)', False, 'import gym\n'), ((20, 21, 20, 44), 'agents.pdqn.P_DQN', 'P_DQN', ({(20, 27, 20, 33): 'config', (20, 35, 20, 43): 'self.env'}, {}), '(config, self.env)', False, 'from agents.pdqn import P_DQN\n'), ((26, 22, 26, 59), 'utilities.memory.ReplayBuffer', 'ReplayBuffer', ({(26, 35, 26, 58): 'self.replay_memory_size'}, {}), '(self.replay_memory_size)', False, 'from utilities.memory import ReplayBuffer\n')] |
splunk/splunk-webframework | server/splunkdj/views.py | a4179558616f5f4fcbfa2b54e9179f30e6395264 | import sys
import pprint
import json
import datetime
import uuid
import urllib
import types
import traceback
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponseRedirect, Http404, HttpResponseServerError, HttpResponseNotFound
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.views.debug import ExceptionReporter, get_safe_settings
from django.template import TemplateDoesNotExist, Context
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.shortcuts import render
from splunkdj.decorators.render import render_to
from splunkdj.utility import make_splunkweb_url
from urlparse import urlparse
import logging
logger = logging.getLogger('spl.django.service')
error_logger = logging.getLogger('spl.django.request_error')
def format(value):
"""
Format values appropriately for json.dumps:
- Basic types will remain the same
- Unicode will be converted to str
- Everything else will be formatted using pprint
"""
if value is None:
return value
if isinstance(value, (int, long, str, float, list, dict, tuple, bool, unicode)):
return value
return str(pprint.pformat(value))
def get_exception_info(request):
# We use Django's debug reporter, even though we are doing our own template.
# This is because it has a great way of collecting all the useful info we
# need, so no reason not to leverage it
exc_info = sys.exc_info()
reporter = ExceptionReporter(request, *exc_info)
ctx = reporter.get_traceback_data()
# This is a refactor of what the technical_500_template contains, just
# doing the logic in Python rather than in a template. We collect all this
# information so that we can log it.
exception_type = ctx['exception_type'] if 'exception_type' in ctx else "No exception supplied"
exception_value = ctx['exception_value'] if 'exception_value' in ctx else "No exception supplied"
django_version = ctx["django_version_info"]
python_executable = ctx['sys_executable']
python_version = ctx['sys_version_info']
python_path = ctx['sys_path']
server_time = str(ctx['server_time'])
unicode_hint = None
if 'unicode_hint' in ctx:
        unicode_hint = ctx['unicode_hint']
last_frame = None
if 'lastframe' in ctx:
frame_info = ctx['lastframe']
last_frame = "%s in %s, line %s" % (frame_info['filename'], frame_info['function'], frame_info['lineno'])
loaders = []
if 'template_does_not_exist' in ctx and 'loader_debug_info' in ctx and ctx['loader_debug_info']:
for loader in ctx['loader_debug_info']:
loader_info = {"name": loader['loader'], "templates": []}
for tmpl in loader['templates']:
loader_info['templates'].append({"file": tmpl['name'], "exists": tmpl['exists']})
loaders.append(loader_info)
template_errors = None
if 'template_info' in ctx and ctx['template_info']:
template_info = ctx['template_info']
template_errors = {
"name": template_info['name'],
"line": template_info['line'],
"message": template_info['message']
}
exception_info = []
if 'frames' in ctx:
frames = ctx['frames']
for frame in frames:
frame_info = {
"filename": frame['filename'],
"function": frame['function'],
"line": frame['lineno'],
"context_line": frame['context_line'],
"vars": []
}
if 'vars' in frame:
for var in frame['vars']:
frame_info['vars'].append({
"variable": str(var[0]),
"value": format(var[1])
})
exception_info.append(frame_info)
request_info = {
"path_info": request.path_info,
"method": request.META['REQUEST_METHOD'],
"url": request.build_absolute_uri(),
"GET": {},
"POST": {},
"FILES": {},
"COOKIES": {},
"META": {}
}
if hasattr(request, "GET"):
for key, value in request.GET.iteritems():
request_info['GET'][key] = format(value)
if "filtered_POST" in ctx:
for key, value in ctx['filtered_POST'].iteritems():
request_info['POST'][key] = format(value)
if hasattr(request, "FILES"):
for key, value in request.FILES.iteritems():
request_info['FILES'][key] = format(value)
if hasattr(request, "COOKIES"):
for key, value in request.COOKIES.iteritems():
request_info['COOKIES'][key] = format(value)
if hasattr(request, "META"):
for key, value in request.META.iteritems():
request_info['META'][key] = format(value)
settings_info = {}
for key, value in ctx['settings'].iteritems():
settings_info[key] = format(value)
ctx['errorid'] = errorid = uuid.uuid4().hex
full_info = dict(
__time=datetime.datetime.now().isoformat(),
__uuid=errorid,
settings=settings_info,
request=request_info,
traceback=exception_info,
stack=traceback.format_exc(exc_info[2]),
last_frame=last_frame,
template_loaders=loaders,
template_errors=template_errors,
        unicode_hint=unicode_hint,
exception_type=exception_type,
exception_value=exception_value,
django_version=django_version,
python_version=python_version,
python_executable=python_executable,
python_path=python_path,
server_time=server_time
)
return (errorid, ctx, full_info)
def redirector(request, app, view):
params = {}
for (key, val) in request.GET.iteritems():
params[key] = val
full_name = "%s:%s" % (app, view)
if not view or not app:
logger.error("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view))
        raise Exception("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view))
return HttpResponseRedirect(reverse(full_name, kwargs=params))
def default_search(request):
app = request.app_name
lang_code = request.LANGUAGE_CODE
return HttpResponseRedirect(make_splunkweb_url("/%s/app/%s/search" % (lang_code, app)))
def default_flashtimeline(request):
app = request.app_name
lang_code = request.LANGUAGE_CODE
return HttpResponseRedirect(make_splunkweb_url("/%s/app/%s/flashtimeline" % (lang_code, app)))
@render_to()
@login_required
def default_template_render(request, template_name):
app = request.app_name
template_path = "%s:%s.html" % (app, template_name)
return {
"TEMPLATE": template_path
}
@never_cache
def handle404(request):
# This code is modified from views/debug.py in Django, as we want to display
# a debug style view, just modified slightly.
exc_info = sys.exc_info()
exception = exc_info[1]
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(render_to_string('splunkdj:404.html', context_instance=c))
@never_cache
def handle500(request):
# Let's attempt to render a more useful error message
errorid, ctx, exception = get_exception_info(request)
# We log the raw error to the log file, so that splunk can pick it up as
# JSON.
error_logger.error(json.dumps(exception, sort_keys=True))
# Build up the URL for making the query
lang_code = request.LANGUAGE_CODE
query_args = {
"q": 'search index=_internal sourcetype=django_error "%s" | head 1 | spath' % errorid,
"display.events.maxlines": 0,
"display.general.type": "events",
"earliest": 0,
"latest": ""
}
query_string = urllib.urlencode(query_args)
ctx['search_url'] = make_splunkweb_url("/%s/app/search/search?%s" % (lang_code, query_string))
return HttpResponseServerError(render_to_string('splunkdj:500.html', context_instance=Context(ctx)))
@never_cache
@render_to('splunkdj:page_config.html', mimetype="application/javascript")
@login_required
def get_page_config(request):
referer = request.META.get("HTTP_REFERER", "")
app = ""
app_label = ""
if referer:
try:
parsed = urlparse(referer)
parsed_path = parsed.path.replace("/%s/" % settings.MOUNT, "/")
resolved = resolve(parsed_path)
app = resolved.app_name
if app:
app_label = request.service.apps[app]["label"]
except Exception, e:
# If there was an error here, don't kill the entire page
# just return some default info
app = app or ""
app_label = app_label or app
zone_info = request.service.get('/services/search/timeparser/tz').body.read()
return {
"autoload": "1" == request.GET.get("autoload", "0"),
"config": json.dumps({
"SPLUNKD_FREE_LICENSE": request.user.is_free,
"MRSPARKLE_ROOT_PATH": "/%s" % str(settings.SPLUNK_WEB_MOUNT).strip("/"),
"DJANGO_ROOT_PATH": "/%s" % str(settings.RAW_MOUNT),
"MRSPARKLE_PORT_NUMBER": str(settings.SPLUNK_WEB_PORT),
"DJANGO_PORT_NUMBER": str(settings.DJANGO_PORT),
"LOCALE": str(request.LANGUAGE_CODE),
"JS_LOGGER_MODE": "None",
"USERNAME": str(request.user.username),
"USER_DISPLAYNAME": str(request.user.realname),
"APP": str(app),
"APP_DISPLAYNAME": str(app_label),
"SERVER_ZONEINFO": str(zone_info),
})
}
| [] |
mmulich/wildbook-ia | wbia/plottool/interact_keypoints.py | 81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663 | # -*- coding: utf-8 -*-
import logging
import utool as ut
import six
from . import draw_func2 as df2
from wbia.plottool import plot_helpers as ph
from wbia.plottool import interact_helpers as ih
from wbia.plottool.viz_featrow import draw_feat_row
from wbia.plottool.viz_keypoints import show_keypoints
from wbia.plottool import abstract_interaction
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class KeypointInteraction(abstract_interaction.AbstractInteraction):
r"""
CommandLine:
python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show
python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show --fname=lena.png
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.interact_keypoints import * # NOQA
>>> import numpy as np
>>> import wbia.plottool as pt
>>> import utool as ut
>>> import pyhesaff
>>> import vtool as vt
>>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts()
>>> ut.quit_if_noshow()
>>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct')
>>> pt.interact_keypoints.KeypointInteraction(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, autostart=True)
>>> pt.show_if_requested()
"""
def __init__(self, chip, kpts, vecs, fnum=0, figtitle=None, **kwargs):
self.chip = chip
self.kpts = kpts
self.vecs = vecs
self.figtitle = figtitle
self.mode = 0
super(KeypointInteraction, self).__init__(**kwargs)
def plot(self, fnum=None, pnum=(1, 1, 1), **kwargs):
import wbia.plottool as pt
fnum = pt.ensure_fnum(fnum)
pt.figure(fnum=fnum, docla=True, doclf=True)
show_keypoints(self.chip, self.kpts, fnum=fnum, pnum=pnum, **kwargs)
if self.figtitle is not None:
pt.set_figtitle(self.figtitle)
def _select_ith_kpt(self, fx):
logger.info('[interact] viewing ith=%r keypoint' % fx)
        # Get the fx-th keypoint
kp, sift = self.kpts[fx], self.vecs[fx]
# Draw the image with keypoint fx highlighted
self.plot(self.fnum, (2, 1, 1), sel_fx=fx)
# Draw the selected feature
nRows, nCols, px = (2, 3, 3)
draw_feat_row(self.chip, fx, kp, sift, self.fnum, nRows, nCols, px, None)
def on_click_outside(self, event):
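        # cycle the overlay mode: 1 draws ellipses, 2 draws points, anything else shows the plain image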
self.mode = (self.mode + 1) % 3
ell = self.mode == 1
pts = self.mode == 2
logger.info('... default kpts view mode=%r' % self.mode)
self.plot(self.fnum, ell=ell, pts=pts)
self.draw()
def on_click_inside(self, event, ax):
import wbia.plottool as pt
viztype = ph.get_plotdat(ax, 'viztype', None)
logger.info('[ik] viztype=%r' % viztype)
if viztype is None:
pass
elif viztype == 'keypoints':
kpts = ph.get_plotdat(ax, 'kpts', [])
if len(kpts) == 0:
logger.info('...nokpts')
else:
logger.info('...nearest')
x, y = event.xdata, event.ydata
import vtool as vt
fx = vt.nearest_point(x, y, kpts)[0]
self._select_ith_kpt(fx)
elif viztype == 'warped':
hs_fx = ph.get_plotdat(ax, 'fx', None)
if hs_fx is not None:
kp = self.kpts[hs_fx] # FIXME
sift = self.vecs[hs_fx]
df2.draw_keypoint_gradient_orientations(
self.chip, kp, sift=sift, mode='vec', fnum=pt.next_fnum()
)
pt.draw()
elif viztype.startswith('colorbar'):
pass
else:
logger.info('...unhandled')
self.draw()
def ishow_keypoints(chip, kpts, desc, fnum=0, figtitle=None, nodraw=False, **kwargs):
"""
    TODO: Deprecate in favor of the class
CommandLine:
python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show
python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show --fname zebra.png
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.interact_keypoints import * # NOQA
>>> import numpy as np
>>> import wbia.plottool as pt
>>> import utool as ut
>>> import pyhesaff
>>> import vtool as vt
>>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts()
>>> ut.quit_if_noshow()
>>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct')
>>> pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4)
>>> pt.show_if_requested()
"""
if isinstance(chip, six.string_types):
import vtool as vt
chip = vt.imread(chip)
fig = ih.begin_interaction('keypoint', fnum)
annote_ptr = [1]
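    # single-element list so the nested click handler below can mutate the current overlay mode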
self = ut.DynStruct() # MOVE TO A CLASS INTERACTION
self.kpts = kpts
vecs = desc
self.vecs = vecs
def _select_ith_kpt(fx):
logger.info('[interact] viewing ith=%r keypoint' % fx)
        # Get the fx-th keypoint
kp, sift = kpts[fx], vecs[fx]
# Draw the image with keypoint fx highlighted
_viz_keypoints(fnum, (2, 1, 1), sel_fx=fx, **kwargs) # MAYBE: remove kwargs
# Draw the selected feature
nRows, nCols, px = (2, 3, 3)
draw_feat_row(chip, fx, kp, sift, fnum, nRows, nCols, px, None)
def _viz_keypoints(fnum, pnum=(1, 1, 1), **kwargs):
df2.figure(fnum=fnum, docla=True, doclf=True)
show_keypoints(chip, kpts, fnum=fnum, pnum=pnum, **kwargs)
if figtitle is not None:
df2.set_figtitle(figtitle)
def _on_keypoints_click(event):
logger.info('[viz] clicked keypoint view')
if event is None or event.xdata is None or event.inaxes is None:
annote_ptr[0] = (annote_ptr[0] + 1) % 3
mode = annote_ptr[0]
ell = mode == 1
pts = mode == 2
logger.info('... default kpts view mode=%r' % mode)
_viz_keypoints(fnum, ell=ell, pts=pts, **kwargs) # MAYBE: remove kwargs
else:
ax = event.inaxes
viztype = ph.get_plotdat(ax, 'viztype', None)
logger.info('[ik] viztype=%r' % viztype)
if viztype == 'keypoints':
kpts = ph.get_plotdat(ax, 'kpts', [])
if len(kpts) == 0:
logger.info('...nokpts')
else:
logger.info('...nearest')
x, y = event.xdata, event.ydata
import vtool as vt
fx = vt.nearest_point(x, y, kpts)[0]
_select_ith_kpt(fx)
elif viztype == 'warped':
hs_fx = ph.get_plotdat(ax, 'fx', None)
# kpts = ph.get_plotdat(ax, 'kpts', [])
if hs_fx is not None:
# Ugly. Interactions should be changed to classes.
kp = self.kpts[hs_fx] # FIXME
sift = self.vecs[hs_fx]
df2.draw_keypoint_gradient_orientations(
chip, kp, sift=sift, mode='vec', fnum=df2.next_fnum()
)
elif viztype.startswith('colorbar'):
pass
# Hack to get a specific scoring feature
# sortx = self.fs.argsort()
# idx = np.clip(int(np.round(y * len(sortx))), 0, len(sortx) - 1)
# mx = sortx[idx]
# (fx1, fx2) = self.fm[mx]
# (fx1, fx2) = self.fm[mx]
# logger.info('... selected score at rank idx=%r' % (idx,))
# logger.info('... selected score with fs=%r' % (self.fs[mx],))
# logger.info('... resolved to mx=%r' % mx)
# logger.info('... fx1, fx2 = %r, %r' % (fx1, fx2,))
# self.select_ith_match(mx)
else:
logger.info('...unhandled')
ph.draw()
# Draw without keypoints the first time
_viz_keypoints(fnum, **kwargs) # MAYBE: remove kwargs
ih.connect_callback(fig, 'button_press_event', _on_keypoints_click)
if not nodraw:
ph.draw()
| [((12, 24, 12, 44), 'utool.inject2', 'ut.inject2', ({(12, 35, 12, 43): '__name__'}, {}), '(__name__)', True, 'import utool as ut\n'), ((13, 9, 13, 34), 'logging.getLogger', 'logging.getLogger', ({(13, 27, 13, 33): '"""wbia"""'}, {}), "('wbia')", False, 'import logging\n'), ((132, 10, 132, 48), 'wbia.plottool.interact_helpers.begin_interaction', 'ih.begin_interaction', ({(132, 31, 132, 41): '"""keypoint"""', (132, 43, 132, 47): 'fnum'}, {}), "('keypoint', fnum)", True, 'from wbia.plottool import interact_helpers as ih\n'), ((135, 11, 135, 25), 'utool.DynStruct', 'ut.DynStruct', ({}, {}), '()', True, 'import utool as ut\n'), ((209, 4, 209, 71), 'wbia.plottool.interact_helpers.connect_callback', 'ih.connect_callback', ({(209, 24, 209, 27): 'fig', (209, 29, 209, 49): '"""button_press_event"""', (209, 51, 209, 70): '_on_keypoints_click'}, {}), "(fig, 'button_press_event', _on_keypoints_click)", True, 'from wbia.plottool import interact_helpers as ih\n'), ((48, 15, 48, 35), 'wbia.plottool.ensure_fnum', 'pt.ensure_fnum', ({(48, 30, 48, 34): 'fnum'}, {}), '(fnum)', True, 'import wbia.plottool as pt\n'), ((49, 8, 49, 52), 'wbia.plottool.figure', 'pt.figure', (), '', True, 'import wbia.plottool as pt\n'), ((50, 8, 50, 76), 'wbia.plottool.viz_keypoints.show_keypoints', 'show_keypoints', (), '', False, 'from wbia.plottool.viz_keypoints import show_keypoints\n'), ((62, 8, 62, 81), 'wbia.plottool.viz_featrow.draw_feat_row', 'draw_feat_row', ({(62, 22, 62, 31): 'self.chip', (62, 33, 62, 35): 'fx', (62, 37, 62, 39): 'kp', (62, 41, 62, 45): 'sift', (62, 47, 62, 56): 'self.fnum', (62, 58, 62, 63): 'nRows', (62, 65, 62, 70): 'nCols', (62, 72, 62, 74): 'px', (62, 76, 62, 80): 'None'}, {}), '(self.chip, fx, kp, sift, self.fnum, nRows, nCols, px, None)', False, 'from wbia.plottool.viz_featrow import draw_feat_row\n'), ((75, 18, 75, 53), 'wbia.plottool.plot_helpers.get_plotdat', 'ph.get_plotdat', ({(75, 33, 75, 35): 'ax', (75, 37, 75, 46): '"""viztype"""', (75, 48, 75, 52): 'None'}, {}), "(ax, 'viztype', None)", True, 'from wbia.plottool import plot_helpers as ph\n'), ((131, 15, 131, 30), 'vtool.imread', 'vt.imread', ({(131, 25, 131, 29): 'chip'}, {}), '(chip)', True, 'import vtool as vt\n'), ((148, 8, 148, 71), 'wbia.plottool.viz_featrow.draw_feat_row', 'draw_feat_row', ({(148, 22, 148, 26): 'chip', (148, 28, 148, 30): 'fx', (148, 32, 148, 34): 'kp', (148, 36, 148, 40): 'sift', (148, 42, 148, 46): 'fnum', (148, 48, 148, 53): 'nRows', (148, 55, 148, 60): 'nCols', (148, 62, 148, 64): 'px', (148, 66, 148, 70): 'None'}, {}), '(chip, fx, kp, sift, fnum, nRows, nCols, px, None)', False, 'from wbia.plottool.viz_featrow import draw_feat_row\n'), ((152, 8, 152, 66), 'wbia.plottool.viz_keypoints.show_keypoints', 'show_keypoints', (), '', False, 'from wbia.plottool.viz_keypoints import show_keypoints\n'), ((205, 8, 205, 17), 'wbia.plottool.plot_helpers.draw', 'ph.draw', ({}, {}), '()', True, 'from wbia.plottool import plot_helpers as ph\n'), ((211, 8, 211, 17), 'wbia.plottool.plot_helpers.draw', 'ph.draw', ({}, {}), '()', True, 'from wbia.plottool import plot_helpers as ph\n'), ((52, 12, 52, 42), 'wbia.plottool.set_figtitle', 'pt.set_figtitle', ({(52, 28, 52, 41): 'self.figtitle'}, {}), '(self.figtitle)', True, 'import wbia.plottool as pt\n'), ((167, 22, 167, 57), 'wbia.plottool.plot_helpers.get_plotdat', 'ph.get_plotdat', ({(167, 37, 167, 39): 'ax', (167, 41, 167, 50): '"""viztype"""', (167, 52, 167, 56): 'None'}, {}), "(ax, 'viztype', None)", True, 'from wbia.plottool import plot_helpers as ph\n'), ((80, 19, 80, 49), 
'wbia.plottool.plot_helpers.get_plotdat', 'ph.get_plotdat', ({(80, 34, 80, 36): 'ax', (80, 38, 80, 44): '"""kpts"""', (80, 46, 80, 48): '[]'}, {}), "(ax, 'kpts', [])", True, 'from wbia.plottool import plot_helpers as ph\n'), ((170, 23, 170, 53), 'wbia.plottool.plot_helpers.get_plotdat', 'ph.get_plotdat', ({(170, 38, 170, 40): 'ax', (170, 42, 170, 48): '"""kpts"""', (170, 50, 170, 52): '[]'}, {}), "(ax, 'kpts', [])", True, 'from wbia.plottool import plot_helpers as ph\n'), ((91, 20, 91, 50), 'wbia.plottool.plot_helpers.get_plotdat', 'ph.get_plotdat', ({(91, 35, 91, 37): 'ax', (91, 39, 91, 43): '"""fx"""', (91, 45, 91, 49): 'None'}, {}), "(ax, 'fx', None)", True, 'from wbia.plottool import plot_helpers as ph\n'), ((181, 24, 181, 54), 'wbia.plottool.plot_helpers.get_plotdat', 'ph.get_plotdat', ({(181, 39, 181, 41): 'ax', (181, 43, 181, 47): '"""fx"""', (181, 49, 181, 53): 'None'}, {}), "(ax, 'fx', None)", True, 'from wbia.plottool import plot_helpers as ph\n'), ((88, 21, 88, 49), 'vtool.nearest_point', 'vt.nearest_point', ({(88, 38, 88, 39): 'x', (88, 41, 88, 42): 'y', (88, 44, 88, 48): 'kpts'}, {}), '(x, y, kpts)', True, 'import vtool as vt\n'), ((98, 16, 98, 25), 'wbia.plottool.draw', 'pt.draw', ({}, {}), '()', True, 'import wbia.plottool as pt\n'), ((178, 25, 178, 53), 'vtool.nearest_point', 'vt.nearest_point', ({(178, 42, 178, 43): 'x', (178, 45, 178, 46): 'y', (178, 48, 178, 52): 'kpts'}, {}), '(x, y, kpts)', True, 'import vtool as vt\n'), ((96, 63, 96, 77), 'wbia.plottool.next_fnum', 'pt.next_fnum', ({}, {}), '()', True, 'import wbia.plottool as pt\n')] |
DronMDF/manabot | tb/storage/__init__.py | b412e8cb9b5247f05487bed4cbf4967f7b58327f | from .database import StDatabase
from .telegram import StTelegram
from .tinydb import TinyDataBase, TinySelect
from .utility import StDispatch
| [] |
hbqdev/algorithms | algorithms/maths/chinese_remainder_theorem.py | 65cc8551d86d7e065069d165dd8bf9baf10345a0 | from algorithms.maths.gcd import gcd
from typing import List
def solve_chinese_remainder(num : List[int], rem : List[int]):
"""
Computes the smallest x that satisfies the chinese remainder theorem
for a system of equations.
The system of equations has the form:
x % num[0] = rem[0]
x % num[1] = rem[1]
...
x % num[k - 1] = rem[k - 1]
Where k is the number of elements in num and rem, k > 0.
    All numbers in num need to be pairwise coprime, otherwise an exception is raised.
returns x: the smallest value for x that satisfies the system of equations
"""
if not len(num) == len(rem):
raise Exception("num and rem should have equal length")
if not len(num) > 0:
raise Exception("Lists num and rem need to contain at least one element")
for n in num:
if not n > 1:
raise Exception("All numbers in num needs to be > 1")
if not _check_coprime(num):
raise Exception("All pairs of numbers in num are not coprime")
k = len(num)
x = 1
while True:
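        # brute-force search: try successive values of x until every congruence x % num[i] == rem[i] holds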
i = 0
while i < k:
if x % num[i] != rem[i]:
break
i += 1
if i == k:
return x
else:
x += 1
def _check_coprime(l : List[int]):
for i in range(len(l)):
for j in range(len(l)):
if i == j:
continue
if gcd(l[i], l[j]) != 1:
return False
return True
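# Illustrative usage (added for clarity, not part of the original module): the classic
# system x % 3 == 2, x % 5 == 3, x % 7 == 2 has smallest solution 23, so
#   solve_chinese_remainder([3, 5, 7], [2, 3, 2])  # -> 23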
| [((44, 15, 44, 30), 'algorithms.maths.gcd.gcd', 'gcd', ({(44, 19, 44, 23): 'l[i]', (44, 25, 44, 29): 'l[j]'}, {}), '(l[i], l[j])', False, 'from algorithms.maths.gcd import gcd\n')] |
Jette16/spacy-course | exercises/ja/exc_03_16_01.py | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | import spacy
nlp = spacy.load("ja_core_news_sm")
text = (
"チックフィレイはジョージア州カレッジパークに本社を置く、"
"チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)
# Perform tokenization only
doc = nlp(text)
print([token.text for token in doc])
| [((3, 6, 3, 35), 'spacy.load', 'spacy.load', ({(3, 17, 3, 34): '"""ja_core_news_sm"""'}, {}), "('ja_core_news_sm')", False, 'import spacy\n')] |
caiyueliang/chineseocr | apphelper/image.py | 4495598f938936c6bcb2222fa44f840a7919212c | # -*- coding: utf-8 -*-
"""
## Image-related helper functions
@author: lywen
"""
import sys
import six
import os
import base64
import requests
import numpy as np
import cv2
from PIL import Image
import traceback
import uuid
from glob import glob
from bs4 import BeautifulSoup
def sort_box_(box):
x1,y1,x2,y2,x3,y3,x4,y4 = box[:8]
pts = (x1,y1),(x2,y2),(x3,y3),(x4,y4)
pts = np.array(pts, dtype="float32")
(x1,y1),(x2,y2),(x3,y3),(x4,y4) = _order_points(pts)
"""
newBox = [[x1,y1],[x2,y2],[x3,y3],[x4,y4]]
## sort x
newBox = sorted(newBox,key=lambda x:x[0])
x1,y1 = sorted(newBox[:2],key=lambda x:x[1])[0]
index = newBox.index([x1,y1])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[1])
x4,y4 = sorted(newBox[:2],key=lambda x:x[0])[0]
index = newBox.index([x4,y4])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[0])
x2,y2 = sorted(newBox[:2],key=lambda x:x[1])[0]
index = newBox.index([x2,y2])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[1])
x3,y3 = sorted(newBox[:2],key=lambda x:x[0])[0]
"""
return x1,y1,x2,y2,x3,y3,x4,y4
import numpy as np
from scipy.spatial import distance as dist
def _order_points(pts):
    # sort the points by their x-coordinate
"""
---------------------
    Author: Tong_T
    Source: CSDN
    Original post: https://blog.csdn.net/Tong_T/article/details/81907132
    Copyright notice: this is the blogger's original article; please include a link to the original post when reposting.
"""
x_sorted = pts[np.argsort(pts[:, 0]), :]
    # take the two left-most and the two right-most points
    # from the x-sorted coordinates
left_most = x_sorted[:2, :]
right_most = x_sorted[2:, :]
    # now sort the two left-most points by their y-coordinate so we can pick out the top-left and bottom-left corners
left_most = left_most[np.argsort(left_most[:, 1]), :]
(tl, bl) = left_most
    # with the top-left point as an anchor, compute the Euclidean distances from it to the two right-most points;
    # by the Pythagorean theorem, the point with the larger distance is the bottom-right corner
distance = dist.cdist(tl[np.newaxis], right_most, "euclidean")[0]
(br, tr) = right_most[np.argsort(distance)[::-1], :]
    # return the coordinates ordered top-left, top-right, bottom-right, bottom-left
return np.array([tl, tr, br, bl], dtype="float32")
def solve(box):
"""
    Recover (angle, w, h, cx, cy) of a w x h box rotated by angle around its centre (cx, cy), given the four corners.
    x = cx-w/2
    y = cy-h/2
    x1-cx = -w/2*cos(angle) + h/2*sin(angle)
    y1-cy = -w/2*sin(angle) - h/2*cos(angle)
    h*(x1-cx) = -w*h/2*cos(angle) + h*h/2*sin(angle)
    w*(y1-cy) = -w*w/2*sin(angle) - h*w/2*cos(angle)
    (h*h+w*w)/2*sin(angle) = h*(x1-cx) - w*(y1-cy)
"""
x1,y1,x2,y2,x3,y3,x4,y4= box[:8]
cx = (x1+x3+x2+x4)/4.0
cy = (y1+y3+y4+y2)/4.0
w = (np.sqrt((x2-x1)**2+(y2-y1)**2)+np.sqrt((x3-x4)**2+(y3-y4)**2))/2
h = (np.sqrt((x2-x3)**2+(y2-y3)**2)+np.sqrt((x1-x4)**2+(y1-y4)**2))/2
#x = cx-w/2
#y = cy-h/2
sinA = (h*(x1-cx)-w*(y1 -cy))*1.0/(h*h+w*w)*2
if abs(sinA)>1:
angle = None
else:
angle = np.arcsin(sinA)
return angle,w,h,cx,cy
def read_singLine_for_yolo(p):
"""
    Read an image that contains a single line of text.
"""
im = Image.open(p).convert('RGB')
w,h = im.size
boxes = [{'cx':w/2,'cy':h/2,'w':w,'h':h,'angle':0.0}]
return im,boxes
def read_voc_xml(p):
    ## read a VOC XML annotation file
boxes = []
if os.path.exists(p):
with open(p) as f:
xmlString = f.read()
xmlString = BeautifulSoup(xmlString,'lxml')
objList = xmlString.findAll('object')
for obj in objList:
robndbox = obj.find('robndbox')
bndbox = obj.find('bndbox')
if robndbox is not None and bndbox is None:
cx = np.float(robndbox.find('cx').text)
cy = np.float(robndbox.find('cy').text)
w = np.float(robndbox.find('w').text)
h = np.float(robndbox.find('h').text)
angle = robndbox.find('angle').text
if angle=='nan' or h==0 or w==0:
#boxes = []
continue
angle = np.float(angle)
if abs(angle)>np.pi/2:
w,h = h,w
angle = abs(angle)%(np.pi/2)*np.sign(angle)
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx,cy,w,h,angle)
x1,y1,x2,y2,x3,y3,x4,y4 = sort_box_([x1,y1,x2,y2,x3,y3,x4,y4])
"""
if abs(angle)>np.pi/2:
##lableImg bug
x1,y1,x2,y2,x3,y3,x4,y4 = sort_box_([x1,y1,x2,y2,x3,y3,x4,y4])
"""
angle,w,h,cx,cy = solve([x1,y1,x2,y2,x3,y3,x4,y4])
else:
xmin = np.float(bndbox.find('xmin').text)
xmax = np.float(bndbox.find('xmax').text)
ymin = np.float(bndbox.find('ymin').text)
ymax = np.float(bndbox.find('ymax').text)
cx = (xmin+xmax)/2.0
cy = (ymin+ymax)/2.0
w = (-xmin+xmax)#/2.0
h = (-ymin+ymax)#/2.0
angle =0.0
boxes.append({'cx':cx,'cy':cy,'w':w,'h':h,'angle':angle})
return boxes
def xy_rotate_box(cx,cy,w,h,angle):
"""
    Corner coordinates of a w x h box rotated by angle around the point (cx, cy).
    x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
    y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
"""
cx = float(cx)
cy = float(cy)
w = float(w)
h = float(h)
angle = float(angle)
x1,y1 = rotate(cx-w/2,cy-h/2,angle,cx,cy)
x2,y2 = rotate(cx+w/2,cy-h/2,angle,cx,cy)
x3,y3 = rotate(cx+w/2,cy+h/2,angle,cx,cy)
x4,y4 = rotate(cx-w/2,cy+h/2,angle,cx,cy)
return x1,y1,x2,y2,x3,y3,x4,y4
from numpy import cos,sin,pi,tan
def rotate(x,y,angle,cx,cy):
"""
    Rotate the point (x, y) around (cx, cy) by angle (in radians).
"""
#angle = angle*pi/180
x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
return x_new,y_new
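# A minimal round-trip sketch (the values are arbitrary examples): xy_rotate_box() produces
# the four corners of a rotated box and solve() recovers the original parameters from them.
def _demo_rotate_box_roundtrip():
    corners = xy_rotate_box(cx=50, cy=20, w=40, h=10, angle=0.3)
    angle, w, h, cx, cy = solve(corners)
    return angle, w, h, cx, cy  # approximately (0.3, 40, 10, 50, 20)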
def resize_box(boxes,scale):
newBoxes = []
for box in boxes:
cx = box['cx']*scale
cy = box['cy']*scale
w = box['w']*scale
h = box['h']*scale
angle = box['angle']
newBoxes.append({'cx':cx,'cy':cy,'w':w,'h':h,'angle':angle})
return newBoxes
def resize_im(w,h, scale=416, max_scale=608):
f=float(scale)/min(h, w)
if max_scale is not None:
if f*max(h, w)>max_scale:
f=float(max_scale)/max(h, w)
newW,newH = int(w*f),int(h*f)
return newW-(newW%32),newH-(newH%32)
def get_rorate(boxes,im,degree=0):
"""
    Return the rotated image and the boxes transformed to match the rotation.
"""
imgW,imgH = im.size
newBoxes = []
for line in boxes:
cx0,cy0 = imgW/2.0,imgH/2.0
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(**line)
x1,y1 = rotate(x1,y1,-degree/180*np.pi,cx0,cy0)
x2,y2 = rotate(x2,y2,-degree/180*np.pi,cx0,cy0)
x3,y3 = rotate(x3,y3,-degree/180*np.pi,cx0,cy0)
x4,y4 = rotate(x4,y4,-degree/180*np.pi,cx0,cy0)
box = (x1,y1,x2,y2,x3,y3,x4,y4)
degree_,w_,h_,cx_,cy_ = solve(box)
newLine = {'angle':degree_,'w':w_,'h':h_,'cx':cx_,'cy':cy_}
newBoxes.append(newLine)
return im.rotate(degree,center=(imgW/2.0,imgH/2.0 )),newBoxes
def letterbox_image(image, size,fillValue=[128,128,128]):
'''
resize image with unchanged aspect ratio using padding
'''
image_w, image_h = image.size
w, h = size
new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h))
new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h))
resized_image = image.resize((new_w,new_h), Image.BICUBIC)
if fillValue is None:
        fillValue = [int(x.mean()) for x in cv2.split(np.array(image))]
boxed_image = Image.new('RGB', size, tuple(fillValue))
boxed_image.paste(resized_image,)
return boxed_image,new_w/image_w
def box_split(boxes,splitW = 15):
newBoxes = []
for box in boxes:
w = box['w']
h = box['h']
cx = box['cx']
cy=box['cy']
angle = box['angle']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx,cy,w,h,angle)
splitBoxes =[]
i = 1
tanAngle = tan(-angle)
while True:
flag = 0 if i==1 else 1
xmin = x1+(i-1)*splitW
ymin = y1-tanAngle*splitW*i
xmax = x1+i*splitW
ymax = y4-(i-1)*tanAngle*splitW +flag*tanAngle*(x4-x1)
if xmax>max(x2,x3) and xmin>max(x2,x3):
break
splitBoxes.append([int(xmin),int(ymin),int(xmax),int(ymax)])
i+=1
newBoxes.append(splitBoxes)
return newBoxes
def get_box_spilt(boxes,im,sizeW,SizeH,splitW=8,isRoate=False,rorateDegree=0):
"""
    isRoate: whether to rotate the boxes along with the image
"""
size = sizeW,SizeH
if isRoate:
        ## rotate the boxes
im,boxes = get_rorate(boxes,im,degree=rorateDegree)
newIm,f = letterbox_image(im, size)
newBoxes = resize_box(boxes,f)
newBoxes = sum(box_split(newBoxes,splitW),[])
newBoxes = [box+[1] for box in newBoxes]
return newBoxes,newIm
def box_rotate(box,angle=0,imgH=0,imgW=0):
"""
    Rotate the coordinates counter-clockwise by 0/90/180/270 degrees within an imgH x imgW image.
"""
x1,y1,x2,y2,x3,y3,x4,y4 = box[:8]
if angle==90:
x1_,y1_ = y2,imgW-x2
x2_,y2_ = y3,imgW-x3
x3_,y3_ = y4,imgW-x4
x4_,y4_ = y1,imgW-x1
elif angle==180:
x1_,y1_ = imgW-x3,imgH-y3
x2_,y2_ = imgW-x4,imgH-y4
x3_,y3_ = imgW-x1,imgH-y1
x4_,y4_ = imgW-x2,imgH-y2
elif angle==270:
x1_,y1_ = imgH-y4,x4
x2_,y2_ = imgH-y1,x1
x3_,y3_ = imgH-y2,x2
x4_,y4_ = imgH-y3,x3
else:
x1_,y1_,x2_,y2_,x3_,y3_,x4_,y4_ = x1,y1,x2,y2,x3,y3,x4,y4
return (x1_,y1_,x2_,y2_,x3_,y3_,x4_,y4_)
def solve(box):
"""
    Recover (angle, w, h, cx, cy) of a w x h box rotated by angle around its centre (cx, cy), given the four corners.
    x = cx-w/2
    y = cy-h/2
    x1-cx = -w/2*cos(angle) + h/2*sin(angle)
    y1-cy = -w/2*sin(angle) - h/2*cos(angle)
    h*(x1-cx) = -w*h/2*cos(angle) + h*h/2*sin(angle)
    w*(y1-cy) = -w*w/2*sin(angle) - h*w/2*cos(angle)
    (h*h+w*w)/2*sin(angle) = h*(x1-cx) - w*(y1-cy)
"""
x1,y1,x2,y2,x3,y3,x4,y4= box[:8]
cx = (x1+x3+x2+x4)/4.0
cy = (y1+y3+y4+y2)/4.0
w = (np.sqrt((x2-x1)**2+(y2-y1)**2)+np.sqrt((x3-x4)**2+(y3-y4)**2))/2
h = (np.sqrt((x2-x3)**2+(y2-y3)**2)+np.sqrt((x1-x4)**2+(y1-y4)**2))/2
sinA = (h*(x1-cx)-w*(y1 -cy))*1.0/(h*h+w*w)*2
angle = np.arcsin(sinA)
return angle,w,h,cx,cy
from numpy import cos,sin,pi
def rotate(x,y,angle,cx,cy):
angle = angle#*pi/180
x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
return x_new,y_new
def xy_rotate_box(cx,cy,w,h,angle):
"""
    Corner coordinates of a w x h box rotated by angle around the point (cx, cy).
    x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
    y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
"""
cx = float(cx)
cy = float(cy)
w = float(w)
h = float(h)
angle = float(angle)
x1,y1 = rotate(cx-w/2,cy-h/2,angle,cx,cy)
x2,y2 = rotate(cx+w/2,cy-h/2,angle,cx,cy)
x3,y3 = rotate(cx+w/2,cy+h/2,angle,cx,cy)
x4,y4 = rotate(cx-w/2,cy+h/2,angle,cx,cy)
return x1,y1,x2,y2,x3,y3,x4,y4
# def rotate_cut_img(im, degree, box, w, h, leftAdjust=False, rightAdjust=False, alph=0.2):
# x1, y1, x2, y2, x3, y3, x4, y4 = box[:8]
# # print('rotate_cut_img', x1, y1, x2, y2, x3, y3, x4, y4)
#
# x_center, y_center = np.mean([x1, x2, x3, x4]), np.mean([y1, y2, y3, y4])
# right = 0
# left = 0
# if rightAdjust:
# right = 1
# if leftAdjust:
# left = 1
#
# # print(im.shape)
# box = (max(1, x_center - w / 2 - left * alph * (w / 2)), # xmin
# y_center - h / 2, # ymin
# min(x_center + w / 2 + right * alph * (w / 2), im.shape[1] - 1), # xmax
# y_center + h / 2) # ymax
# # print('box', box)
#
# newW = int(box[2] - box[0])
# newH = int(box[3] - box[1])
#
# # =====================================================
# # remap_points = np.array([[0, 0], [164, 0], [164, 48], [0, 48]], dtype=np.float32)
# remap_points = np.array([[0, 0], [newW, 0], [newW, newH], [0, newH]], dtype=np.float32)
# old_points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], dtype=np.float32)
#     # perspective transform, using OpenCV functions
# M = cv2.getPerspectiveTransform(old_points, remap_points)
# tmpImg = cv2.warpPerspective(im, M, (newW, newH))
# # cv2.imshow('rotate_cut_img', tmpImg)
# # cv2.waitKey(0)
#
# return tmpImg, newW, newH
def rotate_cut_img(im, degree, box, w, h, leftAdjust=False, rightAdjust=False, alph=0.2):
x1, y1, x2, y2, x3, y3, x4, y4 = box[:8]
x_center, y_center = np.mean([x1, x2, x3, x4]), np.mean([y1, y2, y3, y4])
degree_ = degree * 180.0 / np.pi
right = 0
left = 0
if rightAdjust:
right = 1
if leftAdjust:
left = 1
box = (max(1, x_center - w / 2 - left * alph * (w / 2)), # xmin
y_center - h / 2, # ymin
min(x_center + w / 2 + right * alph * (w / 2), im.size[0] - 1), # xmax
y_center + h / 2) # ymax
newW = box[2] - box[0]
newH = box[3] - box[1]
tmpImg = im.rotate(degree_, center=(x_center, y_center)).crop(box)
return tmpImg, newW, newH
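# A minimal usage sketch, assuming pil_image is a PIL Image and box holds the eight corner
# coordinates of a detected text region: solve() recovers the region's angle and size, and
# rotate_cut_img() returns the straightened crop.
def _demo_rotate_cut_img(pil_image, box):
    angle, w, h, cx, cy = solve(box)
    crop, new_w, new_h = rotate_cut_img(pil_image, angle, box, w, h,
                                        leftAdjust=True, rightAdjust=True, alph=0.2)
    return crop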
def letterbox_image(image, size, fillValue=[128, 128, 128]):
'''resize image with unchanged aspect ratio using padding'''
image_w, image_h = image.size
w, h = size
new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h))
new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h))
resized_image = image.resize((new_w,new_h), Image.BICUBIC)
if fillValue is None:
        fillValue = [int(x.mean()) for x in cv2.split(np.array(image))]
boxed_image = Image.new('RGB', size, tuple(fillValue))
boxed_image.paste(resized_image, (0,0))
return boxed_image,new_w/image_w
from scipy.ndimage import filters,interpolation,morphology,measurements,minimum
#from pylab import amin, amax
from numpy import amin, amax
def estimate_skew_angle(raw):
"""
    Estimate the skew angle of the text in an image.
"""
def resize_im(im, scale, max_scale=None):
f=float(scale)/min(im.shape[0], im.shape[1])
if max_scale!=None and f*max(im.shape[0], im.shape[1])>max_scale:
f=float(max_scale)/max(im.shape[0], im.shape[1])
return cv2.resize(im, (0, 0), fx=f, fy=f)
raw = resize_im(raw, scale=600, max_scale=900)
image = raw-amin(raw)
image = image/amax(image)
m = interpolation.zoom(image,0.5)
m = filters.percentile_filter(m,80,size=(20,2))
m = filters.percentile_filter(m,80,size=(2,20))
m = interpolation.zoom(m,1.0/0.5)
w,h = min(image.shape[1],m.shape[1]),min(image.shape[0],m.shape[0])
flat = np.clip(image[:h,:w]-m[:h,:w]+1,0,1)
d0,d1 = flat.shape
o0,o1 = int(0.1*d0),int(0.1*d1)
flat = amax(flat)-flat
flat -= amin(flat)
est = flat[o0:d0-o0,o1:d1-o1]
angles = range(-15,15)
estimates = []
for a in angles:
roest =interpolation.rotate(est,a,order=0,mode='constant')
v = np.mean(roest,axis=1)
v = np.var(v)
estimates.append((v,a))
_,a = max(estimates)
return a
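# A minimal usage sketch, assuming gray is a 2-D grayscale uint8 array of a scanned page:
# the returned angle is in degrees, and re-rotating with the same routine the estimator uses
# internally straightens the text rows.
def _demo_deskew(gray):
    angle = estimate_skew_angle(gray)
    return interpolation.rotate(gray, angle, order=0, mode='constant')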
def sort_box(box):
"""
    Sort the boxes (top to bottom) to lay out the page.
box[index, 0] = x1
box[index, 1] = y1
box[index, 2] = x2
box[index, 3] = y2
box[index, 4] = x3
box[index, 5] = y3
box[index, 6] = x4
box[index, 7] = y4
"""
box = sorted(box,key=lambda x:sum([x[1],x[3],x[5],x[7]]))
return list(box)
def get_boxes( bboxes):
"""
boxes: bounding boxes
"""
text_recs=np.zeros((len(bboxes), 8), np.int)
index = 0
for box in bboxes:
b1 = box[6] - box[7] / 2
b2 = box[6] + box[7] / 2
x1 = box[0]
y1 = box[5] * box[0] + b1
x2 = box[2]
y2 = box[5] * box[2] + b1
x3 = box[0]
y3 = box[5] * box[0] + b2
x4 = box[2]
y4 = box[5] * box[2] + b2
disX = x2 - x1
disY = y2 - y1
width = np.sqrt(disX*disX + disY*disY)
fTmp0 = y3 - y1
fTmp1 = fTmp0 * disY / width
x = np.fabs(fTmp1*disX / width)
y = np.fabs(fTmp1*disY / width)
if box[5] < 0:
x1 -= x
y1 += y
x4 += x
y4 -= y
else:
x2 += x
y2 += y
x3 -= x
y3 -= y
text_recs[index, 0] = x1
text_recs[index, 1] = y1
text_recs[index, 2] = x2
text_recs[index, 3] = y2
text_recs[index, 4] = x3
text_recs[index, 5] = y3
text_recs[index, 6] = x4
text_recs[index, 7] = y4
index = index + 1
return text_recs
def union_rbox(result,alpha=0.1):
"""
    Merge boxes that belong to the same text line.
"""
def diff(box1,box2):
"""
        Compute the vertical distance between box1 and box2, normalized by half the smaller box height.
"""
cy1 = box1['cy']
cy2 = box2['cy']
h1 = box1['h']
h2 = box2['h']
return abs(cy1-cy2)/max(0.01,min(h1/2,h2/2))
def sort_group_box(boxes):
"""
        Sort the boxes left to right and merge them into a single box.
"""
N = len(boxes)
boxes = sorted(boxes,key=lambda x:x['cx'])
text = ' '.join([bx['text'] for bx in boxes])
box4 = np.zeros((N,8))
for i in range(N):
cx =boxes[i]['cx']
cy = boxes[i]['cy']
degree =boxes[i]['degree']
w = boxes[i]['w']
h = boxes[i]['h']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi)
box4[i] = [x1,y1,x2,y2,x3,y3,x4,y4]
x1 = box4[:,0].min()
y1 = box4[:,1].min()
x2 = box4[:,2].max()
y2 = box4[:,3].min()
x3 = box4[:,4].max()
y3 = box4[:,5].max()
x4 = box4[:,6].min()
y4 = box4[:,7].max()
angle,w,h,cx,cy = solve([x1,y1,x2,y2,x3,y3,x4,y4])
return {'text':text,'cx':cx,'cy':cy,'w':w,'h':h,'degree':angle/np.pi*180}
newBox = []
for line in result:
if len(newBox)==0:
newBox.append([line])
else:
check=False
for box in newBox[-1]:
if diff(line,box)>alpha:
check = True
if not check:
newBox[-1].append(line)
else:
newBox.append([line])
newBox = [sort_group_box(bx) for bx in newBox]
return newBox
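# A minimal usage sketch with made-up detection values: two boxes sharing the same vertical
# centre are merged into one line whose text fields are concatenated left to right.
def _demo_union_rbox():
    result = [
        {'text': 'hello', 'cx': 40, 'cy': 20, 'w': 60, 'h': 18, 'degree': 0.0},
        {'text': 'world', 'cx': 110, 'cy': 20, 'w': 60, 'h': 18, 'degree': 0.0},
    ]
    return union_rbox(result, alpha=0.1)  # a single entry with text 'hello world'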
def adjust_box_to_origin(img,angle, result):
"""
    Map the boxes back to the coordinates of the original (unrotated) image.
"""
h,w = img.shape[:2]
if angle in [90,270]:
imgW,imgH = img.shape[:2]
else:
imgH,imgW= img.shape[:2]
newresult = []
for line in result:
cx =line['box']['cx']
cy = line['box']['cy']
degree =line['box']['angle']
w = line['box']['w']
h = line['box']['h']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi)
x1,y1,x2,y2,x3,y3,x4,y4 = box_rotate([x1,y1,x2,y2,x3,y3,x4,y4],angle=(360-angle)%360,imgH=imgH,imgW=imgW)
box = x1,y1,x2,y2,x3,y3,x4,y4
newresult.append({'name':line['name'],'text':line['text'],'box':box})
return newresult | [((22, 10, 22, 40), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((74, 11, 74, 54), 'numpy.array', 'np.array', (), '', True, 'import numpy as np\n'), ((118, 7, 118, 24), 'os.path.exists', 'os.path.exists', ({(118, 22, 118, 23): 'p'}, {}), '(p)', False, 'import os\n'), ((350, 13, 350, 28), 'numpy.arcsin', 'np.arcsin', ({(350, 23, 350, 27): 'sinA'}, {}), '(sinA)', True, 'import numpy as np\n'), ((470, 8, 470, 37), 'scipy.ndimage.interpolation.zoom', 'interpolation.zoom', ({(470, 27, 470, 32): 'image', (470, 33, 470, 36): '0.5'}, {}), '(image, 0.5)', False, 'from scipy.ndimage import filters, interpolation, morphology, measurements, minimum\n'), ((471, 8, 471, 51), 'scipy.ndimage.filters.percentile_filter', 'filters.percentile_filter', (), '', False, 'from scipy.ndimage import filters, interpolation, morphology, measurements, minimum\n'), ((472, 8, 472, 51), 'scipy.ndimage.filters.percentile_filter', 'filters.percentile_filter', (), '', False, 'from scipy.ndimage import filters, interpolation, morphology, measurements, minimum\n'), ((473, 8, 473, 37), 'scipy.ndimage.interpolation.zoom', 'interpolation.zoom', ({(473, 27, 473, 28): 'm', (473, 29, 473, 36): '1.0 / 0.5'}, {}), '(m, 1.0 / 0.5)', False, 'from scipy.ndimage import filters, interpolation, morphology, measurements, minimum\n'), ((476, 11, 476, 47), 'numpy.clip', 'np.clip', ({(476, 19, 476, 42): 'image[:h, :w] - m[:h, :w] + 1', (476, 43, 476, 44): '0', (476, 45, 476, 46): '1'}, {}), '(image[:h, :w] - m[:h, :w] + 1, 0, 1)', True, 'import numpy as np\n'), ((480, 12, 480, 22), 'numpy.amin', 'amin', ({(480, 17, 480, 21): 'flat'}, {}), '(flat)', False, 'from numpy import amin, amax\n'), ((70, 15, 70, 66), 'scipy.spatial.distance.cdist', 'dist.cdist', ({(70, 26, 70, 40): 'tl[np.newaxis]', (70, 42, 70, 52): 'right_most', (70, 54, 70, 65): '"""euclidean"""'}, {}), "(tl[np.newaxis], right_most, 'euclidean')", True, 'from scipy.spatial import distance as dist\n'), ((103, 16, 103, 31), 'numpy.arcsin', 'np.arcsin', ({(103, 26, 103, 30): 'sinA'}, {}), '(sinA)', True, 'import numpy as np\n'), ((121, 20, 121, 51), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(121, 34, 121, 43): 'xmlString', (121, 44, 121, 50): '"""lxml"""'}, {}), "(xmlString, 'lxml')", False, 'from bs4 import BeautifulSoup\n'), ((266, 19, 266, 30), 'numpy.tan', 'tan', ({(266, 23, 266, 29): '-angle'}, {}), '(-angle)', False, 'from numpy import cos, sin, pi, tan\n'), ((419, 25, 419, 50), 'numpy.mean', 'np.mean', ({(419, 33, 419, 49): '[x1, x2, x3, x4]'}, {}), '([x1, x2, x3, x4])', True, 'import numpy as np\n'), ((419, 52, 419, 77), 'numpy.mean', 'np.mean', ({(419, 60, 419, 76): '[y1, y2, y3, y4]'}, {}), '([y1, y2, y3, y4])', True, 'import numpy as np\n'), ((465, 15, 465, 49), 'cv2.resize', 'cv2.resize', (), '', False, 'import cv2\n'), ((468, 16, 468, 25), 'numpy.amin', 'amin', ({(468, 21, 468, 24): 'raw'}, {}), '(raw)', False, 'from numpy import amin, amax\n'), ((469, 18, 469, 29), 'numpy.amax', 'amax', ({(469, 23, 469, 28): 'image'}, {}), '(image)', False, 'from numpy import amin, amax\n'), ((479, 11, 479, 21), 'numpy.amax', 'amax', ({(479, 16, 479, 20): 'flat'}, {}), '(flat)', False, 'from numpy import amin, amax\n'), ((485, 15, 485, 66), 'scipy.ndimage.interpolation.rotate', 'interpolation.rotate', (), '', False, 'from scipy.ndimage import filters, interpolation, morphology, measurements, minimum\n'), ((486, 12, 486, 33), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((487, 12, 487, 21), 'numpy.var', 'np.var', ({(487, 19, 487, 
20): 'v'}, {}), '(v)', True, 'import numpy as np\n'), ((534, 16, 534, 46), 'numpy.sqrt', 'np.sqrt', ({(534, 24, 534, 45): 'disX * disX + disY * disY'}, {}), '(disX * disX + disY * disY)', True, 'import numpy as np\n'), ((537, 12, 537, 39), 'numpy.fabs', 'np.fabs', ({(537, 20, 537, 38): 'fTmp1 * disX / width'}, {}), '(fTmp1 * disX / width)', True, 'import numpy as np\n'), ((538, 12, 538, 39), 'numpy.fabs', 'np.fabs', ({(538, 20, 538, 38): 'fTmp1 * disY / width'}, {}), '(fTmp1 * disY / width)', True, 'import numpy as np\n'), ((586, 15, 586, 30), 'numpy.zeros', 'np.zeros', ({(586, 24, 586, 29): '(N, 8)'}, {}), '((N, 8))', True, 'import numpy as np\n'), ((94, 10, 94, 40), 'numpy.sqrt', 'np.sqrt', ({(94, 18, 94, 39): '((x2 - x1) ** 2 + (y2 - y1) ** 2)'}, {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)', True, 'import numpy as np\n'), ((94, 41, 94, 71), 'numpy.sqrt', 'np.sqrt', ({(94, 49, 94, 70): '((x3 - x4) ** 2 + (y3 - y4) ** 2)'}, {}), '((x3 - x4) ** 2 + (y3 - y4) ** 2)', True, 'import numpy as np\n'), ((95, 10, 95, 40), 'numpy.sqrt', 'np.sqrt', ({(95, 18, 95, 39): '((x2 - x3) ** 2 + (y2 - y3) ** 2)'}, {}), '((x2 - x3) ** 2 + (y2 - y3) ** 2)', True, 'import numpy as np\n'), ((95, 41, 95, 71), 'numpy.sqrt', 'np.sqrt', ({(95, 49, 95, 70): '((x1 - x4) ** 2 + (y1 - y4) ** 2)'}, {}), '((x1 - x4) ** 2 + (y1 - y4) ** 2)', True, 'import numpy as np\n'), ((110, 9, 110, 22), 'PIL.Image.open', 'Image.open', ({(110, 20, 110, 21): 'p'}, {}), '(p)', False, 'from PIL import Image\n'), ((346, 10, 346, 40), 'numpy.sqrt', 'np.sqrt', ({(346, 18, 346, 39): '((x2 - x1) ** 2 + (y2 - y1) ** 2)'}, {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)', True, 'import numpy as np\n'), ((346, 41, 346, 71), 'numpy.sqrt', 'np.sqrt', ({(346, 49, 346, 70): '((x3 - x4) ** 2 + (y3 - y4) ** 2)'}, {}), '((x3 - x4) ** 2 + (y3 - y4) ** 2)', True, 'import numpy as np\n'), ((347, 10, 347, 40), 'numpy.sqrt', 'np.sqrt', ({(347, 18, 347, 39): '((x2 - x3) ** 2 + (y2 - y3) ** 2)'}, {}), '((x2 - x3) ** 2 + (y2 - y3) ** 2)', True, 'import numpy as np\n'), ((347, 41, 347, 71), 'numpy.sqrt', 'np.sqrt', ({(347, 49, 347, 70): '((x1 - x4) ** 2 + (y1 - y4) ** 2)'}, {}), '((x1 - x4) ** 2 + (y1 - y4) ** 2)', True, 'import numpy as np\n'), ((57, 19, 57, 40), 'numpy.argsort', 'np.argsort', ({(57, 30, 57, 39): 'pts[:, (0)]'}, {}), '(pts[:, (0)])', True, 'import numpy as np\n'), ((65, 26, 65, 53), 'numpy.argsort', 'np.argsort', ({(65, 37, 65, 52): 'left_most[:, (1)]'}, {}), '(left_most[:, (1)])', True, 'import numpy as np\n'), ((136, 24, 136, 39), 'numpy.float', 'np.float', ({(136, 33, 136, 38): 'angle'}, {}), '(angle)', True, 'import numpy as np\n'), ((190, 19, 190, 29), 'numpy.cos', 'cos', ({(190, 23, 190, 28): 'angle'}, {}), '(angle)', False, 'from numpy import cos, sin, pi\n'), ((190, 39, 190, 49), 'numpy.sin', 'sin', ({(190, 43, 190, 48): 'angle'}, {}), '(angle)', False, 'from numpy import cos, sin, pi\n'), ((191, 19, 191, 29), 'numpy.sin', 'sin', ({(191, 23, 191, 28): 'angle'}, {}), '(angle)', False, 'from numpy import cos, sin, pi\n'), ((191, 39, 191, 49), 'numpy.cos', 'cos', ({(191, 43, 191, 48): 'angle'}, {}), '(angle)', False, 'from numpy import cos, sin, pi\n'), ((357, 19, 357, 29), 'numpy.cos', 'cos', ({(357, 23, 357, 28): 'angle'}, {}), '(angle)', False, 'from numpy import cos, sin, pi\n'), ((357, 39, 357, 49), 'numpy.sin', 'sin', ({(357, 43, 357, 48): 'angle'}, {}), '(angle)', False, 'from numpy import cos, sin, pi\n'), ((358, 19, 358, 29), 'numpy.sin', 'sin', ({(358, 23, 358, 28): 'angle'}, {}), '(angle)', False, 'from numpy import cos, sin, pi\n'), 
((358, 39, 358, 49), 'numpy.cos', 'cos', ({(358, 43, 358, 48): 'angle'}, {}), '(angle)', False, 'from numpy import cos, sin, pi\n'), ((71, 26, 71, 46), 'numpy.argsort', 'np.argsort', ({(71, 37, 71, 45): 'distance'}, {}), '(distance)', True, 'import numpy as np\n'), ((248, 53, 248, 65), 'numpy.array', 'np.array', ({(248, 62, 248, 64): 'im'}, {}), '(im)', True, 'import numpy as np\n'), ((448, 53, 448, 65), 'numpy.array', 'np.array', ({(448, 62, 448, 64): 'im'}, {}), '(im)', True, 'import numpy as np\n'), ((140, 53, 140, 67), 'numpy.sign', 'np.sign', ({(140, 61, 140, 66): 'angle'}, {}), '(angle)', True, 'import numpy as np\n')] |
harveywwu/OpenData | opendatatools/common/ui_util.py | cf421465dd9b11fdbb2fbf4d00512e3aaf09d070 | # -*- coding: UTF-8 -*-
import sys, time
class ShowProcess():
"""
    A class for displaying the progress of a processing task.
    Call its methods to render a console progress bar.
"""
    i = 0 # current step
    max_steps = 0 # total number of steps to process
    max_arrow = 50 # length of the progress bar
infoDone = 'done'
    # the initializer needs to know the total number of steps
def __init__(self, max_steps, infoDone = 'Done'):
self.max_steps = max_steps
self.i = 0
self.infoDone = infoDone
    # display function: renders the progress bar for the current step i
    # output looks like [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>]100.00%
def show_process(self, i=None):
if i is not None:
self.i = i
else:
self.i += 1
        num_arrow = int(self.i * self.max_arrow / self.max_steps) # how many '>' to display
        num_line = self.max_arrow - num_arrow # how many '-' to display
        percent = self.i * 100.0 / self.max_steps # completion percentage, formatted as xx.xx%
        process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
                      + '%.2f' % percent + '%' + '\r' # string to output; '\r' moves the cursor back to the start of the line instead of printing a newline
        sys.stdout.write(process_bar) # write the bar to the terminal without appending a newline
        sys.stdout.flush()
if self.i >= self.max_steps:
self.close()
def close(self):
print('')
print(self.infoDone)
self.i = 0
if __name__=='__main__':
max_steps = 100
process_bar = ShowProcess(max_steps, 'OK')
for i in range(max_steps):
process_bar.show_process()
time.sleep(0.1) | [] |
JunzhongLin/leetcode_practice | data_structure/stack_and_queue/494. Target Sum_ Medium.py | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | '''
You are given an integer array nums and an integer target.
You want to build an expression out of nums by adding one of the symbols '+' and '-' before each integer in nums and then concatenate all the integers.
For example, if nums = [2, 1], you can add a '+' before 2 and a '-' before 1 and concatenate them to build the expression "+2-1".
Return the number of different expressions that you can build, which evaluates to target.
'''
from collections import defaultdict
class Solution:
def findTargetSumWays(self, nums, target) -> int:
count = 0
target_depth = len(nums) - 1
stack = [(0, -1, 0)]
cache = defaultdict(int)
while stack:
# print(stack)
# count += 1
# if count == 10:
# break
curr_sum, depth, visited = stack.pop()
if visited:
if depth == target_depth:
if curr_sum == target:
cache[(curr_sum, depth, visited)] = 1
else:
l = cache[(curr_sum + nums[depth + 1], depth + 1, 1)]
r = cache[(curr_sum - nums[depth + 1], depth + 1, 1)]
cache[(curr_sum, depth, visited)] = l + r
continue
else:
if (curr_sum, depth, 1) in cache:
continue
stack.append((curr_sum, depth, 1))
if depth < target_depth:
stack.append((curr_sum + nums[depth + 1], depth + 1, 0))
stack.append((curr_sum - nums[depth + 1], depth + 1, 0))
return cache[(0, -1, 1)]
input_val, target = [1,1,1,1,1], 3
res = Solution().findTargetSumWays(input_val, target) | [((17, 16, 17, 32), 'collections.defaultdict', 'defaultdict', ({(17, 28, 17, 31): 'int'}, {}), '(int)', False, 'from collections import defaultdict\n')] |
os-gabe/fixtrack | fixtrack/frontend/pickable_markers.py | a0af4dfa9342acc0ba05c0249a32806c825b74b2 | import numpy as np
from fixtrack.frontend.pickable_base import PickableBase
from vispy import scene
class PickableMarkers(PickableBase):
"""
Markers that can highlight on hover and be selected
"""
class State(PickableBase.State):
def __init__(self, **kwargs):
super(PickableMarkers.State, self).__init__(**kwargs)
self.sizes_raw = None
self.sizes = None
class Config(PickableBase.Config):
def __init__(self, select_scale=1.0, hover_scale=1.0, **kwargs):
super(PickableMarkers.Config, self).__init__(**kwargs)
self.select_scale = select_scale
self.hover_scale = hover_scale
_kwargs_ignore = ["size", "color_select", "color_hover"]
def __init__(self, parent=None, data=np.zeros((0, 3)), select_scale=2.0, **kwargs):
super(PickableMarkers, self).__init__(
scene.visuals.Markers(pos=data, parent=parent), data=data, parent=parent, **kwargs
)
self.visual.set_gl_state("translucent", depth_test=False, blend=True)
self._cfg.select_scale = select_scale
self._cfg.hover_scale = select_scale * 1.15
self.multi_sel = None
@property
def marker_size(self):
return self._cfg.vis_args["size"]
@marker_size.setter
def marker_size(self, s):
self._cfg.vis_args["size"] = max(1, s)
self._init_data()
self.set_data()
def _selected_idxs(self):
sel = []
if self.multi_sel is None:
if self._state.idx_selected >= 0:
sel = [self._state.idx_selected]
else:
sel = self.multi_sel
return sel
def _init_data(self):
super(PickableMarkers, self)._init_data()
n = len(self._state.data)
self._state.sizes_raw = np.full((n, ), self._cfg.vis_args["size"])
self._state.sizes = self._state.sizes_raw.copy()
def _highlight(self):
self._state.sizes = self._state.sizes_raw.copy()
super(PickableMarkers, self)._highlight()
def _highlight_selected(self):
super(PickableMarkers, self)._highlight_selected()
cfg = self._cfg
state = self._state
if (state.idx_selected >= 0) and cfg.pickable:
state.sizes[self._selected_idxs()] = cfg.vis_args["size"] * cfg.select_scale
def _highlight_hovered(self):
super(PickableMarkers, self)._highlight_hovered()
cfg = self._cfg
state = self._state
if (state.idx_hover >= 0) and cfg.hoverable:
state.sizes[self._hover_idxs()] = cfg.vis_args["size"] * cfg.hover_scale
def _set_data(self):
if len(self._state.data) > 0:
kwargs = {
k: v
for k, v in self._cfg.vis_args.items() if k not in self._kwargs_ignore
}
self._state.edge_colors[:, 3] = self._state.colors[:, 3]
self.visual.set_data(
pos=self._state.data,
size=self._state.sizes,
face_color=self._state.colors,
edge_color=self._state.edge_colors,
edge_width=3,
**kwargs
)
else:
self.visual.set_data(np.zeros((0, 3)))
def _set_data_false(self):
if len(self._state.data) > 0:
colors = self._pa.unique_colors(id(self)) / 255.0
colors[self._state.colors[:, 3] < 1.0e-3] = 0.0
self.visual.set_data(
pos=self._state.data,
size=self._state.sizes,
face_color=colors,
edge_color=colors,
edge_width=0,
)
else:
self.visual.set_data(np.zeros((0, 3)))
| [((26, 41, 26, 57), 'numpy.zeros', 'np.zeros', ({(26, 50, 26, 56): '(0, 3)'}, {}), '((0, 3))', True, 'import numpy as np\n'), ((57, 32, 57, 74), 'numpy.full', 'np.full', ({(57, 40, 57, 45): '(n,)', (57, 47, 57, 73): "self._cfg.vis_args['size']"}, {}), "((n,), self._cfg.vis_args['size'])", True, 'import numpy as np\n'), ((28, 12, 28, 58), 'vispy.scene.visuals.Markers', 'scene.visuals.Markers', (), '', False, 'from vispy import scene\n'), ((95, 33, 95, 49), 'numpy.zeros', 'np.zeros', ({(95, 42, 95, 48): '(0, 3)'}, {}), '((0, 3))', True, 'import numpy as np\n'), ((109, 33, 109, 49), 'numpy.zeros', 'np.zeros', ({(109, 42, 109, 48): '(0, 3)'}, {}), '((0, 3))', True, 'import numpy as np\n')] |
FFY00/jeepney | examples/blocking_subscribe.py | 293241a54fbb73581755e97191720ed1603aed34 | """
Example of subscribing to a D-Bus signal using blocking I/O.
This subscribes to the signal for a desktop notification being closed.
To try it, start this script, then trigger a desktop notification, and close it
somehow to trigger the signal. Use Ctrl-C to stop the script.
This example relies on the ``org.freedesktop.Notifications.NotificationClosed``
signal; some desktops may not support it. See the notification spec for more
details:
https://people.gnome.org/~mccann/docs/notification-spec/notification-spec-latest.html
Match rules are defined in the D-Bus specification:
https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules
"""
from jeepney.bus_messages import MatchRule, message_bus
from jeepney.integrate.blocking import connect_and_authenticate, Proxy
from jeepney.wrappers import DBusAddress
noti = DBusAddress('/org/freedesktop/Notifications',
bus_name='org.freedesktop.Notifications',
interface='org.freedesktop.Notifications')
connection = connect_and_authenticate(bus="SESSION")
match_rule = MatchRule(
type="signal",
sender=noti.bus_name,
interface=noti.interface,
member="NotificationClosed",
path=noti.object_path,
)
# This defines messages for talking to the D-Bus bus daemon itself:
session_bus = Proxy(message_bus, connection)
# Tell the session bus to pass us matching signal messages:
print("Match added?", session_bus.AddMatch(match_rule) == ())
reasons = {1: 'expiry', 2: 'dismissal', 3: 'dbus', '4': 'undefined'}
def notification_closed(data):
"""Callback for when we receive a notification closed signal"""
nid, reason_no = data
reason = reasons.get(reason_no, 'unknown')
print('Notification {} closed by: {}'.format(nid, reason))
# Connect the callback to the relevant signal
connection.router.subscribe_signal(
callback=notification_closed,
path=noti.object_path,
interface=noti.interface,
member="NotificationClosed"
)
# Using dbus-send or d-feet or blocking_notify.py, send a notification and
# manually close it or call ``.CloseNotification`` after a beat.
try:
while True:
connection.recv_messages()
except KeyboardInterrupt:
pass
connection.close()
| [((21, 7, 23, 61), 'jeepney.wrappers.DBusAddress', 'DBusAddress', (), '', False, 'from jeepney.wrappers import DBusAddress\n'), ((25, 13, 25, 52), 'jeepney.integrate.blocking.connect_and_authenticate', 'connect_and_authenticate', (), '', False, 'from jeepney.integrate.blocking import connect_and_authenticate, Proxy\n'), ((27, 13, 33, 1), 'jeepney.bus_messages.MatchRule', 'MatchRule', (), '', False, 'from jeepney.bus_messages import MatchRule, message_bus\n'), ((36, 14, 36, 44), 'jeepney.integrate.blocking.Proxy', 'Proxy', ({(36, 20, 36, 31): 'message_bus', (36, 33, 36, 43): 'connection'}, {}), '(message_bus, connection)', False, 'from jeepney.integrate.blocking import connect_and_authenticate, Proxy\n')] |
league3236/shholiday | test.py | 54d0fcfd393d09183cd77cab697f5bc60864b314 | from shholiday import holiday2020 as hd
daytuple = (1,1)
nowholiday = hd.holiday2020()
print(nowholiday.is_holiday(daytuple)) | [((4, 13, 4, 29), 'shholiday.holiday2020.holiday2020', 'hd.holiday2020', ({}, {}), '()', True, 'from shholiday import holiday2020 as hd\n')] |
dalejung/pandas-composition | setup.py | e73e5295b2d2f44f09805dcf06db12108c555197 | from distutils.core import setup
DISTNAME='pandas_composition'
FULLVERSION='0.1'
setup(name=DISTNAME,
version=FULLVERSION,
packages=['pandas_composition',
]
)
| [((6, 0, 10, 7), 'distutils.core.setup', 'setup', (), '', False, 'from distutils.core import setup\n')] |
encukou/Zpetnovazebnik | blog/migrations/0005_title_null.py | 0d058fd67049a3d42814b04486bde93bc406fa3b | # Generated by Django 2.1.7 on 2019-02-27 14:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_longer_password'),
]
operations = [
migrations.AlterField(
model_name='session',
name='title',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| [((16, 18, 16, 73), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
Kuba77/Xian-DB | setup.py | 2f15ef1b9b7a96c21bd46e9fb8481de6feb713b7 | from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='xiandb',
version='0.2.0',
description='A database model for Xian',
long_description=long_description,
url='https://github.com/Kuba77/Xian-DB',
author='Jakub Chronowski',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: XIAN Collaborators',
'Topic :: Software Development :: Database',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'
],
keywords='xian database db',
packages=['xiandb', 'xiandb.models'],
install_requires=['mongokat', 'pyyaml', 'bcrypt'],
extras_require={}
)
| [((10, 0, 42, 1), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n'), ((5, 20, 5, 42), 'os.path.dirname', 'path.dirname', ({(5, 33, 5, 41): '__file__'}, {}), '(__file__)', False, 'from os import path\n'), ((7, 10, 7, 39), 'os.path.join', 'path.join', ({(7, 20, 7, 24): 'here', (7, 26, 7, 38): '"""README.rst"""'}, {}), "(here, 'README.rst')", False, 'from os import path\n')] |
ashishpatel26/tf2-yolo3 | yolo3/focal_loss.py | 38814178643eb8e1f8b5e4fe8d448faed44ad574 | from functools import partial
import tensorflow as tf
_EPSILON = tf.keras.backend.epsilon()
def register_keras_custom_object(cls):
tf.keras.utils.get_custom_objects()[cls.__name__] = cls
return cls
def binary_focal_loss(y_true, y_pred, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None):
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)
if from_logits:
return _binary_focal_loss_from_logits(labels=y_true,
logits=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
else:
return _binary_focal_loss_from_probs(labels=y_true,
p=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
@register_keras_custom_object
class BinaryFocalLoss(tf.keras.losses.Loss):
def __init__(self, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None, **kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.pos_weight = pos_weight
self.from_logits = from_logits
self.label_smoothing = label_smoothing
def get_config(self):
config = super().get_config()
config.update(gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
return config
def call(self, y_true, y_pred):
return binary_focal_loss(y_true=y_true,
y_pred=y_pred,
gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
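# A minimal usage sketch (the gamma and pos_weight values are arbitrary examples): the loss
# can be computed directly as below, or passed to model.compile() like any other Keras loss.
def _demo_binary_focal_loss():
    y_true = tf.constant([[1.0], [0.0], [1.0]])
    y_pred = tf.constant([[2.0], [-1.5], [0.3]])  # raw logits
    loss_fn = BinaryFocalLoss(gamma=2.0, pos_weight=0.25, from_logits=True)
    return loss_fn(y_true, y_pred)  # scalar loss averaged over the batch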
# Helper functions below
def _process_labels(labels, label_smoothing, dtype):
labels = tf.dtypes.cast(labels, dtype=dtype)
if label_smoothing is not None:
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels
def _binary_focal_loss_from_logits(labels, logits, gamma, pos_weight, label_smoothing):
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=logits.dtype)
# Compute probabilities for the positive class
p = tf.math.sigmoid(logits)
if label_smoothing is None:
labels_shape = labels.shape
logits_shape = logits.shape
if not labels_shape.is_fully_defined() or labels_shape != logits_shape:
labels_shape = tf.shape(labels)
logits_shape = tf.shape(logits)
shape = tf.broadcast_dynamic_shape(labels_shape, logits_shape)
labels = tf.broadcast_to(labels, shape)
logits = tf.broadcast_to(logits, shape)
if pos_weight is None:
loss_func = tf.nn.sigmoid_cross_entropy_with_logits
else:
loss_func = partial(tf.nn.weighted_cross_entropy_with_logits, pos_weight=pos_weight)
loss = loss_func(labels=labels, logits=logits)
modulation_pos = (1 - p)**gamma
modulation_neg = p**gamma
mask = tf.dtypes.cast(labels, dtype=tf.bool)
modulation = tf.where(mask, modulation_pos, modulation_neg)
return modulation * loss
# Terms for the positive and negative class components of the loss
pos_term = labels * ((1 - p)**gamma)
neg_term = (1 - labels) * (p**gamma)
# Term involving the log and ReLU
log_weight = pos_term
if pos_weight is not None:
log_weight *= pos_weight
log_weight += neg_term
log_term = tf.math.log1p(tf.math.exp(-tf.math.abs(logits)))
log_term += tf.nn.relu(-logits)
log_term *= log_weight
# Combine all the terms into the loss
loss = neg_term * logits + log_term
return loss
def _binary_focal_loss_from_probs(labels, p, gamma, pos_weight, label_smoothing):
q = 1 - p
# For numerical stability (so we don't inadvertently take the log of 0)
p = tf.math.maximum(p, _EPSILON)
q = tf.math.maximum(q, _EPSILON)
# Loss for the positive examples
pos_loss = -(q**gamma) * tf.math.log(p)
if pos_weight is not None:
pos_loss *= pos_weight
# Loss for the negative examples
neg_loss = -(p**gamma) * tf.math.log(q)
# Combine loss terms
if label_smoothing is None:
labels = tf.dtypes.cast(labels, dtype=tf.bool)
loss = tf.where(labels, pos_loss, neg_loss)
else:
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=p.dtype)
loss = labels * pos_loss + (1 - labels) * neg_loss
return loss | [((4, 11, 4, 37), 'tensorflow.keras.backend.epsilon', 'tf.keras.backend.epsilon', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((13, 13, 13, 41), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(13, 34, 13, 40): 'y_pred'}, {}), '(y_pred)', True, 'import tensorflow as tf\n'), ((62, 13, 62, 48), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (), '', True, 'import tensorflow as tf\n'), ((72, 8, 72, 31), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', ({(72, 24, 72, 30): 'logits'}, {}), '(logits)', True, 'import tensorflow as tf\n'), ((104, 16, 104, 35), 'tensorflow.nn.relu', 'tf.nn.relu', ({(104, 27, 104, 34): '(-logits)'}, {}), '(-logits)', True, 'import tensorflow as tf\n'), ((116, 8, 116, 36), 'tensorflow.math.maximum', 'tf.math.maximum', ({(116, 24, 116, 25): 'p', (116, 27, 116, 35): '_EPSILON'}, {}), '(p, _EPSILON)', True, 'import tensorflow as tf\n'), ((117, 8, 117, 36), 'tensorflow.math.maximum', 'tf.math.maximum', ({(117, 24, 117, 25): 'q', (117, 27, 117, 35): '_EPSILON'}, {}), '(q, _EPSILON)', True, 'import tensorflow as tf\n'), ((8, 4, 8, 39), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((15, 17, 15, 57), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (), '', True, 'import tensorflow as tf\n'), ((90, 15, 90, 52), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (), '', True, 'import tensorflow as tf\n'), ((91, 21, 91, 67), 'tensorflow.where', 'tf.where', ({(91, 30, 91, 34): 'mask', (91, 36, 91, 50): 'modulation_pos', (91, 52, 91, 66): 'modulation_neg'}, {}), '(mask, modulation_pos, modulation_neg)', True, 'import tensorflow as tf\n'), ((120, 29, 120, 43), 'tensorflow.math.log', 'tf.math.log', ({(120, 41, 120, 42): 'p'}, {}), '(p)', True, 'import tensorflow as tf\n'), ((125, 29, 125, 43), 'tensorflow.math.log', 'tf.math.log', ({(125, 41, 125, 42): 'q'}, {}), '(q)', True, 'import tensorflow as tf\n'), ((129, 17, 129, 54), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (), '', True, 'import tensorflow as tf\n'), ((130, 15, 130, 51), 'tensorflow.where', 'tf.where', ({(130, 24, 130, 30): 'labels', (130, 32, 130, 40): 'pos_loss', (130, 42, 130, 50): 'neg_loss'}, {}), '(labels, pos_loss, neg_loss)', True, 'import tensorflow as tf\n'), ((78, 27, 78, 43), 'tensorflow.shape', 'tf.shape', ({(78, 36, 78, 42): 'labels'}, {}), '(labels)', True, 'import tensorflow as tf\n'), ((79, 27, 79, 43), 'tensorflow.shape', 'tf.shape', ({(79, 36, 79, 42): 'logits'}, {}), '(logits)', True, 'import tensorflow as tf\n'), ((80, 20, 80, 74), 'tensorflow.broadcast_dynamic_shape', 'tf.broadcast_dynamic_shape', ({(80, 47, 80, 59): 'labels_shape', (80, 61, 80, 73): 'logits_shape'}, {}), '(labels_shape, logits_shape)', True, 'import tensorflow as tf\n'), ((81, 21, 81, 51), 'tensorflow.broadcast_to', 'tf.broadcast_to', ({(81, 37, 81, 43): 'labels', (81, 45, 81, 50): 'shape'}, {}), '(labels, shape)', True, 'import tensorflow as tf\n'), ((82, 21, 82, 51), 'tensorflow.broadcast_to', 'tf.broadcast_to', ({(82, 37, 82, 43): 'logits', (82, 45, 82, 50): 'shape'}, {}), '(logits, shape)', True, 'import tensorflow as tf\n'), ((86, 24, 86, 96), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((103, 42, 103, 61), 'tensorflow.math.abs', 'tf.math.abs', ({(103, 54, 103, 60): 'logits'}, {}), '(logits)', True, 'import tensorflow as tf\n')] |
Sult/evetool | characters/models/characters.py | 155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f | import time
from collections import OrderedDict
from datetime import datetime, timedelta
from django.db import models
from django.conf import settings
from django.utils.timezone import utc
from .skills import Skill, SkillGroup
from metrics.models import Corporation
from tasks.models import EveApiCache, Task
from evetool.storage import OverwriteStorage
import utils
class CharacterApi(models.Model):
""" charactertype apis """
api = models.ForeignKey("apis.Api")
characterid = models.BigIntegerField()
charactername = models.CharField(max_length=254)
corporationid = models.BigIntegerField()
corporationname = models.CharField(max_length=254)
def __unicode__(self):
return self.charactername
#get right icon for characters view
def view_icon(self):
try:
icon = self.characterapiicon_set.get(size=128, relation=self)
return icon.icon
except CharacterApiIcon.DoesNotExist:
return None
#def character sheet image
def sheet_icon(self):
try:
icon = self.characterapiicon_set.get(size=200, relation=self)
return icon.icon
except CharacterApiIcon.DoesNotExist:
return None
def current_balance(self):
if self.api.access_to("CharacterInfo"):
sheet = utils.connection.api_request(
"CharacterInfoAuth", obj=self
)
if sheet.accountBalance:
return round(float(sheet.accountBalance), 2)
return 0
def sheet_cache_key(self):
key = "CharacterInfo"
category = EveApiCache.EVE
kwargs = {"characterID": self.characterid}
if self.api.access_to("CharacterInfo"):
return utils.connection.generate_cache_key(
category, key, api=self.api, **kwargs
)
else:
return utils.connection.generate_cache_key(category, key)
def sheet_set_cache_job(self):
key = "CharacterInfo"
category = EveApiCache.EVE
kwargs = {"characterID": self.characterid}
if self.api.access_to("CharacterInfo"):
api = self.api
else:
api = None
EveApiCache.objects.create(
priority=Task.VERY_HIGH,
api=api,
category=category,
key=key,
kwargs=kwargs,
)
#get the data for landing page after character selection
def character_sheet(self):
sheet = utils.connection.get_cache(self.sheet_cache_key())
employment = self.employment_history(sheet)
return sheet, employment
#employment history of a player
@staticmethod
def employment_history(sheet):
cache_key = "employment_history_%d" % int(sheet.characterID)
#result = utils.connection.get_cache(cache_key)
result = None
if not result:
cache_timer = 60 * 60
result = []
for corp_data in sheet.employmentHistory:
result.append({
"corporation": Corporation.find_corporation(
corp_data.corporationID
),
"startdate": utils.common.convert_timestamp(
corp_data.startDate
)
})
utils.connection.set_cache(cache_key, result, cache_timer)
return result
#get skill in training
def skill_in_training(self):
training_skill = None
if self.api.access_to("SkillInTraining"):
in_training = utils.connection.api_request(
"SkillInTraining", obj=self
)
try:
training_skill = {
"skill": Skill.objects.get(
typeid=int(in_training.trainingTypeID)
).typename,
"to_level": int(in_training.trainingToLevel),
"finnished": utils.common.convert_timestamp(
in_training.trainingEndTime
)
}
except AttributeError:
training_skill = {"skill": "No skill in training"}
return training_skill
#characters trained skills
def trained_skills(self):
cache_key = "trained_skills_%d" % self.pk
result = utils.connection.get_cache(cache_key)
if not result:
cache_timer = 60 * 5
sheet = utils.connection.api_request("CharacterSheet", obj=self)
groups = SkillGroup.objects.exclude(
groupname="Fake Skills"
).order_by("groupname")
skills = Skill.objects.order_by("typename")
all_skills = OrderedDict()
skillpoints = {}
for group in groups:
all_skills[group.groupname] = list()
skillpoints[group.groupname] = 0
for skill in skills:
trained = sheet.skills.Get(skill.typeid, False)
if trained:
all_skills[skill.skillgroup.groupname].append(
{
"skill": skill,
"level": int(trained.level)
}
)
skillpoints[skill.skillgroup.groupname] += \
trained.skillpoints
result = {
"all_skills": all_skills,
"skillpoints": skillpoints,
}
utils.connection.set_cache(cache_key, result, cache_timer)
return result
#get skillqueue
def skill_queue(self):
queue = None
if self.api.access_to("SkillQueue"):
queue = {}
skills = utils.connection.api_request(
"SkillQueue", obj=self
).skillqueue
queue["skills"] = skills
queue["total"] = self.total_skillpoints(skills)
now = datetime.now().replace(tzinfo=utc)
try:
trainingtime = utils.common.convert_timestamp(
skills[-1].endTime
) - now
trainingtime -= timedelta(
microseconds=trainingtime.microseconds
)
queue["trainingtime"] = trainingtime
except TypeError:
pass
return queue
#get total skillpoints for skills in queue
@staticmethod
def total_skillpoints(skills):
total = 0
for skill in skills:
total += int(skill.endSP - skill.startSP)
return total
#walletjournal
def wallet_journal(self):
cache_key = "walletjournal_character_%d" % self.pk
result = utils.connection.get_cache(cache_key)
if not result:
self.update_journal()
cache_timer = 60 * 10
utils.connection.set_cache(cache_key, True, cache_timer)
return CharacterJournal.objects.filter(characterapi=self)
#updates journal to current moment
def update_journal(self):
fromid = 0
transactions = utils.connection.api_request(
"WalletJournal", obj=self, rowcount=2500
).transactions
while True:
for trans in transactions:
date = utils.common.convert_timestamp(trans.date)
#check for duplicate
if CharacterJournal.objects.filter(
characterapi=self,
balance=trans.balance,
date=date,
).exists():
continue
else:
CharacterJournal.create_entry(self, trans)
if int(trans.refID) < fromid or fromid == 0:
fromid = int(trans.refID)
if len(transactions) < 2500:
break
else:
time.sleep(1)
transactions = utils.connection.api_request(
"WalletJournal", obj=self, rowcount=2500, fromid=fromid
).transactions
class CharacterApiIcon(models.Model):
""" images related to characters """
relation = models.ForeignKey("characters.CharacterApi")
size = models.IntegerField(choices=settings.IMAGE_SIZES)
typeid = models.IntegerField()
icon = models.ImageField(
upload_to="images/characters/",
storage=OverwriteStorage(),
blank=True,
null=True
)
class Meta:
unique_together = ["size", "relation"]
def __unicode__(self):
return "Character Image %s" % self.relation.charactername
# def save(self, *args, **kwargs):
# try:
# temp = CharacterApiIcon.objects.get(pk=self.pk)
# if temp.icon != self.icon:
# temp.icon.delete()
# except ObjectDoesNotExist:
# pass
# super(CharacterApiIcon, self).save(*args, **kwargs)
#get list of wanted character icon sizes
@staticmethod
def icon_sizes():
return [128, 200]
class Transaction(models.Model):
reftypeid = models.SmallIntegerField()
ownername1 = models.CharField(max_length=254)
ownerid1 = models.IntegerField()
ownername2 = models.CharField(max_length=254)
ownerid2 = models.IntegerField()
argname1 = models.CharField(max_length=254)
argid1 = models.IntegerField()
amount = models.FloatField(null=True)
reason = models.TextField(blank=True)
taxreceiverid = models.IntegerField(null=True)
taxamount = models.FloatField(null=True)
class Meta:
abstract = True
class CharacterJournal(Transaction):
"""
    Wallet transactions of a player, saved to the database so the data can
    be filtered and metadata can be derived from it:
    balance graphs, how much you paid in taxes, and more.
"""
characterapi = models.ForeignKey(CharacterApi)
date = models.DateTimeField()
balance = models.FloatField()
class Meta:
unique_together = ["characterapi", "date", "balance"]
ordering = ["-date", "-reftypeid"]
def __unicode__(self):
return "%s's transaction" % self.characterapi.charactername
@staticmethod
def create_entry(characterapi, transaction):
if transaction.taxReceiverID == "":
taxreceiverid = None
else:
taxreceiverid = int(transaction.taxReceiverID)
if transaction.taxAmount == "":
taxamount = None
else:
taxamount = round(float(transaction.taxAmount), 2)
date = utils.common.convert_timestamp(transaction.date)
CharacterJournal.objects.create(
characterapi=characterapi,
date=date,
balance=round(float(transaction.balance), 2),
reftypeid=int(transaction.refTypeID),
ownername1=str(transaction.ownerName1),
ownerid1=int(transaction.ownerID1),
ownername2=str(transaction.ownerName2),
ownerid2=int(transaction.ownerID2),
argname1=str(transaction.argName1),
argid1=int(transaction.argID1),
amount=round(float(transaction.amount), 2),
reason=str(transaction.reason),
taxreceiverid=taxreceiverid,
taxamount=taxamount,
)
@staticmethod
def monthly_balance(characterapi):
last_restart = utils.common.last_server_restart()
days = last_restart - timedelta(days=31)
entries = CharacterJournal.objects.filter(
characterapi=characterapi,
date__range=[days, last_restart]
)
balance = []
for days in range(31):
first = entries.first()
date = (last_restart - timedelta(days=days))
#make timestamp in miliseconds
timestamp = int(time.mktime(date.timetuple()) * 1000)
if first:
isk = first.balance
else:
try:
isk = balance[-1][1]
except IndexError:
isk = characterapi.current_balance()
balance.append([timestamp, isk])
entries = entries.filter(date__lt=(date - timedelta(days=1)))
#return reversed list
return balance[::-1]
@staticmethod
def weekly_balance(characterapi):
now = datetime.now().replace(tzinfo=utc)
entries = CharacterJournal.objects.filter(
characterapi=characterapi,
date__range=[
now.replace(hour=23, minute=59, second=0) - timedelta(days=9),
now
]
)
balance = []
for days in range(8):
date = now.replace(
hour=0, minute=0, second=0
) - timedelta(days=days)
day_entries = entries.filter(
date__lt=now.replace(
hour=23, minute=59, second=59
) - timedelta(days=days),
date__gt=date
)
if not day_entries.count() > 0:
try:
isk = balance[-1][1]
except IndexError:
isk = characterapi.current_balance()
timestamp = int(time.mktime(date.timetuple()) * 1000)
balance.append([timestamp, isk])
else:
for entry in day_entries:
timestamp = int(time.mktime(entry.date.timetuple()) * 1000)
balance.append([timestamp, entry.balance])
#add last value for date on xaxis
date = now.replace(hour=23, minute=59, second=59) - timedelta(days=8)
isk = balance[-1][1]
timestamp = int(time.mktime(date.timetuple()) * 1000)
balance.append([timestamp, isk])
return balance[::-1]
| [((20, 10, 20, 39), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(20, 28, 20, 38): '"""apis.Api"""'}, {}), "('apis.Api')", False, 'from django.db import models\n'), ((21, 18, 21, 42), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((22, 20, 22, 52), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((23, 20, 23, 44), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((24, 22, 24, 54), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((244, 15, 244, 59), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(244, 33, 244, 58): '"""characters.CharacterApi"""'}, {}), "('characters.CharacterApi')", False, 'from django.db import models\n'), ((245, 11, 245, 60), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((246, 13, 246, 34), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((277, 16, 277, 42), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((278, 17, 278, 49), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((279, 15, 279, 36), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((280, 17, 280, 49), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((281, 15, 281, 36), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((282, 15, 282, 47), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((283, 13, 283, 34), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((284, 13, 284, 41), 'django.db.models.FloatField', 'models.FloatField', (), '', False, 'from django.db import models\n'), ((285, 13, 285, 41), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((286, 20, 286, 50), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((287, 16, 287, 44), 'django.db.models.FloatField', 'models.FloatField', (), '', False, 'from django.db import models\n'), ((300, 19, 300, 50), 'django.db.models.ForeignKey', 'models.ForeignKey', ({(300, 37, 300, 49): 'CharacterApi'}, {}), '(CharacterApi)', False, 'from django.db import models\n'), ((301, 11, 301, 33), 'django.db.models.DateTimeField', 'models.DateTimeField', ({}, {}), '()', False, 'from django.db import models\n'), ((302, 14, 302, 33), 'django.db.models.FloatField', 'models.FloatField', ({}, {}), '()', False, 'from django.db import models\n'), ((74, 8, 80, 9), 'tasks.models.EveApiCache.objects.create', 'EveApiCache.objects.create', (), '', False, 'from tasks.models import EveApiCache, Task\n'), ((133, 17, 133, 54), 'utils.connection.get_cache', 'utils.connection.get_cache', ({(133, 44, 133, 53): 'cache_key'}, {}), '(cache_key)', False, 'import utils\n'), ((203, 17, 203, 54), 'utils.connection.get_cache', 'utils.connection.get_cache', ({(203, 44, 203, 53): 'cache_key'}, {}), '(cache_key)', False, 'import utils\n'), ((323, 15, 323, 63), 'utils.common.convert_timestamp', 
'utils.common.convert_timestamp', ({(323, 46, 323, 62): 'transaction.date'}, {}), '(transaction.date)', False, 'import utils\n'), ((343, 23, 343, 57), 'utils.common.last_server_restart', 'utils.common.last_server_restart', ({}, {}), '()', False, 'import utils\n'), ((47, 20, 49, 13), 'utils.connection.api_request', 'utils.connection.api_request', (), '', False, 'import utils\n'), ((59, 19, 61, 13), 'utils.connection.generate_cache_key', 'utils.connection.generate_cache_key', (), '', False, 'import utils\n'), ((63, 19, 63, 69), 'utils.connection.generate_cache_key', 'utils.connection.generate_cache_key', ({(63, 55, 63, 63): 'category', (63, 65, 63, 68): 'key'}, {}), '(category, key)', False, 'import utils\n'), ((106, 12, 106, 70), 'utils.connection.set_cache', 'utils.connection.set_cache', ({(106, 39, 106, 48): 'cache_key', (106, 50, 106, 56): 'result', (106, 58, 106, 69): 'cache_timer'}, {}), '(cache_key, result, cache_timer)', False, 'import utils\n'), ((113, 26, 115, 13), 'utils.connection.api_request', 'utils.connection.api_request', (), '', False, 'import utils\n'), ((136, 20, 136, 76), 'utils.connection.api_request', 'utils.connection.api_request', (), '', False, 'import utils\n'), ((141, 25, 141, 38), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((165, 12, 165, 70), 'utils.connection.set_cache', 'utils.connection.set_cache', ({(165, 39, 165, 48): 'cache_key', (165, 50, 165, 56): 'result', (165, 58, 165, 69): 'cache_timer'}, {}), '(cache_key, result, cache_timer)', False, 'import utils\n'), ((207, 12, 207, 68), 'utils.connection.set_cache', 'utils.connection.set_cache', ({(207, 39, 207, 48): 'cache_key', (207, 50, 207, 54): '(True)', (207, 56, 207, 67): 'cache_timer'}, {}), '(cache_key, True, cache_timer)', False, 'import utils\n'), ((213, 23, 215, 9), 'utils.connection.api_request', 'utils.connection.api_request', (), '', False, 'import utils\n'), ((249, 16, 249, 34), 'evetool.storage.OverwriteStorage', 'OverwriteStorage', ({}, {}), '()', False, 'from evetool.storage import OverwriteStorage\n'), ((344, 30, 344, 48), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((406, 60, 406, 77), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((174, 21, 176, 13), 'utils.connection.api_request', 'utils.connection.api_request', (), '', False, 'import utils\n'), ((184, 32, 186, 17), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((219, 23, 219, 65), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', ({(219, 54, 219, 64): 'trans.date'}, {}), '(trans.date)', False, 'import utils\n'), ((235, 16, 235, 29), 'time.sleep', 'time.sleep', ({(235, 27, 235, 28): '(1)'}, {}), '(1)', False, 'import time\n'), ((353, 35, 353, 55), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((371, 14, 371, 28), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((384, 16, 384, 36), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((122, 33, 124, 21), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', ({(123, 24, 123, 51): 'in_training.trainingEndTime'}, {}), '(in_training.trainingEndTime)', False, 'import utils\n'), ((179, 18, 179, 32), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, 
timedelta\n'), ((181, 31, 183, 17), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', ({(182, 20, 182, 38): 'skills[-1].endTime'}, {}), '(skills[-1].endTime)', False, 'import utils\n'), ((236, 31, 238, 17), 'utils.connection.api_request', 'utils.connection.api_request', (), '', False, 'import utils\n'), ((99, 35, 101, 21), 'metrics.models.Corporation.find_corporation', 'Corporation.find_corporation', ({(100, 24, 100, 47): 'corp_data.corporationID'}, {}), '(corp_data.corporationID)', False, 'from metrics.models import Corporation\n'), ((102, 33, 104, 21), 'utils.common.convert_timestamp', 'utils.common.convert_timestamp', ({(103, 24, 103, 43): 'corp_data.startDate'}, {}), '(corp_data.startDate)', False, 'import utils\n'), ((365, 54, 365, 71), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((375, 60, 375, 77), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((389, 20, 389, 40), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n')] |
sachanacar/webex-assistant-sdk | webex_assistant_sdk/templates/mindmeld_template/{{cookiecutter.skill_name}}/{{cookiecutter.skill_name}}/__init__.py | bb0f1ad16973cfa5784d7d887381229fab01effa | # -*- coding: utf-8 -*-
from {{cookiecutter.skill_name}}.root import app
__all__ = ['app']
| [] |
donicrazy/ChatApp | backend/api/v1/dialogs/urls.py | ab129a9c0706bbb972cbce43283ba6e06d144635 | from django.urls import path
from backend.api.v1.dialogs.views import (
DialogListCreateView,
DialogRetrieveUpdateDestroyAPIView,
DialogMembershipListCreateView,
DialogMessageListCreateView,
DialogMessageRetrieveUpdateDestroyAPIView,
)
urlpatterns = [
path('', DialogListCreateView.as_view()),
path('<int:pk>', DialogRetrieveUpdateDestroyAPIView.as_view()),
path('membership/', DialogMembershipListCreateView.as_view()),
path('messages/', DialogMessageListCreateView.as_view()),
path('messages/<int:pk>', DialogMessageRetrieveUpdateDestroyAPIView.as_view()),
]
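# Illustrative endpoint layout (the mount prefix comes from the project's root
# URLconf and is only assumed here to look like /api/v1/dialogs/):
#   ''                  -> list/create dialogs            (DialogListCreateView)
#   '<int:pk>'          -> retrieve/update/delete dialog  (DialogRetrieveUpdateDestroyAPIView)
#   'membership/'       -> list/create memberships        (DialogMembershipListCreateView)
#   'messages/'         -> list/create messages           (DialogMessageListCreateView)
#   'messages/<int:pk>' -> retrieve/update/delete message (DialogMessageRetrieveUpdateDestroyAPIView)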
| [((11, 13, 11, 43), 'backend.api.v1.dialogs.views.DialogListCreateView.as_view', 'DialogListCreateView.as_view', ({}, {}), '()', False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n'), ((12, 21, 12, 65), 'backend.api.v1.dialogs.views.DialogRetrieveUpdateDestroyAPIView.as_view', 'DialogRetrieveUpdateDestroyAPIView.as_view', ({}, {}), '()', False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n'), ((13, 24, 13, 64), 'backend.api.v1.dialogs.views.DialogMembershipListCreateView.as_view', 'DialogMembershipListCreateView.as_view', ({}, {}), '()', False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n'), ((14, 22, 14, 59), 'backend.api.v1.dialogs.views.DialogMessageListCreateView.as_view', 'DialogMessageListCreateView.as_view', ({}, {}), '()', False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n'), ((15, 30, 15, 81), 'backend.api.v1.dialogs.views.DialogMessageRetrieveUpdateDestroyAPIView.as_view', 'DialogMessageRetrieveUpdateDestroyAPIView.as_view', ({}, {}), '()', False, 'from backend.api.v1.dialogs.views import DialogListCreateView, DialogRetrieveUpdateDestroyAPIView, DialogMembershipListCreateView, DialogMessageListCreateView, DialogMessageRetrieveUpdateDestroyAPIView\n')] |
genouest/biomaj2galaxy | biomaj2galaxy/commands/init.py | 8c76f3cc96902d9401a03e7b1a6cd8f4a7ba17bd | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from bioblend import galaxy
from biomaj2galaxy import config, pass_context
from biomaj2galaxy.io import info, warn
import click
CONFIG_TEMPLATE = """## BioMAJ2Galaxy: Global Configuration File.
# Each stanza should contain a single Galaxy server to interact with.
#
# You can set the key __default to the name of a default instance
__default: local
local:
url: "%(url)s"
apikey: "%(apikey)s"
"""
SUCCESS_MESSAGE = (
"Ready to go! Type `biomaj2galaxy` to get a list of commands you can execute."
)
@click.command()
@pass_context
def init(ctx, url=None, api_key=None, admin=False, **kwds):
"""Help initialize global configuration (in home directory)
"""
click.echo("""Welcome to BioMAJ2Galaxy""")
if os.path.exists(config.global_config_path()):
info("Your biomaj2galaxy configuration already exists. Please edit it instead: %s" % config.global_config_path())
return 0
while True:
# Check environment
url = click.prompt("url")
apikey = click.prompt("apikey")
info("Testing connection...")
try:
instance = galaxy.GalaxyInstance(url=url, key=apikey)
instance.libraries.get_libraries()
# We do a connection test during startup.
info("Ok! Everything looks good.")
break
except Exception as e:
warn("Error, we could not access the configuration data for your instance: %s", e)
should_break = click.prompt("Continue despite inability to contact this instance? [y/n]")
if should_break in ('Y', 'y'):
break
config_path = config.global_config_path()
if os.path.exists(config_path):
warn("File %s already exists, refusing to overwrite." % config_path)
return -1
with open(config_path, "w") as f:
f.write(CONFIG_TEMPLATE % {
'url': url,
'apikey': apikey,
})
info(SUCCESS_MESSAGE)
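# Illustrative session (assuming this command is registered under the
# `biomaj2galaxy` click group referenced in SUCCESS_MESSAGE; values are examples):
#   $ biomaj2galaxy init
#   url: https://galaxy.example.org
#   apikey: 0123456789abcdef
# On success the rendered CONFIG_TEMPLATE is written to config.global_config_path().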
| [((32, 1, 32, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((38, 4, 38, 46), 'click.echo', 'click.echo', ({(38, 15, 38, 45): '"""Welcome to BioMAJ2Galaxy"""'}, {}), "('Welcome to BioMAJ2Galaxy')", False, 'import click\n'), ((61, 18, 61, 45), 'biomaj2galaxy.config.global_config_path', 'config.global_config_path', ({}, {}), '()', False, 'from biomaj2galaxy import config, pass_context\n'), ((62, 7, 62, 34), 'os.path.exists', 'os.path.exists', ({(62, 22, 62, 33): 'config_path'}, {}), '(config_path)', False, 'import os\n'), ((39, 22, 39, 49), 'biomaj2galaxy.config.global_config_path', 'config.global_config_path', ({}, {}), '()', False, 'from biomaj2galaxy import config, pass_context\n'), ((45, 14, 45, 33), 'click.prompt', 'click.prompt', ({(45, 27, 45, 32): '"""url"""'}, {}), "('url')", False, 'import click\n'), ((46, 17, 46, 39), 'click.prompt', 'click.prompt', ({(46, 30, 46, 38): '"""apikey"""'}, {}), "('apikey')", False, 'import click\n'), ((48, 8, 48, 37), 'biomaj2galaxy.io.info', 'info', ({(48, 13, 48, 36): '"""Testing connection..."""'}, {}), "('Testing connection...')", False, 'from biomaj2galaxy.io import info, warn\n'), ((63, 8, 63, 76), 'biomaj2galaxy.io.warn', 'warn', ({(63, 13, 63, 75): "('File %s already exists, refusing to overwrite.' % config_path)"}, {}), "('File %s already exists, refusing to overwrite.' % config_path)", False, 'from biomaj2galaxy.io import info, warn\n'), ((71, 8, 71, 29), 'biomaj2galaxy.io.info', 'info', ({(71, 13, 71, 28): 'SUCCESS_MESSAGE'}, {}), '(SUCCESS_MESSAGE)', False, 'from biomaj2galaxy.io import info, warn\n'), ((50, 23, 50, 65), 'bioblend.galaxy.GalaxyInstance', 'galaxy.GalaxyInstance', (), '', False, 'from bioblend import galaxy\n'), ((53, 12, 53, 46), 'biomaj2galaxy.io.info', 'info', ({(53, 17, 53, 45): '"""Ok! Everything looks good."""'}, {}), "('Ok! Everything looks good.')", False, 'from biomaj2galaxy.io import info, warn\n'), ((40, 93, 40, 120), 'biomaj2galaxy.config.global_config_path', 'config.global_config_path', ({}, {}), '()', False, 'from biomaj2galaxy import config, pass_context\n'), ((56, 12, 56, 94), 'biomaj2galaxy.io.warn', 'warn', ({(56, 17, 56, 90): '"""Error, we could not access the configuration data for your instance: %s"""', (56, 92, 56, 93): 'e'}, {}), "('Error, we could not access the configuration data for your instance: %s',\n e)", False, 'from biomaj2galaxy.io import info, warn\n'), ((57, 27, 57, 101), 'click.prompt', 'click.prompt', ({(57, 40, 57, 100): '"""Continue despite inability to contact this instance? [y/n]"""'}, {}), "('Continue despite inability to contact this instance? [y/n]')", False, 'import click\n')] |
josemrsantos/zoopla_datamart | datamart/tests/test_Dimension.py | f3a3af8071199deeb712d1814aecb6cc3cd88d57 | from ..datamart import *
def test_create_dimension():
dimension = Dimension("test_dimension")
assert dimension.is_degenerate == False
def test_create_dimension_insert_2_identical_lines():
''' with 2 identical lines, only one gets stored
'''
dimension = Dimension("test_dimension")
dimension.addDimensionLine('test')
dimension.addDimensionLine('test')
assert dimension.id_value == 1
assert len(list(dimension.values)) == 1
def test_create_dimension_insert_2_identical_lines_and_1_different():
''' with 2 identical lines and one different, only 2 get stored
'''
dimension = Dimension("test_dimension")
dimension.addDimensionLine('test')
dimension.addDimensionLine('test2')
dimension.addDimensionLine('test')
assert dimension.id_value == 2
assert len(list(dimension.values)) == 2
| [] |
PranjaliJain/matchmaker | preprocessing/convert_formats/msmarco_doc_create_train_input.py | b7e22eb8b70cccabf0729076df7cbab3f4ba4a1f | #
# msmarco doc: create the train.tsv triples
# -------------------------------
import random
random.seed(42)
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.getcwd())
from matchmaker.evaluation.msmarco_eval import *
from collections import defaultdict
from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer
#
# config
#
parser = argparse.ArgumentParser()
parser.add_argument('--out-file', action='store', dest='out_file',
help='training output text file location', required=True)
parser.add_argument('--out-file-ids', action='store', dest='out_file_ids',
help='training output ids file location', required=True)
parser.add_argument('--candidate-file', action='store', dest='candidate_file',
help='trec ranking file location (lucene output)', required=True)
parser.add_argument('--collection-file', action='store', dest='collection_file',
help='collection.tsv location', required=True)
parser.add_argument('--query-file', action='store', dest='query_file',
help='query.tsv location', required=True)
parser.add_argument('--qrel', action='store', dest='qrel_file',
help='qrel location', required=True)
args = parser.parse_args()
max_triples = 10_000_000
max_doc_char_length = 150_000
max_doc_token_length = 10000
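# Illustrative invocation using the arguments defined above (all file paths are
# placeholders, not part of the original script):
#   python msmarco_doc_create_train_input.py \
#       --candidate-file runs/msmarco-doc.train.bm25.trec \
#       --collection-file data/msmarco-docs.tsv \
#       --query-file data/msmarco-doctrain-queries.tsv \
#       --qrel data/msmarco-doctrain-qrels.tsv \
#       --out-file train.triples.tsv --out-file-ids train.triples.ids.tsv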
#
# load data
# -------------------------------
#
collection = {}
#collection_length = {}
tokenizer = BlingFireTokenizer()
with open(args.collection_file,"r",encoding="utf8") as collection_file:
for line in tqdm(collection_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[0]
max_char_doc = ls[1].rstrip()[:max_doc_char_length]
collection[_id] = max_char_doc
#collection_length[_id] = len(tokenizer.tokenize(max_char_doc))
queries = {}
with open(args.query_file,"r",encoding="utf8") as query_file:
for line in tqdm(query_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[0]
queries[_id] = ls[1].rstrip()
qrels = load_reference(args.qrel_file)
#
# produce output
# -------------------------------
#
triples = []
stats = defaultdict(int)
with open(args.candidate_file,"r",encoding="utf8") as candidate_file:
for line in tqdm(candidate_file):
#if random.random() <= 0.5: continue #skip some entries for faster processing
[topicid, _ , unjudged_docid, rank, _ , _ ] = line.split()
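        # each candidate line is the usual 6-column TREC run format, e.g. (values illustrative):
        #   "174249 Q0 D1555982 1 25.31 bm25"  -> queryid, Q0, docid, rank, score, run tag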
#if int(rank) <= 100:
# #if random.random() < 0.7: continue # skip 70% of candidates to speed up things...
# #else:
# stats['< 100 sampling count'] += 1
#else:
# if random.random() <= 0.9: continue # skip 90% of candidates assumong top1k -> same number of samples from 0-100 as 101 - 1000
# else:
# stats['> 100 sampling count'] += 1
if topicid not in queries or topicid not in qrels: # added: because we carved out the validation qrels from the train -> so there are some missing
stats['skipped'] += 1
continue
#assert topicid in qrels
assert unjudged_docid in collection
# Use topicid to get our positive_docid
positive_docid = random.choice(qrels[topicid])
assert positive_docid in collection
if unjudged_docid in qrels[topicid]:
stats['docid_collision'] += 1
continue
stats['kept'] += 1
#if collection_length[positive_docid] > max_doc_token_length and collection_length[unjudged_docid] > max_doc_token_length:
# stats['both_to_long'] += 1
# continue
#if collection_length[positive_docid] > max_doc_token_length:
# stats['pos_to_long'] += 1
# continue
#if collection_length[unjudged_docid] > max_doc_token_length:
# stats['unjuged_to_long'] += 1
# continue
triples.append((topicid,positive_docid,unjudged_docid))
# important: shuffle the train data
random.shuffle(triples)
with open(args.out_file,"w",encoding="utf8") as out_file_text ,\
open(args.out_file_ids,"w",encoding="utf8") as out_file_ids:
for i,(topicid, positive_docid, unjudged_docid) in tqdm(enumerate(triples)):
if i == max_triples:
break
if collection[positive_docid].strip() != "" and collection[unjudged_docid].strip() != "":
out_file_ids.write(str(topicid)+"\t"+positive_docid+"\t"+unjudged_docid+"\n")
out_file_text.write(queries[topicid]+"\t"+collection[positive_docid]+"\t"+collection[unjudged_docid]+"\n")
for key, val in stats.items():
print(f"{key}\t{val}") | [((6, 0, 6, 15), 'random.seed', 'random.seed', ({(6, 12, 6, 14): '(42)'}, {}), '(42)', False, 'import random\n'), ((21, 9, 21, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((55, 12, 55, 32), 'matchmaker.dataloaders.bling_fire_tokenizer.BlingFireTokenizer', 'BlingFireTokenizer', ({}, {}), '()', False, 'from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer\n'), ((81, 8, 81, 24), 'collections.defaultdict', 'defaultdict', ({(81, 20, 81, 23): 'int'}, {}), '(int)', False, 'from collections import defaultdict\n'), ((128, 0, 128, 23), 'random.shuffle', 'random.shuffle', ({(128, 15, 128, 22): 'triples'}, {}), '(triples)', False, 'import random\n'), ((12, 16, 12, 27), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((57, 16, 57, 37), 'tqdm.tqdm', 'tqdm', ({(57, 21, 57, 36): 'collection_file'}, {}), '(collection_file)', False, 'from tqdm import tqdm\n'), ((66, 16, 66, 32), 'tqdm.tqdm', 'tqdm', ({(66, 21, 66, 31): 'query_file'}, {}), '(query_file)', False, 'from tqdm import tqdm\n'), ((85, 16, 85, 36), 'tqdm.tqdm', 'tqdm', ({(85, 21, 85, 35): 'candidate_file'}, {}), '(candidate_file)', False, 'from tqdm import tqdm\n'), ((106, 25, 106, 54), 'random.choice', 'random.choice', ({(106, 39, 106, 53): 'qrels[topicid]'}, {}), '(qrels[topicid])', False, 'import random\n')] |
powerblossom/workcloud | tests/communities/test_reply.py | fd943220366ebeadfa90c59fc395f84a734b5686 | from core.response import Response
from communities.tests import TestCase
class ReplyPermissionTest(TestCase):
def setUp(self):
self.create_user(is_staff=True)
def test_permission_reply_all(self):
self.create_forum()
self.create_thread()
thread_id = self.thread.id
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'name': 'tester',
'content': 'test'
}
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('thread').get('id') == thread_id and
self.data.get('reply_id') == 0 and
not self.data.get('user') and
self.data.get('name') == 'tester' and
self.data.get('content') == 'test' and
not self.data.get('is_deleted')
)
reply_id = self.data.get('id')
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert (
response.status_code == Response.HTTP_200 and
len(self.data) == 1 and
self.data[0].get('name') == 'tester' and
self.data[0].get('content') == 'test'
)
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit'
},
)
assert response.status_code == Response.HTTP_401
response = self.delete(
'/api/communities/r/%d/' % reply_id
)
assert response.status_code == Response.HTTP_401
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert response.status_code == Response.HTTP_200
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.create_user(username='[email protected]')
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert response.status_code == Response.HTTP_404
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_404
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'name': 'tester',
'content': 'test'
},
auth=True
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('thread').get('id') == thread_id and
self.data.get('reply_id') == 0 and
self.data.get('user').get('id') == self.user.id and
self.data.get('content') == 'test' and
not self.data.get('is_deleted')
)
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert (
response.status_code == Response.HTTP_200 and
len(self.data) == 2
)
def test_permission_reply_member(self):
option = self.create_option(
permission_reply='member'
)
self.create_forum(option=option)
self.create_thread()
thread_id = self.thread.id
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'name': 'tester',
'content': 'test'
}
)
assert response.status_code == Response.HTTP_401
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert response.status_code == Response.HTTP_200
self.create_user(username='[email protected]')
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'content': 'test'
},
auth=True
)
reply_id = self.data.get('id')
assert (
response.status_code == Response.HTTP_201 and
self.data.get('content') == 'test' and
self.data.get('user').get('username') == self.user.username
)
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert (
response.status_code == Response.HTTP_200 and
self.data.get('content') == 'edit'
)
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_200
def test_permission_reply_staff(self):
option = self.create_option(
permission_reply='staff'
)
self.create_forum(option=option)
self.create_thread()
thread_id = self.thread.id
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'name': 'tester',
'content': 'test'
}
)
assert response.status_code == Response.HTTP_401
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert response.status_code == Response.HTTP_200
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'content': 'test'
},
auth=True
)
assert response.status_code == Response.HTTP_201
reply_id = self.data.get('id')
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert (
response.status_code == Response.HTTP_200 and
self.data.get('content') == 'edit'
)
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.create_user(username='[email protected]')
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'content': 'test'
},
auth=True
)
assert response.status_code == Response.HTTP_403
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert response.status_code == Response.HTTP_404
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_404
def test_permission_thread_read_member(self):
option = self.create_option(
permission_read='member',
permission_reply='member'
)
self.create_forum(option=option)
self.create_thread()
thread_id = self.thread.id
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert response.status_code == Response.HTTP_401
response = self.get(
'/api/communities/f/%d/replies/' % thread_id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.create_user(username='[email protected]')
response = self.get(
'/api/communities/f/%d/replies/' % thread_id,
auth=True
)
assert response.status_code == Response.HTTP_200
def test_permission_thread_read_staff(self):
option = self.create_option(
permission_read='staff',
permission_reply='staff'
)
self.create_forum(option=option)
self.create_thread()
thread_id = self.thread.id
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert response.status_code == Response.HTTP_401
response = self.get(
'/api/communities/f/%d/replies/' % thread_id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.create_user(username='[email protected]')
response = self.get(
'/api/communities/f/%d/replies/' % thread_id,
auth=True
)
assert response.status_code == Response.HTTP_403
class ReplyModelTest(TestCase):
def setUp(self):
self.create_user(is_staff=True)
self.create_forum()
self.create_thread()
self.create_reply()
def test_nested_reply(self):
response = self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'content': 'test'
},
auth=True
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('reply_id') == 0
)
reply_id = self.data.get('id')
response = self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'reply_id': reply_id,
'content': 'test'
},
auth=True
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('reply_id') == reply_id
)
response = self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'reply_id': self.data.get('id'),
'content': 'test'
},
auth=True
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('reply_id') == reply_id
)
def test_reply_edit_delete(self):
response = self.patch(
'/api/communities/r/%d/' % self.reply.id,
{
'content': 'bow wow'
},
auth=True
)
assert (
response.status_code == Response.HTTP_200 and
self.data.get('content') == 'bow wow' and
self.data.get('reply_id') == 0 and
not self.data.get('name')
)
response = self.patch(
'/api/communities/r/%d/' % self.reply.id,
{
'reply_id': self.reply.id,
'name': 'dog',
'content': 'meow'
},
auth=True
)
assert (
response.status_code == Response.HTTP_200 and
self.data.get('content') == 'meow' and
self.data.get('reply_id') == 0 and
not self.data.get('name')
)
response = self.delete(
'/api/communities/r/%d/' % self.reply.id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.get(
'/api/communities/f/%d/replies/' % self.thread.id,
auth=True
)
assert (
len(self.data) == 1 and
self.data[0].get('is_deleted')
)
def test_reply_to_invalid_id(self):
thread_id = int(self.thread.id) + 1
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'content': 'test'
},
auth=True
)
assert response.status_code == Response.HTTP_404
reply_id = int(self.reply.id) + 1
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'reply_id': reply_id,
'content': 'test'
},
auth=True
)
assert response.status_code == Response.HTTP_404
class ReplyListTest(TestCase):
def setUp(self):
self.create_user(is_staff=True)
self.create_forum()
self.create_thread()
def test_reply_list(self):
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'content': '1'
},
auth=True
)
reply_id = self.data.get('id')
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'content': '4'
},
auth=True
)
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'reply_id': reply_id,
'content': '2'
},
auth=True
)
nested_reply_id = self.data.get('id')
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'content': '5'
},
auth=True
)
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'reply_id': nested_reply_id,
'content': '3'
},
auth=True
)
self.get(
'/api/communities/f/%d/replies/' % self.thread.id,
auth=True
)
assert (
len(self.data) == 5 and
self.data[0].get('content') == '1' and
self.data[0].get('reply_id') == 0 and
self.data[1].get('content') == '2' and
self.data[1].get('reply_id') == reply_id and
self.data[2].get('content') == '3' and
self.data[2].get('reply_id') == reply_id and
self.data[3].get('content') == '4' and
self.data[3].get('reply_id') == 0 and
self.data[4].get('content') == '5' and
self.data[4].get('reply_id') == 0
)
| [] |
noiseux1523/Deep-Belief-Network | examples/Word2Vec_AverageVectorsTuto.py | 6eb364a85fb128a33c539e5e414ef451f24e499d | # Author: Angela Chapman
# Date: 8/6/2014
#
# This file contains code to accompany the Kaggle tutorial
# "Deep learning goes to the movies". The code in this file
# is for Parts 2 and 3 of the tutorial, which cover how to
# train a model using Word2Vec.
#
# *************************************** #
# ****** Read the two training sets and the test set
#
import pandas as pd
import os
from nltk.corpus import stopwords
import nltk.data
import logging
import numpy as np # Make sure that numpy is imported
from gensim.models import Word2Vec
from sklearn.ensemble import RandomForestClassifier
from KaggleWord2VecUtility import KaggleWord2VecUtility
# ****** Define functions to create average word vectors
#
def makeFeatureVec(words, model, num_features):
# Function to average all of the word vectors in a given
# paragraph
#
# Pre-initialize an empty numpy array (for speed)
featureVec = np.zeros((num_features,), dtype="float32")
#
nwords = 0.
#
# Index2word is a list that contains the names of the words in
# the model's vocabulary. Convert it to a set, for speed
index2word_set = set(model.wv.index2word)
#
# Loop over each word in the review and, if it is in the model's
    # vocabulary, add its feature vector to the total
for word in words:
if word in index2word_set:
nwords = nwords + 1.
featureVec = np.add(featureVec, model[word])
#
# Divide the result by the number of words to get the average
featureVec = np.divide(featureVec, nwords)
return featureVec
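# Minimal sketch of makeFeatureVec in isolation (toy corpus and vector size are
# illustrative only and not part of the tutorial):
#   toy_model = Word2Vec([["good", "movie"], ["bad", "movie"]], size=10, min_count=1)
#   vec = makeFeatureVec(["good", "movie"], toy_model, num_features=10)
#   vec.shape  # -> (10,), the element-wise average of the two word vectors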
def getAvgFeatureVecs(reviews, model, num_features):
# Given a set of reviews (each one a list of words), calculate
# the average feature vector for each one and return a 2D numpy array
#
# Initialize a counter
counter = 0.
#
# Preallocate a 2D numpy array, for speed
reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32")
#
# Loop through the reviews
for review in reviews:
#
# Print a status message every 1000th review
if counter % 1000. == 0.:
print "Review %d of %d" % (counter, len(reviews))
#
# Call the function (defined above) that makes average feature vectors
reviewFeatureVecs[int(counter)] = makeFeatureVec(review, model, \
num_features)
#
# Increment the counter
counter = counter + 1.
return reviewFeatureVecs
def getCleanReviews(reviews):
clean_reviews = []
for review in reviews["review"]:
clean_reviews.append(KaggleWord2VecUtility.review_to_wordlist(review, remove_stopwords=True))
return clean_reviews
if __name__ == '__main__':
# Read data from files
train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'), header=0,
delimiter="\t", quoting=3)
test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'), header=0, delimiter="\t",
quoting=3)
unlabeled_train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', "unlabeledTrainData.tsv"), header=0,
delimiter="\t", quoting=3)
# Verify the number of reviews that were read (100,000 in total)
print "Read %d labeled train reviews, %d labeled test reviews, " \
"and %d unlabeled reviews\n" % (train["review"].size,
test["review"].size, unlabeled_train["review"].size)
# Load the punkt tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# ****** Split the labeled and unlabeled training sets into clean sentences
#
sentences = [] # Initialize an empty list of sentences
print "Parsing sentences from training set"
for review in train["review"]:
sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer)
print "Parsing sentences from unlabeled set"
for review in unlabeled_train["review"]:
sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer)
# ****** Set parameters and train the word2vec model
#
# Import the built-in logging module and configure it so that Word2Vec
# creates nice output messages
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', \
level=logging.INFO)
# Set values for various parameters
num_features = 300 # Word vector dimensionality
min_word_count = 40 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model (this will take some time)
print "Training Word2Vec model..."
model = Word2Vec(sentences, workers=num_workers, \
size=num_features, min_count=min_word_count, \
window=context, sample=downsampling, seed=1)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
model_name = "300features_40minwords_10context"
model.save(model_name)
model.doesnt_match("man woman child kitchen".split())
model.doesnt_match("france england germany berlin".split())
model.doesnt_match("paris berlin london austria".split())
model.most_similar("man")
model.most_similar("queen")
model.most_similar("awful")
# ****** Create average vectors for the training and test sets
#
print "Creating average feature vecs for training reviews"
trainDataVecs = getAvgFeatureVecs(getCleanReviews(train), model, num_features)
print "Creating average feature vecs for test reviews"
testDataVecs = getAvgFeatureVecs(getCleanReviews(test), model, num_features)
# ****** Fit a random forest to the training set, then make predictions
#
# Fit a random forest to the training data, using 100 trees
forest = RandomForestClassifier(n_estimators=100)
print "Fitting a random forest to labeled training data..."
forest = forest.fit(trainDataVecs, train["sentiment"])
# Test & extract results
result = forest.predict(testDataVecs)
# Write the test results
output = pd.DataFrame(data={"id": test["id"], "sentiment": result})
output.to_csv("Word2Vec_AverageVectors.csv", index=False, quoting=3)
print "Wrote Word2Vec_AverageVectors.csv" | [] |
PhiladelphiaController/phl-budget-data | src/phl_budget_data/etl/qcmr/positions/__init__.py | 438999017b8659de5bfb223a038f49fe6fd4a83a | from .core import FullTimePositions
| [] |
tzhanl/azure-sdk-for-python | sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_get_schema_response.py | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SearchGetSchemaResponse(Model):
"""The get schema operation response.
:param metadata: The metadata from search results.
:type metadata: ~azure.mgmt.loganalytics.models.SearchMetadata
:param value: The array of result values.
:type value: list[~azure.mgmt.loganalytics.models.SearchSchemaValue]
"""
_attribute_map = {
'metadata': {'key': 'metadata', 'type': 'SearchMetadata'},
'value': {'key': 'value', 'type': '[SearchSchemaValue]'},
}
def __init__(self, **kwargs):
super(SearchGetSchemaResponse, self).__init__(**kwargs)
self.metadata = kwargs.get('metadata', None)
self.value = kwargs.get('value', None)
| [] |
unhingedporter/DataStructureMustKnow | python/ds/spiralprint.py | 3c5b3225afa2775d37a2ff90121f73208717640a | # Python3 program to print
# given matrix in spiral form
def spiralPrint(m, n, a):
start_row_index = 0
start_col_index = 0
l = 0
''' start_row_index - starting row index
m - ending row index
start_col_index - starting column index
n - ending column index
i - iterator '''
while (start_row_index < m and start_col_index < n):
# Print the first row from
# the remaining rows
for i in range(start_col_index, n):
print(a[start_row_index][i], end=" ")
start_row_index += 1
# Print the last column from
# the remaining columns
for i in range(start_row_index, m):
print(a[i][n - 1], end=" ")
n -= 1
# Print the last row from
# the remaining rows
if (start_row_index < m):
for i in range(n - 1, (start_col_index - 1), -1):
print(a[m - 1][i], end=" ")
m -= 1
# Print the first column from
# the remaining columns
if (start_col_index < n):
for i in range(m - 1, start_row_index - 1, -1):
print(a[i][start_col_index], end=" ")
start_col_index += 1
# Driver Code
a = [[1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12],
[13, 14, 15, 16, 17, 18]]
R = 3
C = 6
spiralPrint(R, C, a)
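# Expected output of the driver above (spiral order of the 3 x 6 matrix):
# 1 2 3 4 5 6 12 18 17 16 15 14 13 7 8 9 10 11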
| [] |
marintrace/backend | rest-api/routers/authorization.py | ad34bd50bd5e3f90be1ac16a74d39a0a9342fa33 | """
Authorization Utilities
"""
from shared.models.user_entities import User
from shared.service.jwt_auth_wrapper import JWTAuthManager
manager = JWTAuthManager(oidc_vault_secret="oidc/rest",
object_creator=lambda claims, assumed_role, user_roles: User(
first_name=claims["given_name"],
last_name=claims["family_name"],
school=assumed_role,
email=claims['email']
))
AUTH_USER = manager.auth_header()
| [((8, 81, 13, 26), 'shared.models.user_entities.User', 'User', (), '', False, 'from shared.models.user_entities import User\n')] |
qnano/photonpy | photonpy/tests/psf_g2d_sigma.py | 9c03a1c9f4c2177c9c6fb3f2f16dfec2306006d4 | import matplotlib.pyplot as plt
import numpy as np
from photonpy.cpp.context import Context
import photonpy.cpp.gaussian as gaussian
from photonpy.smlm.util import imshow_hstack
from photonpy.cpp.estimator import Estimator
def CheckDeriv(psf:Estimator, theta):
nderiv,ev=psf.NumDeriv(theta,eps=1e-6)
deriv,ev=psf.Derivatives(theta)
maxerr = np.max( np.abs(deriv-nderiv), (-1,-2) )
print(f"PSF {psf.ParamFormat()}, max {np.max(deriv)}, min: {np.min(deriv)}: Deriv-NumDeriv: {maxerr}")
plt.figure()
imshow_hstack(deriv[0] - nderiv[0])
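# Note: CheckDeriv (above) cross-checks the estimator's analytic derivatives
# (psf.Derivatives) against the finite-difference estimate from psf.NumDeriv;
# the printed Deriv-NumDeriv value is the maximum absolute discrepancy over the ROI.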
with Context() as ctx:
g = gaussian.Gaussian(ctx)
for cuda in [False]:
print(f"CUDA = {cuda}")
sigma=2
roisize=12
psf = g.CreatePSF_XYIBg(roisize, sigma, cuda)
theta = [[4, 4, 1000, 3]]
img = psf.ExpectedValue(theta)
plt.figure()
plt.set_cmap('inferno')
smp = np.random.poisson(img)
plt.imshow(smp[0])
psf_sigma = g.CreatePSF_XYIBgSigma(roisize, sigma, cuda)
theta_s = [[4,4,1000,3,sigma]]
img2 = psf_sigma.ExpectedValue(theta_s)
CheckDeriv(psf, theta)
# CheckDeriv(psf_sigma)
print(f"PSF Sigma crlb: {psf_sigma.CRLB(theta_s)}")
theta = psf_sigma.Estimate(smp)[0]
print(theta)
| [((16, 4, 16, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((17, 4, 17, 39), 'photonpy.smlm.util.imshow_hstack', 'imshow_hstack', ({(17, 18, 17, 38): '(deriv[0] - nderiv[0])'}, {}), '(deriv[0] - nderiv[0])', False, 'from photonpy.smlm.util import imshow_hstack\n'), ((20, 5, 20, 14), 'photonpy.cpp.context.Context', 'Context', ({}, {}), '()', False, 'from photonpy.cpp.context import Context\n'), ((21, 8, 21, 30), 'photonpy.cpp.gaussian.Gaussian', 'gaussian.Gaussian', ({(21, 26, 21, 29): 'ctx'}, {}), '(ctx)', True, 'import photonpy.cpp.gaussian as gaussian\n'), ((13, 21, 13, 41), 'numpy.abs', 'np.abs', ({(13, 28, 13, 40): 'deriv - nderiv'}, {}), '(deriv - nderiv)', True, 'import numpy as np\n'), ((32, 8, 32, 20), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((33, 8, 33, 31), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', ({(33, 21, 33, 30): '"""inferno"""'}, {}), "('inferno')", True, 'import matplotlib.pyplot as plt\n'), ((35, 14, 35, 36), 'numpy.random.poisson', 'np.random.poisson', ({(35, 32, 35, 35): 'img'}, {}), '(img)', True, 'import numpy as np\n'), ((36, 8, 36, 26), 'matplotlib.pyplot.imshow', 'plt.imshow', ({(36, 19, 36, 25): 'smp[0]'}, {}), '(smp[0])', True, 'import matplotlib.pyplot as plt\n'), ((14, 42, 14, 55), 'numpy.max', 'np.max', ({(14, 49, 14, 54): 'deriv'}, {}), '(deriv)', True, 'import numpy as np\n'), ((14, 64, 14, 77), 'numpy.min', 'np.min', ({(14, 71, 14, 76): 'deriv'}, {}), '(deriv)', True, 'import numpy as np\n')] |
Gautierhyp/tespy | tests/tools_tests/helpers_tests.py | d44ae41874baeff77619e560faea59dd0cb84c7c | # -*- coding: utf-8
"""Module for testing helper functions.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location
tests/tools_tests/helpers_tests.py
SPDX-License-Identifier: MIT
"""
from nose.tools import eq_
from tespy.tools.helpers import newton
def func(params, x):
return x ** 2 + x - 20
def deriv(params, x):
return 2 * x + 1
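# func implements f(x) = x**2 + x - 20, whose roots are x = -5 and x = 4, and
# deriv is its analytic first derivative f'(x) = 2*x + 1; the extra params
# argument mirrors the (params, x) call signature used by the newton calls below.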
def test_newton_bounds():
"""
Test newton algorithm value limit handling.
    Try to calculate a zero crossing of a quadratic function in four
    tries.
- zero crossing within limits, starting value near 4
- zero crossing within limits, starting value near -5
- zero crossing below minimum
- zero crossing above maximum
    The function is x^2 + x - 20; its zero crossings are -5 and 4.
"""
result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=0)
msg = ('The newton algorithm should find the zero crossing at 4.0. ' +
str(round(result, 1)) + ' was found instead.')
eq_(4.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=-10)
msg = ('The newton algorithm should find the zero crossing at -5.0. ' +
str(round(result, 1)) + ' was found instead.')
eq_(-5.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-4, valmax=-2, val0=-3)
msg = ('The newton algorithm should not be able to find a zero crossing. '
'The value ' + str(round(result, 1)) + ' was found, but the '
'algorithm should have found the lower boundary of -4.0.')
eq_(-4.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-20, valmax=-10, val0=-10)
msg = ('The newton algorithm should not be able to find a zero crossing. '
'The value ' + str(round(result, 1)) + ' was found, but the '
'algorithm should have found the upper boundary of -10.0.')
eq_(-10.0, result, msg)
| [((41, 13, 41, 70), 'tespy.tools.helpers.newton', 'newton', (), '', False, 'from tespy.tools.helpers import newton\n'), ((44, 4, 44, 25), 'nose.tools.eq_', 'eq_', ({(44, 8, 44, 11): '(4.0)', (44, 13, 44, 19): 'result', (44, 21, 44, 24): 'msg'}, {}), '(4.0, result, msg)', False, 'from nose.tools import eq_\n'), ((46, 13, 46, 72), 'tespy.tools.helpers.newton', 'newton', (), '', False, 'from tespy.tools.helpers import newton\n'), ((49, 4, 49, 26), 'nose.tools.eq_', 'eq_', ({(49, 8, 49, 12): '(-5.0)', (49, 14, 49, 20): 'result', (49, 22, 49, 25): 'msg'}, {}), '(-5.0, result, msg)', False, 'from nose.tools import eq_\n'), ((51, 13, 51, 70), 'tespy.tools.helpers.newton', 'newton', (), '', False, 'from tespy.tools.helpers import newton\n'), ((55, 4, 55, 26), 'nose.tools.eq_', 'eq_', ({(55, 8, 55, 12): '(-4.0)', (55, 14, 55, 20): 'result', (55, 22, 55, 25): 'msg'}, {}), '(-4.0, result, msg)', False, 'from nose.tools import eq_\n'), ((57, 13, 57, 73), 'tespy.tools.helpers.newton', 'newton', (), '', False, 'from tespy.tools.helpers import newton\n'), ((61, 4, 61, 27), 'nose.tools.eq_', 'eq_', ({(61, 8, 61, 13): '(-10.0)', (61, 15, 61, 21): 'result', (61, 23, 61, 26): 'msg'}, {}), '(-10.0, result, msg)', False, 'from nose.tools import eq_\n')] |
ralfonso/theory | theory/model/form.py | 41684969313cfc545d74b306e409fd5bf21387b3 | import formencode
import pylons
from pylons import app_globals as g
class OutputSchema(formencode.Schema):
allow_extra_fields = False
enabled = formencode.validators.Int()
class ConfigForm(formencode.Schema):
allow_extra_fields = True
filter_extra_fields = True
#pre_validators = [formencode.NestedVariables()]
action = formencode.validators.String(not_empty=False,if_missing=None)
cancel = formencode.validators.String(not_empty=False,if_missing=None)
firsttime = formencode.validators.Int(not_empty=False, if_missing=0)
server = formencode.validators.String(strip=True,not_empty=True,messages={'empty':'please enter a server host name'})
port = formencode.validators.Int(strip=True,not_empty=True,messages={'empty':'please enter a port, MPD default is 6600',
'integer':'please enter an integer value for port, MPD default is 6600'
})
password = formencode.validators.String(not_empty=False,if_missing=None)
webpassword = formencode.validators.String(not_empty=False,if_missing=None)
timeout = formencode.validators.Bool()
default_search = formencode.validators.String(not_empty=True)
awskey = formencode.validators.String(strip=True,not_empty=False,if_missing=None)
aws_secret = formencode.validators.String(strip=True,not_empty=False,if_missing=None)
outputs = formencode.ForEach(OutputSchema(), if_missing=[])
class StreamNameInUse(formencode.validators.FancyValidator):
def validate_python(self, values, state):
# if old name is set, don't do this check
if values['oldname']:
return
if values['name'] in [name[0] for name in g.tc.streams]:
raise formencode.Invalid({'stream_name_taken':"that stream name has already been used"}, values, state)
class StreamForm(formencode.Schema):
allow_extra_fields = False
name = formencode.validators.String(not_empty=True,strip=True,messages={'empty':'please enter a name for this stream'})
url = formencode.validators.URL(not_empty=True,require_tld=False,strip=True,check_exists=False,messages={'empty':'please enter a URL'})
oldname = formencode.validators.String(not_empty=False)
chained_validators = [StreamNameInUse()]
class State(object):
"""Trivial class to be used as State objects to transport information to formencode validators"""
def __init__(self, **kw):
for key in kw:
setattr(self, key, kw[key])
def __repr__(self):
atts = []
for key in self.__dict__:
atts.append( (key, getattr(self, key)) )
return self.__class__.__name__ + '(' + ', '.join(x[0] + '=' + repr(x[1]) for x in atts) + ')'
def validate_custom(schema, **state_kwargs):
"""Validate a formencode schema.
Works similar to the @validate decorator. On success return a dictionary
of parameters from request.params. On failure throws a formencode.Invalid
exception."""
# Create a state object if requested
if state_kwargs:
state = State(**state_kwargs)
else:
state = None
# In case of validation errors an exception is thrown. This needs to
# be caught elsewhere.
if state_kwargs.get('variable_decode', False):
params = formencode.variabledecode.variable_decode(pylons.request.params)
print pylons.request.params
print params
else:
params = pylons.request.params
return schema.to_python(params, state)
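# Illustrative controller-side usage (the controller, template name and redisplay
# logic are assumptions for this sketch, not part of this module):
#   try:
#       params = validate_custom(StreamForm(), variable_decode=False)
#   except formencode.Invalid, e:
#       return htmlfill(render('/stream_form.mako'), e)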
def htmlfill(html, exception_error=None):
"""Add formencode error messages to an HTML string.
'html' contains the HTML page with the form (e.g. created with render()).
'exception_error' is the formencode.Invalid-Exception from formencode."""
return formencode.htmlfill.render(
form=html,
defaults=pylons.request.params,
errors=(exception_error and exception_error.unpack_errors()),
encoding=pylons.response.determine_charset()
)
| [] |
OnRails-IN/backend | utils/dynamo.py | 5f5c9703fcda282ed54f2e6315680fb30fd91a6f | """
Dynamo Utils
============
All utility functions for interactions with DynamoDB
Functions
- ensure_json
    - create_user_table
    - create_train_table
- create_or_update_record
- list_tables
- list_records
- get_record
- delete_table
- delete_record
- check_active
"""
import boto3
from decimal import Decimal
from constants import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, DYNAMO_URL
ddb = boto3.resource(
'dynamodb',
aws_access_key_id = AWS_ACCESS_KEY,
aws_secret_access_key = AWS_SECRET_KEY,
endpoint_url = DYNAMO_URL,
region_name = AWS_REGION
)
client = boto3.client(
'dynamodb',
aws_access_key_id = AWS_ACCESS_KEY,
aws_secret_access_key = AWS_SECRET_KEY,
endpoint_url = DYNAMO_URL,
region_name = AWS_REGION
)
def ensure_json(obj):
"""
Function to ensure that a python object is JSON serializable
Params:
obj::dict|[dict]
Object to be JSON serializable
Returns:
obj::dict|[dict]
Returns the JSON serializable object
"""
if isinstance(obj, list):
for i in range(len(obj)):
obj[i] = ensure_json(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.keys():
obj[k] = ensure_json(obj[k])
return obj
elif isinstance(obj, Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
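# Illustrative behaviour of ensure_json (example values only):
#   ensure_json({"count": Decimal("3"), "price": Decimal("9.5")})
#   -> {"count": 3, "price": 9.5}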
def create_user_table():
"""
Function to create the "users" table in DynamoDB
Returns:
bool
If the table was created or not
"""
try:
table = ddb.create_table(
TableName = "users",
KeySchema = [
{
"AttributeName": "username",
"KeyType": "HASH" # Partition key
},
{
"AttributeName": "index",
"KeyType": "RANGE" # Sort key
}
],
AttributeDefinitions = [
{
"AttributeName": "username",
"AttributeType": "S"
},
{
"AttributeName": "index",
"AttributeType": "S"
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_user_table\n{}".format(e))
return None
def create_train_table():
"""
Function to create the "trains" table in DynamoDB
Returns:
bool
If the table was created or not
"""
try:
table = ddb.create_table(
TableName = "trains",
KeySchema = [
{
"AttributeName": "train_name",
"KeyType": "HASH" # Partition key
},
{
"AttributeName": "train_type",
"KeyType": "RANGE" # Sort key
}
],
AttributeDefinitions = [
{
"AttributeName": "train_name",
"AttributeType": "N"
},
{
"AttributeName": "train_type",
"AttributeType": "S"
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
        print("Exception @ create_train_table\n{}".format(e))
return None
def create_or_update_record(tableName, record):
"""
Function to create or update a record in DynamoDB
Params:
tableName::str
The table name to get the record
record::dict
The object to store
Returns:
bool
If the record was inserted or not
"""
if not tableName or not record:
return False
if not {'username', 'index'}.issubset(record):
return False
try:
res = ddb.Table(tableName).get_item(
Key = {
"username": record['username'],
"index": record['index']
}
)
record = { **res['Item'], **record } if 'Item' in res else record
ddb.Table(tableName).put_item(
Item = record
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_or_update_record\n{}".format(e))
return None
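# Illustrative call (example values; the target table must already exist and use
# the username/index key schema created by create_user_table above):
#   create_or_update_record("users", {"username": "alice", "index": "profile", "age": 30})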
def list_tables():
"""
Function to list all tables in DynamoDB
Returns:
tables::[str]
The list of tables
"""
try:
return client.list_tables()['TableNames']
except client.exceptions.ResourceNotFoundException:
print("Tables do not exist")
return False
except Exception as e:
print("Exception @ list_tables\n{}".format(e))
return None
def list_records(tableName):
"""
Function to list all records from a DynamoDB table
Params:
tableName::str
The table name to get the records
Returns:
records::[dict]
The list of records stored in the table
"""
if not tableName:
return False
try:
table = ddb.Table(tableName)
res = table.scan()
docs = ensure_json(res['Items'])
while 'LastEvaluatedKey' in res:
res = table.scan(ExclusiveStartKey = res['LastEvaluatedKey'])
docs.extend(ensure_json(res['Items']))
return docs
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ list_records\n{}".format(e))
return None
def get_record(tableName, query):
"""
Function to retrieve one record from DynamoDB table
Params:
tableName::str
The table name to get the record
query::dict
The query to fetch the record
Returns:
doc::dict
The record retrieved from the table
"""
if not tableName or not query or not isinstance(query, dict):
return False
try:
res = ddb.Table(tableName).get_item(
Key = query
)
doc = ensure_json(res['Item']) if 'Item' in res else None
return doc
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ get_record\n{}".format(e))
return None
def delete_table(tableName):
"""
Function to delete a DynamoDB table
Params:
tableName::str
The table name to delete
Returns:
bool
If the table was deleted or not
"""
if not tableName:
return False
try:
ddb.Table(tableName).delete()
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ delete_table\n{}".format(e))
return None
def delete_record(tableName, query):
"""
    Function to delete a record from a DynamoDB table
Params:
tableName::str
The table name to get the record
query::dict
The query to fetch the record
Returns:
bool
If the record was deleted or not
"""
    if not tableName or not query or not isinstance(query, dict):
return False
try:
res = ddb.Table(tableName).delete_item(
Key = query
)
print(res)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ delete_record\n{}".format(e))
return None
def check_active(tableName):
"""
Function to check if a table is ACTIVE
Params:
tableName::str
The table name to check
Returns:
bool
If the table is active or not
"""
if not tableName:
return False
try:
if ddb.Table(tableName).table_status == "ACTIVE":
return True
return False
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
        print("Exception @ check_active\n{}".format(e))
return None | [((23, 6, 29, 1), 'boto3.resource', 'boto3.resource', (), '', False, 'import boto3\n'), ((30, 9, 36, 1), 'boto3.client', 'boto3.client', (), '', False, 'import boto3\n')] |
gitana/cloudcms-python-driver | cloudcms/branch/__init__.py | 8685c634880c1a6af6f359f1a25de42dcf49f319 | from .branch import Branch | [] |
kubatom/my_nemtiko_repo | test2/test2.py | 842a303ae120d871623c267ea76c2353d70b2fce | print('this is a test2 file')
| [] |
yijiangh/compas | src/compas_blender/forms/__init__.py | a9e86edf6b602f47ca051fccedcaa88a5e5d3600 | """
********************************************************************************
compas_blender.forms
********************************************************************************
.. currentmodule:: compas_blender.forms
"""
__all__ = []
| [] |
cadappl/scm-workbench | Source/Git/Experiments/git_annotate.py | 302cdb8e36bb755f4977062e8977c37e7f4491f9 | #!/usr/bin/python3
import sys
import git
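# Usage sketch: argv[1] is a path to a local git working copy and argv[2] is a
# repo-relative path of the file to blame, e.g. (paths illustrative):
#   python git_annotate.py ~/work/scm-workbench Source/Git/git_annotate.py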
r = git.Repo( sys.argv[1] )
num = 0
for info in r.blame( 'HEAD', sys.argv[2] ):
num += 1
commit = info[0]
all_lines = info[1]
print( '%s %6d:%s' % (commit, num, all_lines[0]) )
for line in all_lines[1:]:
num += 1
print( '%*s %6d:%s' % (40, '', num, line) )
| [((6, 4, 6, 27), 'git.Repo', 'git.Repo', ({(6, 14, 6, 25): 'sys.argv[1]'}, {}), '(sys.argv[1])', False, 'import git\n')] |
HansikaPH/time-series-forecasting | configs/global_configs.py | 23be319a190489bc1464653a3d672edd70ab110b | # configs for the model training
class model_training_configs:
VALIDATION_ERRORS_DIRECTORY = 'results/validation_errors/'
INFO_FREQ = 1
# configs for the model testing
class model_testing_configs:
RNN_FORECASTS_DIRECTORY = 'results/rnn_forecasts/'
RNN_ERRORS_DIRECTORY = 'results/errors'
PROCESSED_RNN_FORECASTS_DIRECTORY = '/results/processed_rnn_forecasts/'
# configs for hyperparameter tuning (SMAC3)
class hyperparameter_tuning_configs:
SMAC_RUNCOUNT_LIMIT = 50
class gpu_configs:
log_device_placement = False
| [] |
imaginal/openprocurement.blade | openprocurement/blade/tests/auctions.py | 4ef512e3d0c1287af1faca9caa9e5349a3c5b0fb | # -*- coding: utf-8 -*-
import unittest
from uuid import uuid4
from copy import deepcopy
from openprocurement.api.models import get_now
from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX
try:
import openprocurement.auctions.core as auctions_core
except ImportError:
auctions_core = None
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionResourceTest(AuctionBaseWebTest):
def test_empty_listing(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/auctions?opt_jsonp=callback')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions?opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions?opt_jsonp=callback&opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions?offset=2015-01-01T00:00:00+02:00&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
response = self.app.get('/auctions?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/auctions?feed=changes&offset=0', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Offset expired/invalid', u'location': u'params', u'name': u'offset'}
])
response = self.app.get('/auctions?feed=changes&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
def test_listing(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
for i in range(3):
offset = get_now().isoformat()
auctions.append(self.create_auction())
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
while True:
response = self.app.get('/auctions?offset={}'.format(offset))
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/auctions', params=[('opt_fields', 'status,enquiryPeriod')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod']))
self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri'])
response = self.app.get('/auctions?descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True))
response = self.app.get('/auctions?descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_auction_data2 = test_auction_data.copy()
test_auction_data2['mode'] = 'test'
self.create_auction(test_auction_data2)
while True:
response = self.app.get('/auctions?mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_changes(self):
response = self.app.get('/auctions?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
for i in range(3):
auctions.append(self.create_auction())
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions?feed=changes')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
response = self.app.get('/auctions?feed=changes&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status,enquiryPeriod')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod']))
self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri'])
response = self.app.get('/auctions?feed=changes&descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True))
response = self.app.get('/auctions?feed=changes&descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_auction_data2 = test_auction_data.copy()
test_auction_data2['mode'] = 'test'
self.create_auction(test_auction_data2)
while True:
response = self.app.get('/auctions?feed=changes&mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?feed=changes&mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_draft(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
data = test_auction_data.copy()
data.update({'status': 'draft'})
for i in range(3):
auctions.append(self.create_auction(data))
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
def test_get_auction(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], auction)
response = self.app.get('/auctions/{}?opt_jsonp=callback'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}?opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_auction_not_found(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'}
])
response = self.app.patch_json(
'/auctions/some_id', {'data': {}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'}
])
# put custom document object into database to check auction construction on non-Auction data
data = {'contract': 'test', '_id': uuid4().hex}
self.db.save(data)
response = self.app.get('/auctions/{}'.format(data['_id']), status=404)
self.assertEqual(response.status, '404 Not Found')
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionAwardResourceTest(AuctionBaseWebTest):
def test_listing(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], auction['awards'])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_jsonp=callback'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_jsonp=callback&opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
def test_listing_changes(self):
auction = self.create_auction()
data = self.db[auction['id']]
awards = data['awards']
for i in range(3):
award = deepcopy(test_award)
award['date'] = get_now().isoformat()
award['id'] = uuid4().hex
awards.append(award)
self.db.save(data)
ids = ','.join([i['id'] for i in awards])
response = self.app.get('/auctions/{}/awards'.format(auction['id']))
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), len(awards))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in awards]))
self.assertEqual(set([i['date'] for i in response.json['data']]), set([i['date'] for i in awards]))
self.assertEqual([i['date'] for i in response.json['data']], sorted([i['date'] for i in awards]))
def test_get_award(self):
auction = self.create_auction()
award = auction['awards'][0]
response = self.app.get('/auctions/{}/awards/{}'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], award)
response = self.app.get('/auctions/{}/awards/{}?opt_jsonp=callback'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}/awards/{}?opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_award_not_found(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards/some_id'.format(auction['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'award_id'}
])
def test_get_document_with_versions(self):
auction = self.create_auction()
data = self.db[auction['id']]
documents = data['documents']
for i in range(3):
document = deepcopy(test_document)
document['id'] = data['documents'][0]['id']
document['url'] += str(i)
document['dateModified'] = get_now().isoformat()
documents.append(document)
self.db.save(data)
versions = [{'dateModified': i['dateModified'], 'url': i['url']} for i in documents[:-1]]
response = self.app.get('/auctions/{}/documents/{}'.format(auction['id'], document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['previousVersions']), len(versions))
self.assertEqual(response.json['data']['previousVersions'], versions)
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionAwardDocumentResourceTest(AuctionBaseWebTest):
def test_listing(self):
auction = self.create_auction()
award = auction['awards'][0]
document = award['documents'][0]
response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], award['documents'])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback&opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
def test_listing_changes(self):
auction = self.create_auction()
data = self.db[auction['id']]
award = data['awards'][0]
award_documents = award['documents']
for i in range(3):
document = deepcopy(test_document)
document['dateModified'] = get_now().isoformat()
document['id'] = uuid4().hex
award_documents.append(document)
self.db.save(data)
ids = ','.join([i['id'] for i in award_documents])
response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id']))
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), len(award_documents))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in award_documents]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in award_documents]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in award_documents]))
def test_get_award_document(self):
auction = self.create_auction()
award = auction['awards'][0]
award_document = award['documents'][0]
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], award_document)
response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_jsonp=callback'.format(auction['id'], award['id'],award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_pretty=1'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_award_document_not_found(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards/{}/documents/some_id'.format(auction['id'], auction['awards'][0]['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AuctionResourceTest))
suite.addTest(unittest.makeSuite(AuctionAwardResourceTest))
suite.addTest(unittest.makeSuite(AuctionAwardDocumentResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| [((13, 1, 13, 64), 'unittest.skipUnless', 'unittest.skipUnless', ({(13, 21, 13, 34): 'auctions_core', (13, 36, 13, 63): '"""Auctions is not reachable"""'}, {}), "(auctions_core, 'Auctions is not reachable')", False, 'import unittest\n'), ((340, 1, 340, 64), 'unittest.skipUnless', 'unittest.skipUnless', ({(340, 21, 340, 34): 'auctions_core', (340, 36, 340, 63): '"""Auctions is not reachable"""'}, {}), "(auctions_core, 'Auctions is not reachable')", False, 'import unittest\n'), ((452, 1, 452, 64), 'unittest.skipUnless', 'unittest.skipUnless', ({(452, 21, 452, 34): 'auctions_core', (452, 36, 452, 63): '"""Auctions is not reachable"""'}, {}), "(auctions_core, 'Auctions is not reachable')", False, 'import unittest\n'), ((548, 12, 548, 32), 'unittest.TestSuite', 'unittest.TestSuite', ({}, {}), '()', False, 'import unittest\n'), ((556, 4, 556, 38), 'unittest.main', 'unittest.main', (), '', False, 'import unittest\n'), ((160, 29, 160, 53), 'openprocurement.edge.tests.base.test_auction_data.copy', 'test_auction_data.copy', ({}, {}), '()', False, 'from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX\n'), ((251, 29, 251, 53), 'openprocurement.edge.tests.base.test_auction_data.copy', 'test_auction_data.copy', ({}, {}), '()', False, 'from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX\n'), ((272, 15, 272, 39), 'openprocurement.edge.tests.base.test_auction_data.copy', 'test_auction_data.copy', ({}, {}), '()', False, 'from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX\n'), ((549, 18, 549, 57), 'unittest.makeSuite', 'unittest.makeSuite', ({(549, 37, 549, 56): 'AuctionResourceTest'}, {}), '(AuctionResourceTest)', False, 'import unittest\n'), ((550, 18, 550, 62), 'unittest.makeSuite', 'unittest.makeSuite', ({(550, 37, 550, 61): 'AuctionAwardResourceTest'}, {}), '(AuctionAwardResourceTest)', False, 'import unittest\n'), ((551, 18, 551, 70), 'unittest.makeSuite', 'unittest.makeSuite', ({(551, 37, 551, 69): 'AuctionAwardDocumentResourceTest'}, {}), '(AuctionAwardDocumentResourceTest)', False, 'import unittest\n'), ((380, 20, 380, 40), 'copy.deepcopy', 'deepcopy', ({(380, 29, 380, 39): 'test_award'}, {}), '(test_award)', False, 'from copy import deepcopy\n'), ((436, 23, 436, 46), 'copy.deepcopy', 'deepcopy', ({(436, 32, 436, 45): 'test_document'}, {}), '(test_document)', False, 'from copy import deepcopy\n'), ((495, 23, 495, 46), 'copy.deepcopy', 'deepcopy', ({(495, 32, 495, 45): 'test_document'}, {}), '(test_document)', False, 'from copy import deepcopy\n'), ((333, 43, 333, 50), 'uuid.uuid4', 'uuid4', ({}, {}), '()', False, 'from uuid import uuid4\n'), ((382, 26, 382, 33), 'uuid.uuid4', 'uuid4', ({}, {}), '()', False, 'from uuid import uuid4\n'), ((497, 29, 497, 36), 'uuid.uuid4', 'uuid4', ({}, {}), '()', False, 'from uuid import uuid4\n'), ((85, 21, 85, 30), 'openprocurement.api.models.get_now', 'get_now', ({}, {}), '()', False, 'from openprocurement.api.models import get_now\n'), ((381, 28, 381, 37), 'openprocurement.api.models.get_now', 'get_now', ({}, {}), '()', False, 'from openprocurement.api.models import get_now\n'), ((439, 39, 439, 48), 'openprocurement.api.models.get_now', 'get_now', ({}, {}), '()', False, 'from openprocurement.api.models import get_now\n'), ((496, 39, 496, 48), 'openprocurement.api.models.get_now', 'get_now', ({}, {}), '()', False, 'from openprocurement.api.models import 
get_now\n')] |
kejkz/webium | webium/controls/select.py | ccb09876a201e75f5c5810392d4db7a8708b90cb | from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.remote.webelement import WebElement
class Select(WebElement):
"""
Implements logic to work with Web List UI elements
"""
@property
def is_multiple(self):
value = self.get_attribute('multiple')
return value is not None and not value == 'false'
def select_option(self, option):
"""
Performs selection of provided item from Web List
@params option - string item name
"""
items_list = self.get_options()
for item in items_list:
if item.get_attribute("value") == option:
item.click()
break
def get_options(self):
"""
        Returns all option elements of the Web List
"""
return self.find_elements_by_tag_name('option')
def get_attribute_selected(self, attribute):
"""
Performs search of selected item from Web List
Return attribute of selected item
@params attribute - string attribute name
"""
items_list = self.get_options()
return next(iter([item.get_attribute(attribute) for item in items_list if item.is_selected()]), None)
def get_value_selected(self):
"""
Performs search of selected item from Web List
Return value of selected item
"""
return self.get_attribute_selected('value')
def get_text_selected(self):
"""
Performs search of selected item from Web List
Return text of selected item
"""
return self.get_attribute_selected('text')
def select_by_visible_text(self, text):
"""
Performs search of selected item from Web List
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return
matched = True
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == "":
candidates = self.get_options()
else:
xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: " + str(text))
@staticmethod
def _escape_string(value):
if '"' in value and "'" in value:
substrings = value.split('"')
result = ['concat(']
for substring in substrings:
result.append('"{0}"'.format(substring))
result.append(', \'"\', ')
result.pop()
if value.endswith('"'):
result.append(', \'"\'')
return ''.join(result) + ')'
if '"' in value:
return "'{0}'".format(value)
return '"{0}"'.format(value)
@staticmethod
def _get_longest_token(value):
items = value.split(' ')
longest = ''
for item in items:
if len(item) > len(longest):
longest = item
return longest
@staticmethod
def _set_selected(option):
if not option.is_selected():
option.click()
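# Illustrative usage sketch. In webium this control is normally bound inside a
# page object via a Find() descriptor (the wiring below is an assumption and
# 'country' is a hypothetical element id):
#
#   country = Find(Select, By.ID, 'country')
#   ...
#   page.country.select_by_visible_text('New Zealand')
#
# The XPath string-escaping helper is pure Python and can be sanity-checked
# without a browser:
if __name__ == '__main__':
    print(Select._escape_string('plain'))                 # -> "plain"
    print(Select._escape_string('has "double" quotes'))   # -> 'has "double" quotes'
    print(Select._escape_string('both "double" and \'single\''))
    # -> concat("both ", '"', "double", '"', " and 'single'")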
| [] |
zy-sunshine/falkon-pyqt5 | mc/cookies/CookieManager.py | bc2b60aa21c9b136439bd57a11f391d68c736f99 | from PyQt5.QtWidgets import QDialog
from PyQt5 import uic
from PyQt5.Qt import Qt
from PyQt5.Qt import QShortcut
from PyQt5.Qt import QKeySequence
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QInputDialog
from PyQt5.Qt import QDateTime
from PyQt5.Qt import QStyle
from PyQt5.Qt import QNetworkCookie
from PyQt5.QtWidgets import QTreeWidgetItem
from mc.common.globalvars import gVar
from mc.app.Settings import Settings
from mc.common import const
from mc.tools.TreeWidget import TreeWidget
from mc.tools.IconProvider import IconProvider
class HashableTreeWidgetItem(QTreeWidgetItem):
def __hash__(self):
return id(self)
class CookieManager(QDialog):
def __init__(self, parent=None):
'''
@param parent QWidget
'''
super().__init__(parent)
self._ui = uic.loadUi('mc/cookies/CookieManager.ui', self)
self._domainHash = {} # QHash<QString, QTreeWidgetItem>
self._itemHash = {} # QHash<QTreeWidgetItem, QNetworkCookie>
self.setAttribute(Qt.WA_DeleteOnClose)
gVar.appTools.centerWidgetOnScreen(self)
if self.isRightToLeft():
self._ui.cookieTree.headerItem().setTextAlignment(0, Qt.AlignRight | Qt.AlignVCenter)
self._ui.cookieTree.headerItem().setTextAlignment(1, Qt.AlignRight | Qt.AlignVCenter)
self._ui.cookieTree.setLayoutDirection(Qt.LeftToRight)
self._ui.whiteList.setLayoutDirection(Qt.LeftToRight)
self._ui.blackList.setLayoutDirection(Qt.LeftToRight)
# Stored Cookies
self._ui.cookieTree.currentItemChanged.connect(self._currentItemChanged)
self._ui.removeAll.clicked.connect(self._removeAll)
self._ui.removeOne.clicked.connect(self._remove)
self._ui.close.clicked.connect(lambda: self._close())
self._ui.close2.clicked.connect(lambda: self._close())
self._ui.close3.clicked.connect(lambda: self._close())
self._ui.search.textChanged.connect(self._filterString)
# Cookie Filtering
self._ui.whiteAdd.clicked.connect(self._addWhitelist)
self._ui.whiteRemove.clicked.connect(self._removeWhitelist)
self._ui.blackAdd.clicked.connect(self._addBlacklist)
self._ui.blackRemove.clicked.connect(self._removeBlacklist)
# Cookie Settings
settings = Settings()
settings.beginGroup('Cookie-Settings')
        self._ui.saveCookies.setChecked(settings.value('allowCookies', True))  # same key as written back in closeEvent()
self._ui.filter3rdParty.setChecked(settings.value('filterThirdPartyCookies', False))
self._ui.filterTracking.setChecked(settings.value('filterTrackingCookie', False))
self._ui.deleteCookiesOnClose.setChecked(settings.value('deleteCookiesOnClose', False))
self._ui.whiteList.addItems(settings.value('whitelist', []))
self._ui.blackList.addItems(settings.value('blacklist', []))
settings.endGroup()
if const.QTWEBENGINEWIDGETS_VERSION < const.QT_VERSION_CHECK(5, 11, 0):
self._ui.filter3rdParty.hide()
self._ui.search.setPlaceholderText(_('Search'))
self._ui.cookieTree.setDefaultItemShowMode(TreeWidget.ItemsCollapsed)
self._ui.cookieTree.sortItems(0, Qt.AscendingOrder)
self._ui.cookieTree.header().setDefaultSectionSize(220)
self._ui.cookieTree.setFocus()
self._ui.whiteList.setSortingEnabled(True)
self._ui.blackList.setSortingEnabled(True)
self._removeShortcut = QShortcut(QKeySequence('Del'), self)
self._removeShortcut.activated.connect(self._deletePressed)
self._ui.search.textChanged.connect(self._filterString)
cookieJar = gVar.app.cookieJar()
cookieJar.cookieAdded.connect(self._addCookie)
cookieJar.cookieRemoved.connect(self._removeCookie)
# Load cookies
for cookie in cookieJar.getAllCookies():
self._addCookie(cookie)
gVar.appTools.setWmClass('Cookies', self)
def _close(self):
super().close()
# private Q_SLOTS:
def _currentItemChanged(self, current, parent):
'''
@param: current QTreeWidgetItem
@param: parent QTreeWidgetItem
'''
if not current:
return
if not current.text(1):
self._ui.name.setText(_('<cookie not selected>'))
self._ui.value.setText(_("<cookie not selected>"))
self._ui.server.setText(_("<cookie not selected>"))
self._ui.path.setText(_("<cookie not selected>"))
self._ui.secure.setText(_("<cookie not selected>"))
self._ui.expiration.setText(_("<cookie not selected>"))
self._ui.removeOne.setText(_("Remove cookies"))
return
cookie = current.data(0, Qt.UserRole + 10)
self._ui.name.setText(cookie.name().data().decode())
self._ui.value.setText(cookie.value().data().decode())
self._ui.server.setText(cookie.domain())
self._ui.path.setText(cookie.path())
if cookie.isSecure():
self._ui.secure.setText(_('Secure only'))
else:
self._ui.secure.setText(_('All connections'))
if cookie.isSessionCookie():
self._ui.expiration.setText(_('Session cookie'))
else:
self._ui.expiration.setText(
QDateTime(cookie.expirationDate()).toString('hh:mm:ss dddd d. MMMM yyyy')
)
self._ui.removeOne.setText(_('Remove cookie'))
def _remove(self):
current = self._ui.cookieTree.currentItem()
if not current:
return
cookies = [] # QList<QNetworkCookie>
if current.childCount():
for idx in range(current.childCount()):
# QTreeWidgetItem
item = current.child(idx)
if item and item in self._itemHash:
cookies.append(self._itemHash[item])
elif current in self._itemHash:
cookies.append(self._itemHash[current])
cookieJar = gVar.app.cookieJar()
for cookie in cookies:
cookieJar.deleteCookie(cookie)
def _removeAll(self):
button = QMessageBox.warning(self, _('Confirmation'),
_('Are you sure you want to delete all cookies on your computer?'),
QMessageBox.Yes | QMessageBox.No)
if button != QMessageBox.Yes:
return
gVar.app.cookieJar().deleteAllCookies()
self._itemHash.clear()
self._domainHash.clear()
self._ui.cookieTree.clear()
def _addWhitelist(self):
server, ok = QInputDialog.getText(self, _('Add to whitelist'),
_('Server:'))
if not server:
return
if self._ui.blackList.findItems(server, Qt.MatchFixedString):
QMessageBox.information(self, _('Already blacklisted!'),
_("The server \"%s\" is already in blacklist, please remove it first.") % server)
return
if not self._ui.whiteList.findItems(server, Qt.MatchFixedString):
self._ui.whiteList.addItem(server)
def _removeWhitelist(self):
item = self._ui.whiteList.currentItem()
self._removeTreeItem(self._ui.whiteList, item)
def _addBlacklist(self):
server, ok = QInputDialog.getText(self, _('Add to blacklist'),
_('Server:'))
self._addBlacklistByServer(server)
def _removeBlacklist(self):
item = self._ui.blackList.currentItem()
self._removeTreeItem(self._ui.blackList, item)
def _deletePressed(self):
if self._ui.cookieTree.hasFocus():
self._remove()
elif self._ui.whiteList.hasFocus():
self._removeWhitelist()
elif self._ui.blackList.hasFocus():
self._removeBlacklist()
def _filterString(self, string):
'''
@param: string QString
'''
print('=====>', string)
if not string:
for idx in range(self._ui.cookieTree.topLevelItemCount()):
item = self._ui.cookieTree.topLevelItem(idx)
item.setHidden(False)
item.setExpanded(self._ui.cookieTree.defaultItemShowMode() == TreeWidget.ItemsExpanded)
else:
strLower = string.lower()
for idx in range(self._ui.cookieTree.topLevelItemCount()):
item = self._ui.cookieTree.topLevelItem(idx)
text = '.' + item.text(0)
                item.setHidden(strLower not in text.lower())  # hide domains that do not contain the search text
item.setExpanded(True)
def _addCookie(self, cookie):
'''
@param: cookie QNetworkCookie
'''
item = None # QTreeWidgetItem
domain = self._cookieDomain(cookie)
findParent = self._domainHash.get(domain)
if findParent:
item = HashableTreeWidgetItem(findParent)
else:
newParent = HashableTreeWidgetItem(self._ui.cookieTree)
newParent.setText(0, domain)
newParent.setIcon(0, IconProvider.standardIcon(QStyle.SP_DirIcon))
newParent.setData(0, Qt.UserRole + 10, cookie.domain())
self._ui.cookieTree.addTopLevelItem(newParent)
self._domainHash[domain] = newParent
item = HashableTreeWidgetItem(newParent)
cookie = QNetworkCookie(cookie)
item.setText(0, '.' + domain)
item.setText(1, cookie.name().data().decode())
item.setData(0, Qt.UserRole + 10, cookie)
self._ui.cookieTree.addTopLevelItem(item)
self._itemHash[item] = cookie
def _removeCookie(self, cookie):
'''
@param: cookie QNetworkCookie
'''
# QTreeWidgetItem
item = self._cookieItem(cookie)
if not item:
return
self._itemHash.pop(item, None)
itemParent = item.parent()
if itemParent and itemParent.childCount() == 1:
self._domainHash.pop(self._cookieDomain(cookie), None)
self._removeTreeItem(self._ui.cookieTree, itemParent)
item = None
if item:
self._removeTreeItem(self._ui.cookieTree, item)
def _removeTreeItem(self, tree, item):
if not item: return
(item.parent() or tree.invisibleRootItem()).removeChild(item)
# private:
# override
def closeEvent(self, event):
'''
@param event QCloseEvent
'''
whitelist = []
blacklist = []
for idx in range(self._ui.whiteList.count()):
item = self._ui.whiteList.item(idx)
whitelist.append(item.text())
for idx in range(self._ui.blackList.count()):
item = self._ui.blackList.item(idx)
blacklist.append(item.text())
settings = Settings()
settings.beginGroup('Cookie-Settings')
settings.setValue('allowCookies', self._ui.saveCookies.isChecked())
settings.setValue('filterThirdPartyCookies', self._ui.filter3rdParty.isChecked())
settings.setValue('filterTrackingCookie', self._ui.filterTracking.isChecked())
settings.setValue('deleteCookiesOnClose', self._ui.deleteCookiesOnClose.isChecked())
settings.setValue('whitelist', whitelist)
settings.setValue('blacklist', blacklist)
settings.endGroup()
gVar.app.cookieJar().loadSettings()
event.accept()
# override
def keyPressEvent(self, event):
'''
@param event QKeyEvent
'''
if event.key() == Qt.Key_Escape:
self._close()
super().keyPressEvent(event)
def _addBlacklistByServer(self, server):
'''
@param: server QString
'''
if not server:
return
if self._ui.whiteList.findItems(server, Qt.MatchFixedString):
QMessageBox.information(self, _('Already whitelisted!'),
_("The server \"%s\" is already in whitelist, please remove it first.") % server)
return
if not self._ui.blackList.findItems(server, Qt.MatchFixedString):
self._ui.blackList.addItem(server)
def _cookieDomain(self, cookie):
'''
@param: cookie QNetworkCookie
@return: QString
'''
domain = cookie.domain()
domain = domain.lstrip('.')
return domain
def _cookieItem(self, cookie):
'''
@param: cookie QNetworkCookie
@return: QTreeWidgetItem
'''
for key, val in self._itemHash.items():
if val == cookie:
return key
return None
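# Illustrative sketch: CookieManager is a plain QDialog, so once the mc/gVar
# application runtime (cookie jar, settings) has been initialised it is opened
# like any other dialog:
#
#   manager = CookieManager()
#   manager.show()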
| [((28, 19, 28, 66), 'PyQt5.uic.loadUi', 'uic.loadUi', ({(28, 30, 28, 59): '"""mc/cookies/CookieManager.ui"""', (28, 61, 28, 65): 'self'}, {}), "('mc/cookies/CookieManager.ui', self)", False, 'from PyQt5 import uic\n'), ((35, 8, 35, 48), 'mc.common.globalvars.gVar.appTools.centerWidgetOnScreen', 'gVar.appTools.centerWidgetOnScreen', ({(35, 43, 35, 47): 'self'}, {}), '(self)', False, 'from mc.common.globalvars import gVar\n'), ((60, 19, 60, 29), 'mc.app.Settings.Settings', 'Settings', ({}, {}), '()', False, 'from mc.app.Settings import Settings\n'), ((86, 20, 86, 40), 'mc.common.globalvars.gVar.app.cookieJar', 'gVar.app.cookieJar', ({}, {}), '()', False, 'from mc.common.globalvars import gVar\n'), ((94, 8, 94, 49), 'mc.common.globalvars.gVar.appTools.setWmClass', 'gVar.appTools.setWmClass', ({(94, 33, 94, 42): '"""Cookies"""', (94, 44, 94, 48): 'self'}, {}), "('Cookies', self)", False, 'from mc.common.globalvars import gVar\n'), ((153, 20, 153, 40), 'mc.common.globalvars.gVar.app.cookieJar', 'gVar.app.cookieJar', ({}, {}), '()', False, 'from mc.common.globalvars import gVar\n'), ((244, 17, 244, 39), 'PyQt5.Qt.QNetworkCookie', 'QNetworkCookie', ({(244, 32, 244, 38): 'cookie'}, {}), '(cookie)', False, 'from PyQt5.Qt import QNetworkCookie\n'), ((293, 19, 293, 29), 'mc.app.Settings.Settings', 'Settings', ({}, {}), '()', False, 'from mc.app.Settings import Settings\n'), ((70, 46, 70, 78), 'mc.common.const.QT_VERSION_CHECK', 'const.QT_VERSION_CHECK', ({(70, 69, 70, 70): '(5)', (70, 72, 70, 74): '(11)', (70, 76, 70, 77): '(0)'}, {}), '(5, 11, 0)', False, 'from mc.common import const\n'), ((82, 41, 82, 60), 'PyQt5.Qt.QKeySequence', 'QKeySequence', ({(82, 54, 82, 59): '"""Del"""'}, {}), "('Del')", False, 'from PyQt5.Qt import QKeySequence\n'), ((165, 8, 165, 28), 'mc.common.globalvars.gVar.app.cookieJar', 'gVar.app.cookieJar', ({}, {}), '()', False, 'from mc.common.globalvars import gVar\n'), ((237, 33, 237, 77), 'mc.tools.IconProvider.IconProvider.standardIcon', 'IconProvider.standardIcon', ({(237, 59, 237, 76): 'QStyle.SP_DirIcon'}, {}), '(QStyle.SP_DirIcon)', False, 'from mc.tools.IconProvider import IconProvider\n'), ((303, 8, 303, 28), 'mc.common.globalvars.gVar.app.cookieJar', 'gVar.app.cookieJar', ({}, {}), '()', False, 'from mc.common.globalvars import gVar\n')] |
dongbohu/cimr-d | .circleci/process_submitted_data.py | 7d8f7f7319cff0092946a28d1416d38c06e085d7 | #!/usr/bin/env python3
import os
import sys
import logging
import subprocess
logging.basicConfig(level=logging.INFO)
root_dir = 'submitted_data'
submitted_file_split = set()
for dir_, _, files in os.walk(root_dir):
for file_name in files:
rel_dir = os.path.relpath(dir_, root_dir)
rel_file = os.path.join(root_dir, rel_dir, file_name)
submitted_file_split.add(rel_file)
for submitted_file in submitted_file_split:
if submitted_file.startswith('submitted_data'):
dir_name, data_type, file_name = submitted_file.split('/')
out_dir_name = 'processed_data'
if not os.path.isdir(out_dir_name):
os.makedirs(out_dir_name, exist_ok=True)
if not os.path.isdir(out_dir_name + '/' + data_type):
os.makedirs(out_dir_name + '/' + data_type, exist_ok=True)
outfile = submitted_file.replace(dir_name, out_dir_name)
if not os.path.isfile(outfile):
if not data_type == 'tad':
from cimr.processor.utils import Infiler
infile = Infiler(
data_type,
submitted_file,
genome_build='b38',
update_rsid=False,
outfile=str(outfile),
chunksize=700000
)
infile.read_file()
if data_type == 'eqtl':
from cimr.processor.query import Querier
genes = list(infile.list_genes())
queried = Querier(genes)
queried.form_query()
else:
logging.info(f' processed file already exists for {submitted_file}')
logging.info(f' if reprocessing, delete {outfile} and file a new pull request')
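# Illustrative sketch of the directory layout the walk/split logic above
# expects (directory and file names here are hypothetical):
#
#   submitted_data/
#       eqtl/study_a.txt.gz  -> processed_data/eqtl/study_a.txt.gz (Infiler, then Querier)
#       gwas/study_b.txt.gz  -> processed_data/gwas/study_b.txt.gz (Infiler only)
#       tad/domains.bed      -> left unprocessed (tad files skip Infiler entirely)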
| [((9, 0, 9, 39), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((15, 22, 15, 39), 'os.walk', 'os.walk', ({(15, 30, 15, 38): 'root_dir'}, {}), '(root_dir)', False, 'import os\n'), ((17, 18, 17, 49), 'os.path.relpath', 'os.path.relpath', ({(17, 34, 17, 38): 'dir_', (17, 40, 17, 48): 'root_dir'}, {}), '(dir_, root_dir)', False, 'import os\n'), ((18, 19, 18, 61), 'os.path.join', 'os.path.join', ({(18, 32, 18, 40): 'root_dir', (18, 42, 18, 49): 'rel_dir', (18, 51, 18, 60): 'file_name'}, {}), '(root_dir, rel_dir, file_name)', False, 'import os\n'), ((28, 15, 28, 42), 'os.path.isdir', 'os.path.isdir', ({(28, 29, 28, 41): 'out_dir_name'}, {}), '(out_dir_name)', False, 'import os\n'), ((29, 12, 29, 52), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((30, 15, 30, 60), 'os.path.isdir', 'os.path.isdir', ({(30, 29, 30, 59): "(out_dir_name + '/' + data_type)"}, {}), "(out_dir_name + '/' + data_type)", False, 'import os\n'), ((31, 12, 31, 70), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((35, 15, 35, 38), 'os.path.isfile', 'os.path.isfile', ({(35, 30, 35, 37): 'outfile'}, {}), '(outfile)', False, 'import os\n'), ((55, 16, 55, 84), 'logging.info', 'logging.info', ({(55, 29, 55, 83): 'f""" processed file already exists for {submitted_file}"""'}, {}), "(f' processed file already exists for {submitted_file}')", False, 'import logging\n'), ((56, 16, 56, 95), 'logging.info', 'logging.info', ({(56, 29, 56, 94): 'f""" if reprocessing, delete {outfile} and file a new pull request"""'}, {}), "(f' if reprocessing, delete {outfile} and file a new pull request')", False, 'import logging\n'), ((51, 30, 51, 44), 'cimr.processor.query.Querier', 'Querier', ({(51, 38, 51, 43): 'genes'}, {}), '(genes)', False, 'from cimr.processor.query import Querier\n')] |
resourceidea/resourceideaapi | common/enums.py | 4cc7db98f981d8f2011c1995e23e8a8655e31f75 | import enum
class Status(enum.Enum):
"""Status enumeration."""
ACTIVE = 'ACTIVE'
DISABLED = 'DISABLED'
ARCHIVED = 'ARCHIVED'
DELETED = 'DELETED'
class ProgressStatus(enum.Enum):
"""Enumeration indicates the different
stages of the progress made on an engagement,
job or task."""
NOT_STARTED = 'NOT STARTED'
RUNNING = 'RUNNING'
IN_REVIEW = 'IN REVIEW'
REVIEWED = 'REVIEWED'
CLOSED = 'CLOSED'
| [] |
softsys4ai/FlexiBO | networks/mobilenet.py | 1406d67e5bd14d6b7210e724e6b239889f210db6 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MobileNet 224 (2017)
# Paper: https://arxiv.org/pdf/1704.04861.pdf
import os
import tensorflow as tf
from tensorflow.keras import layers, Input, Model
def stem(inputs, alpha, n_filters,
filter_size):
""" Construct the stem group
inputs : input tensor
        alpha : width multiplier
        n_filters : number of filters of the stem convolution
        filter_size : kernel size of the stem convolution
    """
# Convolutional block
x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(inputs)
x = layers.Conv2D(n_filters, (filter_size, filter_size), strides=(2, 2), padding='valid')(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
# Depthwise Separable Convolution Block
x = depthwise_block(x, 64, alpha, (1, 1))
return x
def classifier(x, alpha, dropout, n_classes):
""" Construct the classifier group
x : input to the classifier
alpha : width multiplier
dropout : dropout percentage
n_classes : number of output classes
"""
# Flatten the feature maps into 1D feature maps (?, N)
x = layers.GlobalAveragePooling2D()(x)
# Reshape the feature maps to (?, 1, 1, 1024)
shape = (1, 1, int(1024 * alpha))
x = layers.Reshape(shape)(x)
# Perform dropout for preventing overfitting
x = layers.Dropout(dropout)(x)
# Use convolution for classifying (emulates a fully connected layer)
x = layers.Conv2D(n_classes, (1, 1), padding='same')(x)
x = layers.Activation('softmax')(x)
# Reshape the resulting output to 1D vector of number of classes
x = layers.Reshape((n_classes, ))(x)
return x
def depthwise_block(x, n_filters, alpha, strides):
""" Construct a Depthwise Separable Convolution block
x : input to the block
n_filters : number of filters
alpha : width multiplier
strides : strides
"""
# Apply the width filter to the number of feature maps
filters = int(n_filters * alpha)
# Strided convolution to match number of filters
if strides == (2, 2):
x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
padding = 'valid'
else:
padding = 'same'
# Depthwise Convolution
x = layers.DepthwiseConv2D((3, 3), strides, padding=padding)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
# Pointwise Convolution
x = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
return x
def get_configurable_hyperparams():
"""This function is used to ge the configurable hyperparameters
"""
import yaml
with open("cur_config.yaml") as fp:
        cur_cfg = yaml.safe_load(fp)  # safe_load works without an explicit Loader argument
    # six tunable hyperparameters are expected under "cur_conf" (see the unpacking in __main__)
    return (cur_cfg["cur_conf"][0], cur_cfg["cur_conf"][1], cur_cfg["cur_conf"][2],
            cur_cfg["cur_conf"][3], cur_cfg["cur_conf"][4], cur_cfg["cur_conf"][5])
def get_data():
"""This function is used to get train and test data
"""
from tensorflow.keras.datasets import cifar10
import numpy as np
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = (x_train / 255.0).astype(np.float32)
x_test = (x_test / 255.0).astype(np.float32)
return x_train, y_train, x_test, y_test
if __name__=="__main__":
# get configurable hyperparams
    (stem_n_filters,
    stem_filter_size,
    depthwise_block1_n_filters,
    depthwise_block2_n_filters,
    depthwise_block3_n_filters,
    depthwise_block4_n_filters) = get_configurable_hyperparams()
alpha = 1 # width multiplier
dropout = 0.5 # dropout percentage
n_classes = 1000 # number of classes
inputs = Input(shape=(224, 224, 3))
# Create the stem group
x = stem(inputs, alpha, stem_n_filters,
stem_filter_size)
# First Depth wise Separable Convolution Group
# Strided convolution - feature map size reduction
x = depthwise_block(x, depthwise_block1_n_filters, alpha, strides=(2, 2))
x = depthwise_block(x, depthwise_block1_n_filters, alpha, strides=(1, 1))
# Second Depthwise Separable Convolution Group
# Strided convolution - feature map size reduction
x = depthwise_block(x, depthwise_block2_n_filters, alpha, strides=(2, 2))
x = depthwise_block(x, depthwise_block2_n_filters, alpha, strides=(1, 1))
# Third Depthwise Separable Convolution Group
# Strided convolution - feature map size reduction
x = depthwise_block(x, depthwise_block3_n_filters, alpha, strides=(2, 2))
for _ in range(5):
x = depthwise_block(x, depthwise_block3_n_filters, alpha, strides=(1, 1))
# Fourth Depthwise Separable Convolution Group
# Strided convolution - feature map size reduction
x = depthwise_block(x, depthwise_block4_n_filters, alpha, strides=(2, 2))
x = depthwise_block(x, depthwise_block4_n_filters, alpha, strides=(1, 1))
# Create the classifier
outputs = classifier(x, alpha, dropout, n_classes)
# Instantiate the Model
model = Model(inputs, outputs)
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()
    x_train, y_train, x_test, y_test = get_data()
# train model
model.fit(x_train, y_train, epochs=10,
batch_size=32, validation_split=0.1, verbose=1)
# save model
fmodel=os.path.join(os.getcwd(),"model.h5")
model.save(fmodel)
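    # Illustrative only: the saved model can later be restored for evaluation
    # or fine-tuning with the standard Keras loader, e.g.
    #   restored = tf.keras.models.load_model(fmodel)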
| [] |
altfool/mri_face_detection | info.py | 3117f7f00c98efe2260936146ce6b5454b059672 | import numpy as np
img_dtype = np.float32
imgX, imgY, imgZ = (256, 256, 150)
imgs_path_withfaces = '../dataset/withfaces'
imgs_path_nofaces = '../dataset/nofaces'
imgX_dwt1, imgY_dwt1, imgZ_dwt1 = (128, 128, 75)
imgs_path_withfaces_dwt = './dataset/withfaces'
imgs_path_nofaces_dwt = './dataset/nofaces'
dwt_flag = (True, False)[0]
if dwt_flag:
imgX, imgY, imgZ = imgX_dwt1, imgY_dwt1, imgZ_dwt1
imgs_path_withfaces = imgs_path_withfaces_dwt
imgs_path_nofaces = imgs_path_nofaces_dwt
| [] |
zzz2010/Contrib | biggan/paddorch/paddorch/vision/functional.py | d351d83da718145cef9f6c98598f7fedc027efe5 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import collections
import random
import math
import cv2
import numbers
import numpy as np
if sys.version_info < (3, 3):
Sequence = collections.Sequence
Iterable = collections.Iterable
else:
Sequence = collections.abc.Sequence
Iterable = collections.abc.Iterable
__all__ = ['flip', 'resize', 'pad', 'rotate', 'to_grayscale']
def flip(image, code):
"""
    According to the code (the type of flip), flip the input image
Args:
image: Input image, with (H, W, C) shape
code: Code that indicates the type of flip.
-1 : Flip horizontally and vertically
0 : Flip vertically
1 : Flip horizontally
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms import functional as F
fake_img = np.random.rand(224, 224, 3)
# flip horizontally and vertically
F.flip(fake_img, -1)
# flip vertically
F.flip(fake_img, 0)
# flip horizontally
F.flip(fake_img, 1)
"""
return cv2.flip(image, flipCode=code)
def resize(img, size, interpolation=cv2.INTER_LINEAR):
"""
resize the input data to given size
Args:
input: Input data, could be image or masks, with (H, W, C) shape
size: Target size of input data, with (height, width) shape.
interpolation: Interpolation method.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms import functional as F
fake_img = np.random.rand(256, 256, 3)
F.resize(fake_img, 224)
F.resize(fake_img, (200, 150))
"""
if isinstance(interpolation, Sequence):
interpolation = random.choice(interpolation)
if isinstance(size, int):
h, w = img.shape[:2]
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return cv2.resize(img, (ow, oh), interpolation=interpolation)
else:
oh = size
ow = int(size * w / h)
return cv2.resize(img, (ow, oh), interpolation=interpolation)
else:
return cv2.resize(img, tuple(size[::-1]), interpolation=interpolation)
def pad(img, padding, fill=(0, 0, 0), padding_mode='constant'):
"""Pads the given CV Image on all sides with speficified padding mode and fill value.
Args:
img (np.ndarray): Image to be padded.
padding (int|tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (int|tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
``constant`` means padding with a constant value, this value is specified with fill.
``edge`` means padding with the last value at the edge of the image.
``reflect`` means padding with reflection of image (without repeating the last value on the edge)
padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode
will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``.
        ``symmetric`` means padding with reflection of image (repeating the last value on the edge)
padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode
will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``.
Returns:
numpy ndarray: Padded image.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import pad
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = pad(fake_img, 2)
print(fake_img.shape)
"""
if not isinstance(padding, (numbers.Number, list, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, list, tuple)):
raise TypeError('Got inappropriate fill arg')
if not isinstance(padding_mode, str):
raise TypeError('Got inappropriate padding_mode arg')
    if isinstance(padding, Sequence) and len(padding) not in [2, 4]:  # use the compat alias defined at module top
raise ValueError(
"Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
'Expected padding mode be either constant, edge, reflect or symmetric, but got {}'.format(padding_mode)
PAD_MOD = {
'constant': cv2.BORDER_CONSTANT,
'edge': cv2.BORDER_REPLICATE,
'reflect': cv2.BORDER_DEFAULT,
'symmetric': cv2.BORDER_REFLECT
}
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
    if isinstance(padding, Sequence) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
    if isinstance(padding, Sequence) and len(padding) == 4:
pad_left, pad_top, pad_right, pad_bottom = padding
if isinstance(fill, numbers.Number):
fill = (fill,) * (2 * len(img.shape) - 3)
if padding_mode == 'constant':
assert (len(fill) == 3 and len(img.shape) == 3) or (len(fill) == 1 and len(img.shape) == 2), \
'channel of image is {} but length of fill is {}'.format(img.shape[-1], len(fill))
img = cv2.copyMakeBorder(
src=img,
top=pad_top,
bottom=pad_bottom,
left=pad_left,
right=pad_right,
borderType=PAD_MOD[padding_mode],
value=fill)
return img
def rotate(img,
angle,
interpolation=cv2.INTER_LINEAR,
expand=False,
center=None):
"""Rotates the image by angle.
Args:
img (numpy.ndarray): Image to be rotated.
angle (float|int): In degrees clockwise order.
interpolation (int, optional):
interpolation: Interpolation method.
expand (bool|optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple|optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
Returns:
numpy ndarray: Rotated image.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import rotate
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = rotate(fake_img, 10)
print(fake_img.shape)
"""
dtype = img.dtype
h, w, _ = img.shape
point = center or (w / 2, h / 2)
M = cv2.getRotationMatrix2D(point, angle=-angle, scale=1)
if expand:
if center is None:
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
M[0, 2] += (nW / 2) - point[0]
M[1, 2] += (nH / 2) - point[1]
dst = cv2.warpAffine(img, M, (nW, nH))
else:
xx = []
yy = []
for point in (np.array([0, 0, 1]), np.array([w - 1, 0, 1]),
np.array([w - 1, h - 1, 1]), np.array([0, h - 1, 1])):
target = np.dot(M, point)
xx.append(target[0])
yy.append(target[1])
nh = int(math.ceil(max(yy)) - math.floor(min(yy)))
nw = int(math.ceil(max(xx)) - math.floor(min(xx)))
M[0, 2] += (nw - w) / 2
M[1, 2] += (nh - h) / 2
dst = cv2.warpAffine(img, M, (nw, nh), flags=interpolation)
else:
dst = cv2.warpAffine(img, M, (w, h), flags=interpolation)
return dst.astype(dtype)
def to_grayscale(img, num_output_channels=1):
"""Converts image to grayscale version of image.
Args:
img (numpy.ndarray): Image to be converted to grayscale.
Returns:
numpy.ndarray: Grayscale version of the image.
if num_output_channels == 1, returned image is single channel
if num_output_channels == 3, returned image is 3 channel with r == g == b
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import to_grayscale
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = to_grayscale(fake_img)
print(fake_img.shape)
"""
if num_output_channels == 1:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
elif num_output_channels == 3:
img = cv2.cvtColor(
cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
else:
raise ValueError('num_output_channels should be either 1 or 3')
return img | [((55, 11, 55, 41), 'cv2.flip', 'cv2.flip', (), '', False, 'import cv2\n'), ((163, 10, 170, 19), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (), '', False, 'import cv2\n'), ((208, 8, 208, 61), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (), '', False, 'import cv2\n'), ((75, 24, 75, 52), 'random.choice', 'random.choice', ({(75, 38, 75, 51): 'interpolation'}, {}), '(interpolation)', False, 'import random\n'), ((237, 14, 237, 65), 'cv2.warpAffine', 'cv2.warpAffine', (), '', False, 'import cv2\n'), ((261, 14, 261, 51), 'cv2.cvtColor', 'cv2.cvtColor', ({(261, 27, 261, 30): 'img', (261, 32, 261, 50): 'cv2.COLOR_RGB2GRAY'}, {}), '(img, cv2.COLOR_RGB2GRAY)', False, 'import cv2\n'), ((84, 19, 84, 73), 'cv2.resize', 'cv2.resize', (), '', False, 'import cv2\n'), ((88, 19, 88, 73), 'cv2.resize', 'cv2.resize', (), '', False, 'import cv2\n'), ((212, 18, 212, 33), 'numpy.abs', 'np.abs', ({(212, 25, 212, 32): 'M[0, 0]'}, {}), '(M[0, 0])', True, 'import numpy as np\n'), ((213, 18, 213, 33), 'numpy.abs', 'np.abs', ({(213, 25, 213, 32): 'M[0, 1]'}, {}), '(M[0, 1])', True, 'import numpy as np\n'), ((221, 18, 221, 50), 'cv2.warpAffine', 'cv2.warpAffine', ({(221, 33, 221, 36): 'img', (221, 38, 221, 39): 'M', (221, 41, 221, 49): '(nW, nH)'}, {}), '(img, M, (nW, nH))', False, 'import cv2\n'), ((235, 18, 235, 71), 'cv2.warpAffine', 'cv2.warpAffine', (), '', False, 'import cv2\n'), ((225, 26, 225, 45), 'numpy.array', 'np.array', ({(225, 35, 225, 44): '[0, 0, 1]'}, {}), '([0, 0, 1])', True, 'import numpy as np\n'), ((225, 47, 225, 70), 'numpy.array', 'np.array', ({(225, 56, 225, 69): '[w - 1, 0, 1]'}, {}), '([w - 1, 0, 1])', True, 'import numpy as np\n'), ((226, 26, 226, 53), 'numpy.array', 'np.array', ({(226, 35, 226, 52): '[w - 1, h - 1, 1]'}, {}), '([w - 1, h - 1, 1])', True, 'import numpy as np\n'), ((226, 55, 226, 78), 'numpy.array', 'np.array', ({(226, 64, 226, 77): '[0, h - 1, 1]'}, {}), '([0, h - 1, 1])', True, 'import numpy as np\n'), ((227, 25, 227, 41), 'numpy.dot', 'np.dot', ({(227, 32, 227, 33): 'M', (227, 35, 227, 40): 'point'}, {}), '(M, point)', True, 'import numpy as np\n'), ((264, 12, 264, 49), 'cv2.cvtColor', 'cv2.cvtColor', ({(264, 25, 264, 28): 'img', (264, 30, 264, 48): 'cv2.COLOR_RGB2GRAY'}, {}), '(img, cv2.COLOR_RGB2GRAY)', False, 'import cv2\n')] |
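# Illustrative usage sketch for the transforms above, run on a synthetic image.
# The import path is assumed from the repository layout (paddorch/vision/functional.py);
# expected output shapes are noted in the comments.
import numpy as np
from paddorch.vision.functional import flip, resize, pad, rotate, to_grayscale

fake_img = np.random.rand(256, 256, 3).astype('float32')
print(resize(fake_img, 224).shape)              # (224, 224, 3): shorter side scaled to 224
print(pad(fake_img, (10, 20)).shape)            # (296, 276, 3): 10 px left/right, 20 px top/bottom
print(rotate(fake_img, 30, expand=True).shape)  # canvas grows to hold the rotated image
print(flip(fake_img, 1).shape)                  # (256, 256, 3): horizontal flip
print(to_grayscale(fake_img).shape)             # (256, 256): single-channel output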
ashhansen6/minigames | ground_battle.py | 5b2e0db14b3567c9b6220206105ed448fb303551 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 13:38:35 2021
GROUND INVASION! The Game
@author: Ashton Hansen ([email protected])
"""
# Packages used:
import numpy as np
import pandas as pd
import random as rng
from termcolor import colored
# Defining starting forces
## Defenders:
def_force = 1250
def_reserves = 400
defenders = def_force + def_reserves
def_strength = def_force
def_guard = def_force
## Attackers:
att_force = 900
att_reserves = 1000
attackers = att_force + att_reserves
att_strength = att_force
att_guard = att_force
# Defining strategies:
## Defenders:
def_strat = ["draft", "turtle"]
### Draft
def draft(def_force, def_reserves):
global def_pair
global def_strength
global def_guard
# Defender Strategy Information
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("You hear news that a draft decree was issued...")
print("Intelligence suggests that there will be more enemy combatants.")
print("You expect the drafted soldiers to have decreased combat effectiveness.")
# Defender Strategy Effects
if def_reserves >= 100:
def_danger = def_force + 100
def_safe = def_reserves - 100
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
else:
def_danger = def_force + def_reserves
def_safe = 0
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
def_power = def_danger * 0.980
def_protection = def_danger * 0.95
def_deployment = [def_danger, def_safe, def_power, def_protection]
return(def_deployment)
### Turtle
def turtle(def_force, def_reserves):
global def_pair
global def_strength
global def_guard
# Defender Strategy Information
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("The defenders appear to bolster their defenses in preparation.")
print("Intelligence suggests that their defenses will be difficult to penetrate.")
print("It is likely that the defenders will try to keep soldiers out of harm's way.")
# Defender Strategy Effects
if def_force > 1100:
def_danger = def_force
def_safe = def_reserves + (def_danger - 1100)
def_danger = 1100
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
else:
def_danger = def_force
def_safe = def_reserves
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
def_power = def_danger * 0.975
def_protection = def_danger * 1.15
def_deployment = [def_danger, def_safe, def_power, def_protection]
return(def_deployment)
## Attackers:
att_strat = ["blitz", "guerilla"]
### Blitz
def blitz(att_force, att_reserves):
global att_pair
global att_strength
global att_guard
# Attacker Strategy Information
print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan"))
print("Your officers grimly accept your orders...")
print("There is an air of apprehension as the troops prepare to deploy.")
print("While offensive effectiveness will improve, heavier losses are expected.")
# Attacker Strategy Effects
if att_reserves >= 200:
att_danger = att_force + 200
att_safe = att_reserves - 200
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_safe)
else:
att_danger = att_force + att_reserves
att_safe = 0
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_reserves)
att_power = att_danger * 1.10
att_protection = att_danger * 0.90
att_deployment = [att_danger, att_safe, att_power, att_protection]
return(att_deployment)
### Guerilla
def guerilla(att_force, att_reserves):
global att_pair
global att_strength
global att_guard
# Attacker Strategy Information
print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan"))
print("Your officers immediately begin plans to target strategic weak points.")
print("Soldiers move out in small forces and keep the enemy guessing.")
print("While not as effective offensively, troop survival rates should be higher.")
# Attacker Strategy Effects
if att_force > 750:
att_danger = att_force
att_safe = att_reserves + (att_force - 750)
att_danger = 750
else:
att_danger = att_force
att_safe = att_reserves
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_safe)
att_power = att_danger * 0.95
att_protection = att_danger * 1.25
att_deployment = [att_danger, att_safe, att_power, att_protection]
return(att_deployment)
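# Editor's note (illustrative addition, not part of the original script): every strategy
# function above returns a list of [fielded_troops, reserves, strength, guard], e.g.
#   blitz_results = blitz(att_force, att_reserves)
#   att_force, att_reserves, att_strength, att_guard = blitz_results
# which is exactly how the wave loop below consumes the chosen strategy.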
# Ground Battle Event (Player == Attacker)
wave = 0
player = input("Attacker or Defender? [A/D]:")
while (attackers > 0) and (defenders > 0):
# Wave Information
wave = wave + 1
if wave == 1:
print("############################################################")
print("PREPARE FOR BATTLE! THE FIRST WAVE OF THE BATTLE BEGINS NOW.")
print("############################################################")
else:
print("########## WAVE:", wave, "##########")
print("#############################")
print("Defending force strength:", def_force)
print("Defending forces in reserve:", def_reserves)
print("Attacking force strength:", att_force)
print("Attacking forces in reserve:", att_reserves)
if player =="A":
# Active Player (Attacker)
att_strat_chosen = input(colored("How should we proceed, commander? [blitz/guerilla]:", "yellow"))
elif player == "D":
# CPU Attacker
att_strat_chosen = rng.choice(att_strat)
# Defender Setup
if player == "A":
# CPU Defender
if def_reserves > 0:
def_strat = ["none",
"draft", "draft", "draft", "draft", "draft", "draft",
"turtle", "turtle", "turtle"]
def_strat_chosen = rng.choice(def_strat)
else:
def_strat = ["none", "none",
"turtle", "turtle", "turtle" ,"turtle", "turtle", "turtle", "turtle", "turtle"]
def_strat_chosen = rng.choice(def_strat)
elif player == "D":
# Active Player (defender)
def_strat_chosen = input(colored("How should we proceed, commander? [draft/turtle]:", "yellow"))
if def_strat_chosen == "draft":
draft_results = draft(def_force, def_reserves)
def_force = draft_results[0]
def_reserves = draft_results[1]
def_strength = draft_results[2]
def_guard = draft_results[3]
elif def_strat_chosen == "turtle":
turtle_results = turtle(def_force, def_reserves)
def_force = turtle_results[0]
def_reserves = turtle_results[1]
def_strength = turtle_results[2]
def_guard = turtle_results[3]
elif def_strat_chosen == "none":
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("It appears that the enemy will employ standard tactics...")
def_force = def_force
def_reserves = def_reserves
def_strength = def_force
def_guard = def_force
print("Defending force strength:", def_force)
print("Forces kept in reserve:", def_reserves)
# Attacker Setup
if att_strat_chosen == "blitz":
blitz_results = blitz(att_force, att_reserves)
att_force = blitz_results[0]
att_reserves = blitz_results[1]
att_strength = blitz_results[2]
att_guard = blitz_results[3]
elif att_strat_chosen == "guerilla":
guerilla_results = guerilla(att_force, att_reserves)
att_force = guerilla_results[0]
att_reserves = guerilla_results[1]
att_strength = guerilla_results[2]
att_guard = guerilla_results[3]
# Combat
# Attacker damage
def_guard = np.random.normal(def_guard, def_guard/10) * 0.50
att_strength = att_strength - def_guard
if att_strength < 0:
att_strength = 0
def_force = def_force - np.random.normal(att_strength, att_strength/10)//2 - (0.1*att_strength)//1
if def_force < 0:
def_force = 0
# Defender damage
att_guard = np.random.normal(att_guard, att_guard/10) * 0.50 - 0.1
def_strength = def_strength - att_guard
if def_strength < 0:
def_strength = 0
att_force = att_force - np.random.normal(def_strength, def_strength/10)//2 - (0.1*def_strength)//1
if att_force < 0:
att_force = 0
# Post-wave results:
print(colored("########## POST-WAVE RESULTS ##########", on_color = "on_cyan"))
print(colored("Defenders:", on_color = "on_blue"))
print("Surviving defensive forces:", def_force)
print("Defenseive forces kept in reserve:", def_reserves)
print("Defender strength estimate:", def_strength)
print("Defender guard estimate:", def_guard)
print(colored("Attackers:", on_color = "on_red"))
print("Surviving attacker forces:", att_force)
print("Attacker forces kept in reserve:", att_reserves)
print("Attacker strength estimate:", att_strength)
print("Attacker guard estimate:", att_guard)
# Reset allocations
# Defender reallocations:
def_reserves = def_reserves + def_force
def_force = 0
if def_reserves >= 1250:
def_reserves = def_reserves - 1250
def_force = 1250
def_guard = def_force
else:
def_force = def_reserves
def_reserves = 0
def_guard = def_force
# Attacker reallocations:
att_reserves = att_reserves + att_force
att_force = 0
if att_reserves >= 900:
att_reserves = att_reserves - 900
att_force = 900
att_guard = att_force
else:
att_force = att_reserves
att_reserves = 0
att_guard = att_force
defenders = def_force + def_reserves
attackers = att_force + att_reserves
# End of wave conditionals
if (attackers > 0) and (defenders > 0) and (player == "A"):
fightflight = input(colored("Continue or retreat?: [continue/retreat]:", "yellow"))
if fightflight == "retreat":
print(colored("########## WITHDRAWAL ##########", on_color = "on_blue"))
print("You choose to withdraw your troops...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
break
else:
print("The battle will continue next turn...")
elif attackers <= 0 and player == "A":
print(colored("########## FAILURE! ##########", on_color = "on_red"))
print("Your assault has been repelled!")
print("You return home, wondering what punishment for your failure awaits...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
elif defenders <= 0 and player == "A":
print(colored("########## SUCCESS! ##########", on_color = "on_green"))
print("The defenders have been routed!")
print("You may now decide the fate of the defending population...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
elif (attackers > 0) and (defenders > 0) and (player == "D"):
fightflight = input(colored("Defend or retreat?: [defend/retreat]:", "yellow"))
if fightflight == "retreat":
print(colored("########## WITHDRAWAL ##########", on_color = "on_blue"))
print("You choose to withdraw your troops from the region...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1900 - defenders))
print("Survival rate:", (defenders)/1900)
print("Total assault waves:", wave)
break
else:
print("The battle will continue next turn...")
elif defenders <= 0 and player == "D":
print(colored("########## FAILURE! ##########", on_color = "on_red"))
print("Your defense has been broken!")
print("Enemy troops now occupy your lands and have claimed dominion...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1650 - defenders))
print("Survival rate:", (defenders)/1650)
print("Total assault waves:", wave)
elif attackers <= 0 and player == "D":
print(colored("########## SUCCESS! ##########", on_color = "on_green"))
print("The attackers have been repelled!")
print("The storm has passed, and your people live another day...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1650 - defenders))
print("Survival rate:", (defenders)/1650)
print("Total assault waves:", wave)
print("#############################")
 | [((41, 10, 41, 84), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((67, 10, 67, 84), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((98, 10, 98, 82), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((124, 10, 124, 82), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((217, 16, 217, 57), 'numpy.random.normal', 'np.random.normal', ({(217, 33, 217, 42): 'def_guard', (217, 44, 217, 56): '(def_guard / 10)'}, {}), '(def_guard, def_guard / 10)', True, 'import numpy as np\n'), ((233, 10, 233, 82), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((234, 10, 234, 53), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((239, 10, 239, 52), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((162, 33, 162, 105), 'termcolor.colored', 'colored', ({(162, 41, 162, 94): '"""How should we proceed, commander? [blitz/guerilla]:"""', (162, 96, 162, 104): '"""yellow"""'}, {}), "('How should we proceed, commander? [blitz/guerilla]:', 'yellow')", False, 'from termcolor import colored\n'), ((165, 27, 165, 48), 'random.choice', 'rng.choice', ({(165, 38, 165, 47): 'att_strat'}, {}), '(att_strat)', True, 'import random as rng\n'), ((173, 31, 173, 52), 'random.choice', 'rng.choice', ({(173, 42, 173, 51): 'def_strat'}, {}), '(def_strat)', True, 'import random as rng\n'), ((177, 31, 177, 52), 'random.choice', 'rng.choice', ({(177, 42, 177, 51): 'def_strat'}, {}), '(def_strat)', True, 'import random as rng\n'), ((225, 17, 225, 58), 'numpy.random.normal', 'np.random.normal', ({(225, 34, 225, 43): 'att_guard', (225, 45, 225, 57): '(att_guard / 10)'}, {}), '(att_guard, att_guard / 10)', True, 'import numpy as np\n'), ((271, 28, 271, 90), 'termcolor.colored', 'colored', ({(271, 36, 271, 79): '"""Continue or retreat?: [continue/retreat]:"""', (271, 81, 271, 89): '"""yellow"""'}, {}), "('Continue or retreat?: [continue/retreat]:', 'yellow')", False, 'from termcolor import colored\n'), ((180, 33, 180, 103), 'termcolor.colored', 'colored', ({(180, 41, 180, 92): '"""How should we proceed, commander? [draft/turtle]:"""', (180, 94, 180, 102): '"""yellow"""'}, {}), "('How should we proceed, commander? [draft/turtle]:', 'yellow')", False, 'from termcolor import colored\n'), ((221, 28, 221, 75), 'numpy.random.normal', 'np.random.normal', ({(221, 45, 221, 57): 'att_strength', (221, 59, 221, 74): '(att_strength / 10)'}, {}), '(att_strength, att_strength / 10)', True, 'import numpy as np\n'), ((229, 28, 229, 75), 'numpy.random.normal', 'np.random.normal', ({(229, 45, 229, 57): 'def_strength', (229, 59, 229, 74): '(def_strength / 10)'}, {}), '(def_strength, def_strength / 10)', True, 'import numpy as np\n'), ((273, 18, 273, 83), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((275, 18, 275, 91), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((284, 14, 284, 76), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((287, 14, 287, 87), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((194, 14, 194, 88), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((293, 14, 293, 78), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((296, 14, 296, 87), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((302, 28, 302, 86), 'termcolor.colored', 'colored', ({(302, 36, 302, 75): '"""Defend or retreat?: [defend/retreat]:"""', (302, 77, 302, 85): '"""yellow"""'}, {}), "('Defend or retreat?: [defend/retreat]:', 'yellow')", False, 'from termcolor import colored\n'), ((304, 18, 304, 83), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((306, 18, 306, 91), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((315, 14, 315, 76), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((318, 14, 318, 87), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((324, 14, 324, 78), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n'), ((327, 14, 327, 87), 'termcolor.colored', 'colored', (), '', False, 'from termcolor import colored\n')]
martinheidegger/pretalx | src/pretalx/orga/urls.py | d812e665c1c5ce29df3eafc1985af08e4d986fef | from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from pretalx.event.models.event import SLUG_CHARS
from pretalx.orga.views import cards
from .views import (
admin,
auth,
cfp,
dashboard,
event,
mails,
organiser,
person,
plugins,
review,
schedule,
speaker,
submission,
)
app_name = "orga"
urlpatterns = [
url("^login/$", auth.LoginView.as_view(), name="login"),
url("^logout/$", auth.logout_view, name="logout"),
url("^reset/$", auth.ResetView.as_view(), name="auth.reset"),
url(r"^reset/(?P<token>\w+)$", auth.RecoverView.as_view(), name="auth.recover"),
url("^$", RedirectView.as_view(url="event", permanent=False)),
url("^admin/$", admin.AdminDashboard.as_view(), name="admin.dashboard"),
url("^admin/update/$", admin.UpdateCheckView.as_view(), name="admin.update"),
url("^me$", event.UserSettings.as_view(), name="user.view"),
url("^me/subuser$", person.SubuserView.as_view(), name="user.subuser"),
url(
r"^invitation/(?P<code>\w+)$",
event.InvitationView.as_view(),
name="invitation.view",
),
url(
"^organiser/$",
dashboard.DashboardOrganiserListView.as_view(),
name="organiser.list",
),
url(
"^organiser/new$", organiser.OrganiserDetail.as_view(), name="organiser.create"
),
url(
f"^organiser/(?P<organiser>[{SLUG_CHARS}]+)/",
include(
[
url("^$", organiser.OrganiserDetail.as_view(), name="organiser.view"),
url(
"^delete$",
organiser.OrganiserDelete.as_view(),
name="organiser.delete",
),
url("^teams/$", organiser.TeamDetail.as_view(), name="organiser.teams"),
url(
"^teams/new$",
organiser.TeamDetail.as_view(),
name="organiser.teams.create",
),
url(
"^teams/(?P<pk>[0-9]+)/$",
organiser.TeamDetail.as_view(),
name="organiser.teams.view",
),
url(
"^teams/(?P<pk>[0-9]+)/delete$",
organiser.TeamDelete.as_view(),
name="organiser.teams.delete",
),
url(
"^teams/(?P<pk>[0-9]+)/tracks$",
organiser.TeamTracks.as_view(),
name="organiser.teams.tracks",
),
url(
"^teams/(?P<pk>[0-9]+)/delete/(?P<user_pk>[0-9]+)$",
organiser.TeamDelete.as_view(),
name="organiser.teams.delete_member",
),
url(
"^teams/(?P<pk>[0-9]+)/reset/(?P<user_pk>[0-9]+)$",
organiser.TeamResetPassword.as_view(),
name="organiser.team.password_reset",
),
url(
"^teams/(?P<pk>[0-9]+)/uninvite$",
organiser.TeamUninvite.as_view(),
name="organiser.teams.uninvite",
),
url(
"^teams/(?P<pk>[0-9]+)/resend$",
organiser.TeamResend.as_view(),
name="organiser.teams.resend",
),
]
),
),
url("^event/new/$", event.EventWizard.as_view(), name="event.create"),
url("^event/typeahead/$", event.event_list, name="event.typeahead"),
url("^event/$", dashboard.DashboardEventListView.as_view(), name="event.list"),
url(
f"^event/(?P<event>[{SLUG_CHARS}]+)/",
include(
[
url(
"^$", dashboard.EventDashboardView.as_view(), name="event.dashboard"
),
url("^login/$", auth.LoginView.as_view(), name="event.login"),
url("^reset/$", auth.ResetView.as_view(), name="event.auth.reset"),
url(
r"^reset/(?P<token>\w+)$",
auth.RecoverView.as_view(),
name="event.auth.recover",
),
url("^delete$", event.EventDelete.as_view(), name="event.delete"),
url("^live$", event.EventLive.as_view(), name="event.live"),
url("^api/users$", person.UserList.as_view(), name="event.user_list"),
url(
"^cfp/$",
RedirectView.as_view(pattern_name="orga:cfp.text.view"),
name="cfp",
),
url("^cfp/flow/$", cfp.CfPFlowEditor.as_view(), name="cfp.flow"),
url(
"^cfp/questions/$",
cfp.CfPQuestionList.as_view(),
name="cfp.questions.view",
),
url(
"^cfp/questions/new$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.questions.create",
),
url(
"^cfp/questions/remind$",
cfp.CfPQuestionRemind.as_view(),
name="cfp.questions.remind",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.question.view",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/up$",
cfp.question_move_up,
name="cfp.questions.up",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/down$",
cfp.question_move_down,
name="cfp.questions.down",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/delete$",
cfp.CfPQuestionDelete.as_view(),
name="cfp.question.delete",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/edit$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.question.edit",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/toggle$",
cfp.CfPQuestionToggle.as_view(),
name="cfp.question.toggle",
),
url("^cfp/text$", cfp.CfPTextDetail.as_view(), name="cfp.text.view"),
url(
"^cfp/types/$",
cfp.SubmissionTypeList.as_view(),
name="cfp.types.view",
),
url(
"^cfp/types/new$",
cfp.SubmissionTypeDetail.as_view(),
name="cfp.types.create",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/$",
cfp.SubmissionTypeDetail.as_view(),
name="cfp.type.view",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/delete$",
cfp.SubmissionTypeDelete.as_view(),
name="cfp.type.delete",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/default$",
cfp.SubmissionTypeDefault.as_view(),
name="cfp.type.default",
),
url("^cfp/tracks/$", cfp.TrackList.as_view(), name="cfp.tracks.view"),
url(
"^cfp/tracks/new$",
cfp.TrackDetail.as_view(),
name="cfp.track.create",
),
url(
"^cfp/tracks/(?P<pk>[0-9]+)/$",
cfp.TrackDetail.as_view(),
name="cfp.track.view",
),
url(
"^cfp/tracks/(?P<pk>[0-9]+)/delete$",
cfp.TrackDelete.as_view(),
name="cfp.track.delete",
),
url(
"^cfp/access-codes/$",
cfp.AccessCodeList.as_view(),
name="cfp.access_code.view",
),
url(
"^cfp/access-codes/new$",
cfp.AccessCodeDetail.as_view(),
name="cfp.access_code.create",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/$",
cfp.AccessCodeDetail.as_view(),
name="cfp.access_code.view",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/send$",
cfp.AccessCodeSend.as_view(),
name="cfp.access_code.send",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/delete$",
cfp.AccessCodeDelete.as_view(),
name="cfp.access_code.delete",
),
url(
"^mails/",
include(
[
url(
"^(?P<pk>[0-9]+)/$",
mails.MailDetail.as_view(),
name="mails.outbox.mail.view",
),
url(
"^(?P<pk>[0-9]+)/copy$",
mails.MailCopy.as_view(),
name="mails.outbox.mail.copy",
),
url(
"^(?P<pk>[0-9]+)/delete$",
mails.OutboxPurge.as_view(),
name="mails.outbox.mail.delete",
),
url(
"^(?P<pk>[0-9]+)/send$",
mails.OutboxSend.as_view(),
name="mails.outbox.mail.send",
),
url(
"^templates/$",
mails.TemplateList.as_view(),
name="mails.templates.list",
),
url(
"^templates/new$",
mails.TemplateDetail.as_view(),
name="mails.templates.create",
),
url(
"^templates/(?P<pk>[0-9]+)/$",
mails.TemplateDetail.as_view(),
name="mails.templates.view",
),
url(
"^templates/(?P<pk>[0-9]+)/delete$",
mails.TemplateDelete.as_view(),
name="mails.templates.delete",
),
url(
"^compose$",
mails.ComposeMail.as_view(),
name="mails.compose",
),
url("^sent$", mails.SentMail.as_view(), name="mails.sent"),
url(
"^outbox/$",
mails.OutboxList.as_view(),
name="mails.outbox.list",
),
url(
"^outbox/send$",
mails.OutboxSend.as_view(),
name="mails.outbox.send",
),
url(
"^outbox/purge$",
mails.OutboxPurge.as_view(),
name="mails.outbox.purge",
),
]
),
),
url(
"^submissions/$",
submission.SubmissionList.as_view(),
name="submissions.list",
),
url(
"^submissions/new$",
submission.SubmissionContent.as_view(),
name="submissions.create",
),
url(
"^submissions/cards/$",
cards.SubmissionCards.as_view(),
name="submissions.cards",
),
url(
"^submissions/feed/$",
submission.SubmissionFeed(),
name="submissions.feed",
),
url(
"^submissions/statistics/$",
submission.SubmissionStats.as_view(),
name="submissions.statistics",
),
url(
"^submissions/feedback/$",
submission.AllFeedbacksList.as_view(),
name="submissions.feedback",
),
url(
r"^submissions/(?P<code>[\w-]+)/",
include(
[
url(
"^$",
submission.SubmissionContent.as_view(),
name="submissions.content.view",
),
url(
"^submit$",
submission.SubmissionStateChange.as_view(),
name="submissions.submit",
),
url(
"^accept$",
submission.SubmissionStateChange.as_view(),
name="submissions.accept",
),
url(
"^reject$",
submission.SubmissionStateChange.as_view(),
name="submissions.reject",
),
url(
"^confirm",
submission.SubmissionStateChange.as_view(),
name="submissions.confirm",
),
url(
"^withdraw$",
submission.SubmissionStateChange.as_view(),
name="submissions.withdraw",
),
url(
"^delete",
submission.SubmissionStateChange.as_view(),
name="submissions.delete",
),
url(
"^cancel",
submission.SubmissionStateChange.as_view(),
name="submissions.cancel",
),
url(
"^speakers/$",
submission.SubmissionSpeakers.as_view(),
name="submissions.speakers.view",
),
url(
"^speakers/add$",
submission.SubmissionSpeakersAdd.as_view(),
name="submissions.speakers.add",
),
url(
"^speakers/delete$",
submission.SubmissionSpeakersDelete.as_view(),
name="submissions.speakers.delete",
),
url(
"^reviews/$",
review.ReviewSubmission.as_view(),
name="submissions.reviews",
),
url(
"^reviews/delete$",
review.ReviewSubmissionDelete.as_view(),
name="submissions.reviews.submission.delete",
),
url(
"^feedback/$",
submission.FeedbackList.as_view(),
name="submissions.feedback.list",
),
url(
"^toggle_featured$",
submission.ToggleFeatured.as_view(),
name="submissions.toggle_featured",
),
url(
"^anonymise/$",
submission.Anonymise.as_view(),
name="submissions.anonymise",
),
]
),
),
url("^speakers/$", speaker.SpeakerList.as_view(), name="speakers.list"),
url(
"^speakers/(?P<pk>[0-9]+)/$",
speaker.SpeakerDetail.as_view(),
name="speakers.view",
),
url(
"^speakers/(?P<pk>[0-9]+)/reset$",
speaker.SpeakerPasswordReset.as_view(),
name="speakers.reset",
),
url(
"^speakers/(?P<pk>[0-9]+)/toggle-arrived$",
speaker.SpeakerToggleArrived.as_view(),
name="speakers.arrived",
),
url(
"^info/$",
speaker.InformationList.as_view(),
name="speakers.information.list",
),
url(
"^info/new$",
speaker.InformationDetail.as_view(),
name="speakers.information.create",
),
url(
"^info/(?P<pk>[0-9]+)/$",
speaker.InformationDetail.as_view(),
name="speakers.information.view",
),
url(
"^info/(?P<pk>[0-9]+)/delete$",
speaker.InformationDelete.as_view(),
name="speakers.information.delete",
),
url(
"^reviews/$",
review.ReviewDashboard.as_view(),
name="reviews.dashboard",
),
url(
"^reviews/regenerate/$",
review.RegenerateDecisionMails.as_view(),
name="reviews.regenerate",
),
url(
"^settings/$",
event.EventDetail.as_view(),
name="settings.event.view",
),
url(
"^settings/mail$",
event.EventMailSettings.as_view(),
name="settings.mail.view",
),
url(
"^settings/plugins$",
plugins.EventPluginsView.as_view(),
name="settings.plugins.select",
),
url(
"^settings/widget$",
event.WidgetSettings.as_view(),
name="settings.widget",
),
url(
"^settings/review/$",
event.EventReviewSettings.as_view(),
name="settings.review",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/up$",
event.phase_move_up,
name="settings.review.phase.up",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/down$",
event.phase_move_down,
name="settings.review.phase.down",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/delete$",
event.PhaseDelete.as_view(),
name="settings.review.phasedelete",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/activate$",
event.PhaseActivate.as_view(),
name="settings.review.phasedelete",
),
url(
"^schedule/$", schedule.ScheduleView.as_view(), name="schedule.main"
),
url(
"^schedule/export/$",
schedule.ScheduleExportView.as_view(),
name="schedule.export",
),
url(
"^schedule/export/trigger$",
schedule.ScheduleExportTriggerView.as_view(),
name="schedule.export.trigger",
),
url(
"^schedule/export/download$",
schedule.ScheduleExportDownloadView.as_view(),
name="schedule.export.download",
),
url(
"^schedule/release$",
schedule.ScheduleReleaseView.as_view(),
name="schedule.release",
),
url(
r"^schedule/quick/(?P<code>\w+)/$",
schedule.QuickScheduleView.as_view(),
name="schedule.quick",
),
url(
"^schedule/reset$",
schedule.ScheduleResetView.as_view(),
name="schedule.reset",
),
url(
"^schedule/toggle$",
schedule.ScheduleToggleView.as_view(),
name="schedule.toggle",
),
url(
"^schedule/resend_mails$",
schedule.ScheduleResendMailsView.as_view(),
name="schedule.resend_mails",
),
url(
"^schedule/rooms/$",
schedule.RoomList.as_view(),
name="schedule.rooms.list",
),
url(
"^schedule/rooms/new$",
schedule.RoomDetail.as_view(),
name="schedule.rooms.create",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/$",
schedule.RoomDetail.as_view(),
name="schedule.rooms.view",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/delete$",
schedule.RoomDelete.as_view(),
name="schedule.rooms.delete",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/up$",
schedule.room_move_up,
name="schedule.rooms.up",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/down$",
schedule.room_move_down,
name="schedule.rooms.down",
),
url(
"^schedule/api/talks/$",
schedule.TalkList.as_view(),
name="schedule.api.talks",
),
url(
"^schedule/api/talks/(?P<pk>[0-9]+)/$",
schedule.TalkUpdate.as_view(),
name="schedule.api.update",
),
url(
"^schedule/api/availabilities/(?P<talkid>[0-9]+)/(?P<roomid>[0-9]+)/$",
schedule.RoomTalkAvailabilities.as_view(),
name="schedule.api.availabilities",
),
]
),
),
]
| [((26, 4, 26, 53), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((102, 4, 102, 71), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((29, 14, 29, 64), 'django.views.generic.base.RedirectView.as_view', 'RedirectView.as_view', (), '', False, 'from django.views.generic.base import RedirectView\n'), ((147, 16, 151, 17), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((152, 16, 156, 17), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((495, 16, 499, 17), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((500, 16, 504, 17), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((578, 16, 582, 17), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((583, 16, 587, 17), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import include, url\n'), ((123, 20, 123, 75), 'django.views.generic.base.RedirectView.as_view', 'RedirectView.as_view', (), '', False, 'from django.views.generic.base import RedirectView\n'), ((319, 20, 319, 51), 'pretalx.orga.views.cards.SubmissionCards.as_view', 'cards.SubmissionCards.as_view', ({}, {}), '()', False, 'from pretalx.orga.views import cards\n')] |
Malekhy/ws2122-lspm | ws2122-lspm/Lib/site-packages/pm4py/statistics/overlap/utils/compute.py | e4dc8b801d12f862b8ef536a0f125f346f085a00 | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from enum import Enum
from typing import Optional, Dict, Any, Tuple, List, Union
from intervaltree import Interval, IntervalTree
from pm4py.util import exec_utils
class Parameters(Enum):
EPSILON = "epsilon"
def apply(points: List[Tuple[float, float]], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> List[int]:
"""
Computes the overlap statistic given a list of points, expressed as (min_timestamp, max_timestamp)
Parameters
-----------------
points
List of points with the aforementioned features
parameters
Parameters of the method, including:
- Parameters.EPSILON
Returns
-----------------
overlap
List associating to each point the number of intersecting points
"""
if parameters is None:
parameters = {}
epsilon = exec_utils.get_param_value(Parameters.EPSILON, parameters, 10 ** (-5))
points = [(x[0] - epsilon, x[1] + epsilon) for x in points]
sorted_points = sorted(points)
tree = IntervalTree()
for p in sorted_points:
tree.add(Interval(p[0], p[1]))
overlap = []
for p in points:
overlap.append(len(tree[p[0]:p[1]]))
return overlap
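# --- Illustrative usage (editor's addition, not part of the original module) ---
# Each point is a (min_timestamp, max_timestamp) pair; for every interval the result
# counts how many intervals in the list intersect it (itself included).
if __name__ == "__main__":
    sample_points = [(0.0, 5.0), (3.0, 9.0), (10.0, 12.0)]
    print(apply(sample_points))  # prints [2, 2, 1]: the first two intervals overlap each other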
| [((49, 14, 49, 84), 'pm4py.util.exec_utils.get_param_value', 'exec_utils.get_param_value', ({(49, 41, 49, 59): 'Parameters.EPSILON', (49, 61, 49, 71): 'parameters', (49, 73, 49, 83): '10 ** -5'}, {}), '(Parameters.EPSILON, parameters, 10 ** -5)', False, 'from pm4py.util import exec_utils\n'), ((52, 11, 52, 25), 'intervaltree.IntervalTree', 'IntervalTree', ({}, {}), '()', False, 'from intervaltree import Interval, IntervalTree\n'), ((55, 17, 55, 37), 'intervaltree.Interval', 'Interval', ({(55, 26, 55, 30): 'p[0]', (55, 32, 55, 36): 'p[1]'}, {}), '(p[0], p[1])', False, 'from intervaltree import Interval, IntervalTree\n')] |
sk-Prime/webapp | webapp/apps/Base Quiz/baseui_gen.py | c21d7d49de4e4442f9af29ba9f08f37b5abbd20d | from htmlman import HTMLMan
from styleman import Template
page=HTMLMan()
page.make_responsive()
page.add_title("Base Quiz")
style=Template('antartica')
page.add_body_class(style['page'])
page.add_js("baseui.js")
page.create_section('main',append=True)
page['main'].add_style_class(style['main'])
title=page.create_section('title')
title.add_style_class(style['title'])
title.add_content("Base Quiz")
widget=page.create_section("widget")
widget.add_style_class(style['widget'])
label = page.create_section('label',ID='label')
#label.add_style_class(style['center'])
label.add_style(name='label',mode="class")
label.style_to_cssman(style)
label.style(
"font-size","20pt",
"font-family","monospace",
"height","50px",
"border-bottom","1px solid #ccd",
)
label.add_content("0x0")
answer_l=page.create_section("answer_l1",ID="label_t")
answer_l.add_style_class(style["label"])
answer_l2=page.create_section("answer_l2",ID="label_b")
answer_l2.add_style_class(style["label"])
controls = page.create_section("control")
controls.add_style(name="control",mode="class",cssman_obj=style)
controls.style(
"display","grid",
"grid-template-columns","1fr 1fr",
"gap","10px",
"padding","10px"
)
rand_b=page.create_section('random',tag="button",inner_html="Random")
rand_b.config_attr("type","button","onclick","randomize()")
answer_b=page.create_section('answer_b',tag="button",inner_html="Answer")
answer_b.config_attr("type","button","onclick","answer()")
controls.add_content(rand_b)
controls.add_content(answer_b)
widget.add_content(label)
widget.add_content(answer_l)
widget.add_content(answer_l2)
widget.add_content(controls)
page['main'].add_content(title)
page['main'].add_content(widget)
page.render(style,html_path="baseui.html") | [((4, 5, 4, 14), 'htmlman.HTMLMan', 'HTMLMan', ({}, {}), '()', False, 'from htmlman import HTMLMan\n'), ((7, 6, 7, 27), 'styleman.Template', 'Template', ({(7, 15, 7, 26): '"""antartica"""'}, {}), "('antartica')", False, 'from styleman import Template\n')] |
srcc-msu/job_statistics | cluster_config/cluster.py | 74680a4e4c105ebcff94f089e07fcb44dbcc12d9 | name = "cluster"
num_cores = 1000
GENERAL_PARTITIONS = ["regular"]
GPU_PARTITIONS = ["gpu"]
PARTITIONS = GENERAL_PARTITIONS + GPU_PARTITIONS
ACTIVE_JOB_STATES = ["RUNNING", "COMPLETING"]
FINISHED_JOB_STATES = ["COMPLETED", "NODE_FAIL", "TIMEOUT", "FAILED", "CANCELLED"]
JOB_STATES = ACTIVE_JOB_STATES + FINISHED_JOB_STATES
def node2int(node):
"""custom function to convert nodename to int
    this one keeps only the digit characters from names like node1-001-01"""
return int(''.join(filter(lambda x: x.isdigit(), node)))
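# Editor's note (illustrative, not part of the original config):
# >>> node2int("node1-001-01")
# 100101
# Only the digit characters survive, so the hyphenated node name collapses to a single integer.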
| [] |
OmeGak/indico-plugins-cern | room_assistance/indico_room_assistance/plugin.py | 6e32bc158877080085ceffd021ab1d2247192f75 | # This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2021 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
import dateutil.parser
import pytz
from flask import flash, request, session
from flask_pluginengine import render_plugin_template, url_for_plugin
from indico.core import signals
from indico.core.config import config
from indico.core.plugins import IndicoPlugin
from indico.core.settings.converters import ModelListConverter
from indico.modules.events.requests.models.requests import Request, RequestState
from indico.modules.events.requests.views import WPRequestsEventManagement
from indico.modules.rb.models.rooms import Room
from indico.modules.users import User
from indico.util.string import natural_sort_key
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import EmailListField, IndicoQuerySelectMultipleField, PrincipalListField
from indico.web.menu import TopMenuItem
from indico_room_assistance import _
from indico_room_assistance.blueprint import blueprint
from indico_room_assistance.definition import RoomAssistanceRequest
from indico_room_assistance.util import (can_request_assistance_for_event, event_has_room_with_support_attached,
is_room_assistance_support)
def _order_func(object_list):
return sorted(object_list, key=lambda r: natural_sort_key(r[1].full_name))
class RoomAssistanceForm(IndicoForm):
_fieldsets = [
('Startup assistance emails', ['room_assistance_recipients', 'rooms_with_assistance',
'room_assistance_support']),
]
room_assistance_recipients = EmailListField(_('Recipients'),
description=_('Notifications about room assistance requests are sent '
'to these email addresses (one per line)'))
rooms_with_assistance = IndicoQuerySelectMultipleField('Rooms',
query_factory=lambda: Room.query,
description=_('Rooms for which users can request startup '
'assistance'),
get_label='full_name', collection_class=set,
render_kw={'size': 20}, modify_object_list=_order_func)
room_assistance_support = PrincipalListField(_('Room assistance support'), allow_groups=True,
description=_('List of users who can view the list of events with '
'room startup assistance.'))
class RoomAssistancePlugin(IndicoPlugin):
"""Room assistance request
This plugin lets users request assistance for meeting rooms.
"""
configurable = True
settings_form = RoomAssistanceForm
settings_converters = {
'rooms_with_assistance': ModelListConverter(Room)
}
acl_settings = {'room_assistance_support'}
default_settings = {
'room_assistance_recipients': [],
'rooms_with_assistance': [],
}
def init(self):
super().init()
self.inject_bundle('main.css', WPRequestsEventManagement, subclasses=False,
condition=lambda: request.view_args.get('type') == RoomAssistanceRequest.name)
self.template_hook('event-actions', self._room_assistance_action)
self.connect(signals.menu.items, self._extend_services_menu, sender='top-menu')
self.connect(signals.plugin.get_event_request_definitions, self._get_room_assistance_request)
self.connect(signals.event.updated, self._on_event_update)
def get_blueprints(self):
return blueprint
def _room_assistance_action(self, event, **kwargs):
return render_plugin_template('room_assistance_action.html', event=event,
can_request_assistance=can_request_assistance_for_event(event))
def _extend_services_menu(self, reservation, **kwargs):
if not session.user or not is_room_assistance_support(session.user):
return
return TopMenuItem('services-cern-room-assistance', _('Room assistance'),
url_for_plugin('room_assistance.request_list'), section='services')
def _get_room_assistance_request(self, sender, **kwargs):
return RoomAssistanceRequest
def _on_event_update(self, event, **kwargs):
changes = kwargs['changes']
if not changes.keys() & {'location_data', 'start_dt', 'end_dt'}:
return
request = Request.find_latest_for_event(event, RoomAssistanceRequest.name)
if not request or request.state != RequestState.accepted:
return
if 'location_data' in changes and not event_has_room_with_support_attached(event):
request.definition.reject(request, {'comment': render_plugin_template('auto_reject_no_supported_room.txt')},
User.get_system_user())
request.data = dict(request.data, occurrences=[])
flash(_("The new event location is not in the list of the rooms supported by the room assistance team. "
"Room assistance request has been rejected and support will not be provided."), 'warning')
if changes.keys() & {'start_dt', 'end_dt'}:
tz = pytz.timezone(config.DEFAULT_TIMEZONE)
occurrences = {dateutil.parser.parse(occ).astimezone(tz) for occ in request.data['occurrences']}
req_dates = {occ.date() for occ in occurrences}
event_dates = set(event.iter_days())
old_dates = req_dates - event_dates
has_overlapping_dates = req_dates & event_dates
if not has_overlapping_dates:
request.definition.reject(request,
{'comment': render_plugin_template('auto_reject_no_overlapping_dates.txt')},
User.get_system_user())
request.data = dict(request.data, occurrences=[])
flash(_("The new event dates don't overlap with the existing room assistance request for this event. "
"Room assistance request has been rejected and support will not be provided."), 'warning')
elif old_dates and has_overlapping_dates:
new_data = dict(request.data)
new_data['occurrences'] = [occ.astimezone(pytz.utc).isoformat() for occ in occurrences
if occ.date() in req_dates & event_dates]
request.data = new_data
flash(_("Room assistance had been requested for days that are not between the updated start/end "
"dates. Support will not be provided on these days anymore."), 'warning')
 | [((43, 48, 43, 63), 'indico_room_assistance._', '_', ({(43, 50, 43, 62): '"""Recipients"""'}, {}), "('Recipients')", False, 'from indico_room_assistance import _\n'), ((52, 49, 52, 77), 'indico_room_assistance._', '_', ({(52, 51, 52, 76): '"""Room assistance support"""'}, {}), "('Room assistance support')", False, 'from indico_room_assistance import _\n'), ((66, 33, 66, 57), 'indico.core.settings.converters.ModelListConverter', 'ModelListConverter', ({(66, 52, 66, 56): 'Room'}, {}), '(Room)', False, 'from indico.core.settings.converters import ModelListConverter\n'), ((105, 18, 105, 82), 'indico.modules.events.requests.models.requests.Request.find_latest_for_event', 'Request.find_latest_for_event', ({(105, 48, 105, 53): 'event', (105, 55, 105, 81): 'RoomAssistanceRequest.name'}, {}), '(event, RoomAssistanceRequest.name)', False, 'from indico.modules.events.requests.models.requests import Request, RequestState\n'), ((44, 60, 45, 104), 'indico_room_assistance._', '_', ({(44, 62, 45, 103): '"""Notifications about room assistance requests are sent to these email addresses (one per line)"""'}, {}), "('Notifications about room assistance requests are sent to these email addresses (one per line)'\n    )", False, 'from indico_room_assistance import _\n'), ((48, 71, 49, 86), 'indico_room_assistance._', '_', ({(48, 73, 49, 85): '"""Rooms for which users can request startup assistance"""'}, {}), "('Rooms for which users can request startup assistance')", False, 'from indico_room_assistance import _\n'), ((53, 61, 54, 90), 'indico_room_assistance._', '_', ({(53, 63, 54, 89): '"""List of users who can view the list of events with room startup assistance."""'}, {}), "('List of users who can view the list of events with room startup assistance.'\n    )", False, 'from indico_room_assistance import _\n'), ((94, 60, 94, 80), 'indico_room_assistance._', '_', ({(94, 62, 94, 79): '"""Room assistance"""'}, {}), "('Room assistance')", False, 'from indico_room_assistance import _\n'), ((95, 27, 95, 73), 'flask_pluginengine.url_for_plugin', 'url_for_plugin', ({(95, 42, 95, 72): '"""room_assistance.request_list"""'}, {}), "('room_assistance.request_list')", False, 'from flask_pluginengine import render_plugin_template, url_for_plugin\n'), ((116, 17, 116, 55), 'pytz.timezone', 'pytz.timezone', ({(116, 31, 116, 54): 'config.DEFAULT_TIMEZONE'}, {}), '(config.DEFAULT_TIMEZONE)', False, 'import pytz\n'), ((34, 45, 34, 77), 'indico.util.string.natural_sort_key', 'natural_sort_key', ({(34, 62, 34, 76): 'r[1].full_name'}, {}), '(r[1].full_name)', False, 'from indico.util.string import natural_sort_key\n'), ((88, 61, 88, 100), 'indico_room_assistance.util.can_request_assistance_for_event', 'can_request_assistance_for_event', ({(88, 94, 88, 99): 'event'}, {}), '(event)', False, 'from indico_room_assistance.util import can_request_assistance_for_event, event_has_room_with_support_attached, is_room_assistance_support\n'), ((91, 35, 91, 75), 'indico_room_assistance.util.is_room_assistance_support', 'is_room_assistance_support', ({(91, 62, 91, 74): 'session.user'}, {}), '(session.user)', False, 'from indico_room_assistance.util import can_request_assistance_for_event, event_has_room_with_support_attached, is_room_assistance_support\n'), ((109, 46, 109, 89), 'indico_room_assistance.util.event_has_room_with_support_attached', 'event_has_room_with_support_attached', ({(109, 83, 109, 88): 'event'}, {}), '(event)', False, 'from indico_room_assistance.util import can_request_assistance_for_event, event_has_room_with_support_attached, is_room_assistance_support\n'), ((111, 38, 111, 60), 'indico.modules.users.User.get_system_user', 'User.get_system_user', ({}, {}), '()', False, 'from indico.modules.users import User\n'), ((113, 18, 114, 98), 'indico_room_assistance._', '_', ({(113, 20, 114, 97): '"""The new event location is not in the list of the rooms supported by the room assistance team. Room assistance request has been rejected and support will not be provided."""'}, {}), "('The new event location is not in the list of the rooms supported by the room assistance team. Room assistance request has been rejected and support will not be provided.'\n    )", False, 'from indico_room_assistance import _\n'), ((110, 59, 110, 118), 'flask_pluginengine.render_plugin_template', 'render_plugin_template', ({(110, 82, 110, 117): '"""auto_reject_no_supported_room.txt"""'}, {}), "('auto_reject_no_supported_room.txt')", False, 'from flask_pluginengine import render_plugin_template, url_for_plugin\n'), ((126, 42, 126, 64), 'indico.modules.users.User.get_system_user', 'User.get_system_user', ({}, {}), '()', False, 'from indico.modules.users import User\n'), ((128, 22, 129, 102), 'indico_room_assistance._', '_', ({(128, 24, 129, 101): '"""The new event dates don\'t overlap with the existing room assistance request for this event. Room assistance request has been rejected and support will not be provided."""'}, {}), '("The new event dates don\'t overlap with the existing room assistance request for this event. Room assistance request has been rejected and support will not be provided."\n    )', False, 'from indico_room_assistance import _\n'), ((125, 54, 125, 116), 'flask_pluginengine.render_plugin_template', 'render_plugin_template', ({(125, 77, 125, 115): '"""auto_reject_no_overlapping_dates.txt"""'}, {}), "('auto_reject_no_overlapping_dates.txt')", False, 'from flask_pluginengine import render_plugin_template, url_for_plugin\n'), ((135, 22, 136, 85), 'indico_room_assistance._', '_', ({(135, 24, 136, 84): '"""Room assistance had been requested for days that are not between the updated start/end dates. Support will not be provided on these days anymore."""'}, {}), "('Room assistance had been requested for days that are not between the updated start/end dates. Support will not be provided on these days anymore.'\n    )", False, 'from indico_room_assistance import _\n')]
liangmuxin/datamart | datamart/materializers/wikidata_spo_materializer.py | 495a21588db39c9ad239409208bec701dca07f30 | from datamart.materializers.materializer_base import MaterializerBase
import os
import urllib.request
import sys
import csv
import copy
import json
from typing import List
from pprint import pprint
import re
import typing
from pandas import DataFrame
import traceback
class WikidataSPOMaterializer(MaterializerBase):
property = ""
def __init__(self, **kwargs):
""" initialization and loading the city name to city id map
"""
MaterializerBase.__init__(self, **kwargs)
def get(self,
metadata: dict = None,
constrains: dict = None
) -> typing.Optional[DataFrame]:
materialization_arguments = metadata["materialization"].get("arguments", {})
self.property = materialization_arguments.get("property", "")
prefix = 'http://sitaware.isi.edu:8080/bigdata/namespace/wdq/sparql?query='
format = '&format=json'
result = dict()
property_label = ""
main_query_encoded = self._encode_url(self._formulate_main_query(self.property))
try:
# print(prefix + main_query_encoded + format)
main_query_req = urllib.request.Request(prefix + main_query_encoded + format)
result, property_label = self._process_main_query(self._get_query_result(main_query_req))
except Exception as err:
print(err)
traceback.print_tb(err.__traceback__)
count = 0
while(True):
try:
main_query_encoded = self._encode_url(self._next(self._formulate_main_query(self.property), offset=count))
main_query_req = urllib.request.Request(prefix + main_query_encoded + format)
temp, property_label = self._process_main_query(self._get_query_result(main_query_req))
# property_label = re.sub(r"\s+", '_', property_label)
count += 1
result.update(temp)
except:
# print("property ", property, "count ", count)
break
property_label = re.sub(r"\s+", '_', property_label)
sep = ";"
values = list(result.values())
columns = ["source", "subject_label", "category", "prop_value", "value_label"]
# for val in values:
# col_name = col_name.union(set(val.keys()))
# columns = list(col_name)
rows = list()
for k, v in result.items():
v['value_label'] = list(filter(None, v['value_label']))
v['value_label'] = list() if not any(v['value_label']) else list(v['value_label'])
for k1, v1 in v.items():
if k1 != "source":
# print(k1, v1)
v[k1] = sep.join(v1)
rows.append(v)
df = DataFrame(rows, columns=columns)
# print(df)
return df
@staticmethod
def _formulate_main_query(property):
main_query = 'select distinct ?source ?source_l ?category ?prop_l ?prop_value ?know_as where{\
?source wdt:' + property + ' ?prop_value.\
?source rdfs:label ?source_l.\
?source wdt:P31/rdfs:label ?category.\
filter (lang(?category)="en")\
filter (lang(?source_l)="en")\
wd:' + property + ' rdfs:label ?prop_l.\
filter (lang(?prop_l)="en")\
optional {?prop_value rdfs:label ?know_as.\
filter (lang(?know_as)="en")}\
}'
return main_query
@staticmethod
def _formulate_id_category_query(property):
id_category_query = \
'select distinct ?identifier ?l where{\
?source wdt:' + property + ' ?value.\
?source ?id ?idValue.\
?identifier ?ref ?id.\
optional {?value rdfs:label ?know_as.\
filter (lang(?know_as)="en")}\
?identifier wikibase:directClaim ?id.\
?identifier wikibase:propertyType wikibase:ExternalId.\
?identifier rdfs:label ?l.\
?identifier schema:description ?desc.\
filter (lang(?desc)="en")\
filter (lang(?l)="en")\
}\
ORDER BY ?identifier'
return id_category_query
@staticmethod
def _next(query_sent, offset):
query_sent = query_sent + " LIMIT 1000 " + "OFFSET " + str(1000 * offset)
return query_sent
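    # Editor's note (illustrative, not part of the original source): with the page size
    # hard-coded to 1000, offset=2 turns "select ... }" into "select ... } LIMIT 1000 OFFSET 2000";
    # get() increments the offset on each pass and leaves the loop when a paged request raises.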
@staticmethod
def _encode_url(url):
encoded_url = urllib.parse.quote(url)
return encoded_url
@staticmethod
def _get_query_result(query_req) -> List[dict]:
data = {}
with urllib.request.urlopen(query_req) as r:
data = json.loads(r.read().decode('utf-8'))
result = data['results']['bindings']
return result
@staticmethod
def _process_id_category_query(data):
ids = dict()
for item in data:
identifier = item['l']['value']
ids[identifier] = set()
return ids
@staticmethod
def _process_main_query(data):
result = {}
property_label = ""
for item in data:
category = item['category']['value'].strip()
property_label = item['prop_l']['value'].strip()
source = item['source']['value'].strip()
prop_value = item['prop_value']['value'].strip()
know_as = item['know_as']['value'].strip() if 'know_as' in item.keys() else None
subject_l = item['source_l']['value'].strip()
# id = item['id']['value'].strip()
# id_l = item['id_l']['value'].strip()
# id_value = item['id_value']['value'].strip()
if source not in result.keys():
result[source] = dict()
result[source]['source'] = source
result[source]['category'] = set()
result[source]['prop_value'] = set()
result[source]['subject_label'] = set()
result[source]['value_label'] = set()
# result[source].update(copy.deepcopy(ids))
result[source]['prop_value'].add(prop_value)
result[source]['category'].add(category)
result[source]['subject_label'].add(subject_l)
result[source]['value_label'].add(know_as)
# result[source][id_l].add(id_value)
# pprint("ss", result)
return result, property_label
| [((24, 8, 24, 49), 'datamart.materializers.materializer_base.MaterializerBase.__init__', 'MaterializerBase.__init__', ({(24, 34, 24, 38): 'self'}, {}), '(self, **kwargs)', False, 'from datamart.materializers.materializer_base import MaterializerBase\n'), ((61, 25, 61, 60), 're.sub', 're.sub', ({(61, 32, 61, 38): '"""\\\\s+"""', (61, 40, 61, 43): '"""_"""', (61, 45, 61, 59): 'property_label'}, {}), "('\\\\s+', '_', property_label)", False, 'import re\n'), ((78, 13, 78, 45), 'pandas.DataFrame', 'DataFrame', (), '', False, 'from pandas import DataFrame\n'), ((47, 12, 47, 49), 'traceback.print_tb', 'traceback.print_tb', ({(47, 31, 47, 48): 'err.__traceback__'}, {}), '(err.__traceback__)', False, 'import traceback\n')] |
danilobellini/Axelrod | axelrod/load_data_.py | 2c9212553e06095c24adcb82a5979279cbdf45fb | from typing import Dict, List, Tuple
import pkg_resources
def load_file(filename: str, directory: str) -> List[List[str]]:
"""Loads a data file stored in the Axelrod library's data subdirectory,
likely for parameters for a strategy."""
path = "/".join((directory, filename))
data_bytes = pkg_resources.resource_string(__name__, path)
data = data_bytes.decode("UTF-8", "replace")
rows = []
for line in data.split("\n"):
if line.startswith("#") or len(line) == 0:
continue
s = line.split(", ")
rows.append(s)
return rows
def load_weights(
filename: str = "ann_weights.csv", directory: str = "data"
) -> Dict[str, Tuple[int, int, List[float]]]:
"""Load Neural Network Weights."""
rows = load_file(filename, directory)
d = dict()
for row in rows:
name = str(row[0])
num_features = int(row[1])
num_hidden = int(row[2])
weights = list(map(float, row[3:]))
d[name] = (num_features, num_hidden, weights)
return d
def load_pso_tables(filename="pso_gambler.csv", directory="data"):
"""Load lookup tables."""
rows = load_file(filename, directory)
d = dict()
for row in rows:
        name, a, b, c = str(row[0]), int(row[1]), int(row[2]), int(row[3])
        values = list(map(float, row[4:]))
        d[(name, a, b, c)] = values
return d
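# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch of how the loaders are typically consumed; it assumes the default
# CSV files ship inside the installed axelrod package's data directory.
if __name__ == "__main__":
    weights = load_weights()    # {strategy name: (num_features, num_hidden, [weights, ...])}
    tables = load_pso_tables()  # {(name, a, b, c): [lookup values, ...]}
    print(len(weights), "ANN weight sets;", len(tables), "PSO lookup tables")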
| [((10, 17, 10, 62), 'pkg_resources.resource_string', 'pkg_resources.resource_string', ({(10, 47, 10, 55): '__name__', (10, 57, 10, 61): 'path'}, {}), '(__name__, path)', False, 'import pkg_resources\n')] |
genobank-io/CryptoVault | prescryptchain/api/views.py | 7c2f6c4c55df7d9e172058aad334a26786ea839f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# REST
from rest_framework.viewsets import ViewSetMixin
from rest_framework import routers, serializers, viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
from rest_framework.permissions import IsAuthenticated, BasePermission
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.views import APIView
from rest_framework import mixins, generics
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
# our models
from blockchain.models import Block, Prescription, Transaction, Address
from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri
from .exceptions import NonValidPubKey
# Define router
router = routers.DefaultRouter()
class PrescriptionSerializer(serializers.ModelSerializer):
""" Prescription serializer """
timestamp = serializers.DateTimeField(read_only=False)
data = serializers.JSONField(binary=False, read_only=False, required=False)
files = serializers.JSONField(binary=False, read_only=False, required=False)
previous_hash = serializers.CharField(read_only=False, required=False, default="0")
class Meta:
model = Prescription
fields = (
'id',
'public_key',
'data',
"files",
'timestamp',
'signature',
'previous_hash',
'raw_size',
'hash_id',
'is_valid',
'transaction',
'readable',
)
        read_only_fields = ('id', 'hash_id', 'is_valid', 'transaction',)
def validate(self, data):
''' Method to control Extra Keys on Payload!'''
extra_keys = set(self.initial_data.keys()) - set(self.fields.keys())
if extra_keys:
print(extra_keys)
return data
def create(self, validated_data):
return Transaction.objects.create_tx(data=validated_data)
class PrescriptionViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
# Temporally without auth
# authentication_classes = (TokenAuthentication, BasicAuthentication, )
# permission_classes = (IsAuthenticated, )
serializer_class = PrescriptionSerializer
lookup_field = "hash_id"
http_method_names = ['get', 'post', 'options']
def get_queryset(self):
''' Custom Get queryset '''
raw_public_key = self.request.query_params.get('public_key', None)
if raw_public_key:
try:
pub_key = pubkey_string_to_rsa(raw_public_key)
except:
pub_key , raw_public_key = pubkey_base64_to_rsa(raw_public_key)
hex_raw_pub_key = savify_key(pub_key)
return Prescription.objects.filter(public_key=hex_raw_pub_key).order_by('-id')
else:
return Prescription.objects.all().order_by('-id')
# add patient filter by email, after could modify with other
router.register(r'rx-endpoint', PrescriptionViewSet, 'prescription-endpoint')
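# Illustrative client call (an assumption, not part of the original module): the
# viewset registered above filters prescriptions by the ``public_key`` query
# parameter, so a consumer could fetch the prescriptions for a key roughly like
# this (host and key format are placeholders).
def _example_fetch_prescriptions(base_url, raw_public_key):
    import requests  # assumed to be available in the client environment
    response = requests.get(
        "{}/rx-endpoint/".format(base_url),
        params={"public_key": raw_public_key},
    )
    response.raise_for_status()
    return response.json()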
class BlockSerializer(serializers.ModelSerializer):
""" Prescription serializer """
class Meta:
model = Block
fields = (
'id',
'hash_block',
'previous_hash',
'raw_size',
'data',
'timestamp',
'merkleroot',
'hashcash',
'nonce',
)
read_only_fields = ('id', 'hash_block','timestamp','previous_hash', 'raw_size', 'data', 'merkleroot','hashcash','nonce',)
class BlockViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
serializer_class = BlockSerializer
def get_queryset(self):
return Block.objects.all().order_by('-timestamp')
# add patient filter by email, after could modify with other
router.register(r'block', BlockViewSet, 'block-endpoint')
class AddressSerializer(serializers.ModelSerializer):
""" Address serializer """
    pub_key = serializers.CharField(read_only=True, allow_null=True, source="get_pub_key")
class Meta:
model = Address
fields = (
'public_key_b64',
'address',
'is_valid',
'pub_key',
)
read_only_fields = ('address','pub_key', )
class AddressViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
serializer_class = AddressSerializer
lookup_field = "address"
http_method_names = ['get', 'options']
def get_queryset(self):
''' Custom Get queryset '''
raw_public_key = self.request.query_params.get('public_key', None)
if raw_public_key:
try:
pub_key_b64 = pubkey_base64_from_uri(raw_public_key)
except Exception as e:
raise NonValidPubKey
else:
_address = Address.objects.get_or_create_rsa_address(pub_key_b64)
return Address.objects.filter(address=_address)
else:
return Address.objects.all()
# add patient filter by email, after could modify with other
router.register(r'address', AddressViewSet, 'address_endpoint')
| [((20, 9, 20, 32), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ({}, {}), '()', False, 'from rest_framework import routers, serializers, viewsets\n'), ((25, 16, 25, 58), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', (), '', False, 'from rest_framework import routers, serializers, viewsets\n'), ((26, 11, 26, 79), 'rest_framework.serializers.JSONField', 'serializers.JSONField', (), '', False, 'from rest_framework import routers, serializers, viewsets\n'), ((27, 12, 27, 80), 'rest_framework.serializers.JSONField', 'serializers.JSONField', (), '', False, 'from rest_framework import routers, serializers, viewsets\n'), ((28, 20, 28, 87), 'rest_framework.serializers.CharField', 'serializers.CharField', (), '', False, 'from rest_framework import routers, serializers, viewsets\n'), ((117, 14, 117, 90), 'rest_framework.serializers.CharField', 'serializers.CharField', (), '', False, 'from rest_framework import routers, serializers, viewsets\n'), ((56, 15, 56, 65), 'blockchain.models.Transaction.objects.create_tx', 'Transaction.objects.create_tx', (), '', False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((76, 30, 76, 49), 'blockchain.utils.savify_key', 'savify_key', ({(76, 41, 76, 48): 'pub_key'}, {}), '(pub_key)', False, 'from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri\n'), ((150, 19, 150, 40), 'blockchain.models.Address.objects.all', 'Address.objects.all', ({}, {}), '()', False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((73, 26, 73, 62), 'blockchain.utils.pubkey_string_to_rsa', 'pubkey_string_to_rsa', ({(73, 47, 73, 61): 'raw_public_key'}, {}), '(raw_public_key)', False, 'from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri\n'), ((108, 15, 108, 34), 'blockchain.models.Block.objects.all', 'Block.objects.all', ({}, {}), '()', False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((141, 30, 141, 68), 'blockchain.utils.pubkey_base64_from_uri', 'pubkey_base64_from_uri', ({(141, 53, 141, 67): 'raw_public_key'}, {}), '(raw_public_key)', False, 'from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri\n'), ((146, 27, 146, 81), 'blockchain.models.Address.objects.get_or_create_rsa_address', 'Address.objects.get_or_create_rsa_address', ({(146, 69, 146, 80): 'pub_key_b64'}, {}), '(pub_key_b64)', False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((147, 23, 147, 63), 'blockchain.models.Address.objects.filter', 'Address.objects.filter', (), '', False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((75, 43, 75, 79), 'blockchain.utils.pubkey_base64_to_rsa', 'pubkey_base64_to_rsa', ({(75, 64, 75, 78): 'raw_public_key'}, {}), '(raw_public_key)', False, 'from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri\n'), ((77, 19, 77, 74), 'blockchain.models.Prescription.objects.filter', 'Prescription.objects.filter', (), '', False, 'from blockchain.models import Block, Prescription, Transaction, Address\n'), ((79, 19, 79, 45), 'blockchain.models.Prescription.objects.all', 'Prescription.objects.all', ({}, {}), '()', False, 'from blockchain.models import Block, Prescription, Transaction, Address\n')] |
stvreumi/pyre-check | client/commands/incremental.py | 94d13c8df37b53843ae92544b81042347b64315d | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import atexit
import logging
import os
import subprocess
import sys
from typing import List
from .command import ClientException, ExitCode, State
from .reporting import Reporting
from .start import Start
LOG = logging.getLogger(__name__)
class Incremental(Reporting):
NAME = "incremental"
def __init__(self, arguments, configuration, analysis_directory) -> None:
super(Incremental, self).__init__(arguments, configuration, analysis_directory)
def _run(self) -> None:
if self._state() == State.DEAD:
LOG.warning("Starting server at `%s`.", self._analysis_directory.get_root())
arguments = self._arguments
arguments.terminal = False
arguments.no_watchman = False
Start(arguments, self._configuration, self._analysis_directory).run()
if self._state() != State.DEAD:
LOG.info("Waiting for server...")
result = self._call_client(command=self.NAME)
try:
result.check()
errors = self._get_errors(result)
self._print(errors)
except ClientException as exception:
LOG.error("Error while waiting for server.")
LOG.error("Run `%s restart` in order to restart the server.", sys.argv[0])
self._exit_code = ExitCode.FAILURE
def _flags(self) -> List[str]:
flags = super()._flags()
flags.extend(
[
"-typeshed",
self._configuration.typeshed,
"-expected-binary-version",
self._configuration.version_hash,
]
)
search_path = self._configuration.search_path
if search_path:
flags.extend(["-search-path", ",".join(search_path)])
return flags
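    # For illustration only (all values below are hypothetical): with a typeshed
    # at /opt/typeshed, version hash "abc123" and one search path, the list
    # returned above would look like
    #   [..., "-typeshed", "/opt/typeshed",
    #    "-expected-binary-version", "abc123",
    #    "-search-path", "stubs"]
    # where "..." stands for whatever flags the parent class contributed.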
# pyre-ignore: T31696900
def _read_stderr(self, _stream, analysis_directory) -> None:
stderr_file = os.path.join(analysis_directory, ".pyre/server/server.stdout")
with subprocess.Popen(
["tail", "-f", stderr_file],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
) as stderr_tail:
atexit.register(stderr_tail.terminate)
super(Incremental, self)._read_stderr(
stderr_tail.stdout, analysis_directory
)
| [((18, 6, 18, 33), 'logging.getLogger', 'logging.getLogger', ({(18, 24, 18, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((68, 22, 68, 84), 'os.path.join', 'os.path.join', ({(68, 35, 68, 53): 'analysis_directory', (68, 55, 68, 83): '""".pyre/server/server.stdout"""'}, {}), "(analysis_directory, '.pyre/server/server.stdout')", False, 'import os\n'), ((69, 13, 73, 9), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((74, 12, 74, 50), 'atexit.register', 'atexit.register', ({(74, 28, 74, 49): 'stderr_tail.terminate'}, {}), '(stderr_tail.terminate)', False, 'import atexit\n')] |
rish-raghu/Object-Goal-Navigation | main_random_policy.py | d2c882f3a97396c691fc75b46bd94bb7077f7d0f | from collections import deque, defaultdict
import os
import sys
import logging
import time
import json
import gym
import torch.nn as nn
import torch
import numpy as np
import matplotlib.pyplot as plt
from model import RL_Policy, Semantic_Mapping
from utils.storage import GlobalRolloutStorage
from envs import make_vec_envs
from arguments import get_args
import algo
os.environ["OMP_NUM_THREADS"] = "1"
def main():
args = get_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Setup Logging
log_dir = "{}/models/{}/".format(args.dump_location, args.exp_name)
dump_dir = "{}/dump/{}/".format(args.dump_location, args.exp_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
logging.basicConfig(
filename=log_dir + 'train.log',
level=logging.INFO)
print("Dumping at {}".format(log_dir))
print(args)
logging.info(args)
# Logging and loss variables
num_scenes = args.num_processes
num_episodes = int(args.num_eval_episodes)
device = args.device = torch.device("cuda:0" if args.cuda else "cpu")
g_masks = torch.ones(num_scenes).float().to(device)
best_g_reward = -np.inf
# one episode per process for both train and eval
# for eval, one scene per process
if args.eval:
episode_success = []
episode_spl = []
episode_dist = []
for _ in range(args.num_processes):
episode_success.append(deque(maxlen=num_episodes))
episode_spl.append(deque(maxlen=num_episodes))
episode_dist.append(deque(maxlen=num_episodes))
# for train, different episodes of same scene per process
else:
episode_success = deque(maxlen=1000)
episode_spl = deque(maxlen=1000)
episode_dist = deque(maxlen=1000)
finished = np.zeros((args.num_processes))
wait_env = np.zeros((args.num_processes))
g_episode_rewards = deque(maxlen=1000)
g_value_losses = deque(maxlen=1000)
g_action_losses = deque(maxlen=1000)
g_dist_entropies = deque(maxlen=1000)
per_step_g_rewards = deque(maxlen=1000)
g_process_rewards = np.zeros((num_scenes))
# Starting environments
torch.set_num_threads(1)
envs = make_vec_envs(args)
obs, infos = envs.reset()
full_episode_data = []
episode_data = [None] * num_scenes
for e, info in enumerate(infos):
cInfo = info.copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
torch.set_grad_enabled(False)
# Initialize map variables:
# Full map consists of multiple channels containing the following:
# 1. Obstacle Map
# 2. Exploread Area (places that are known to be free or occupied)
# 3. Current Agent Location
# 4. Past Agent Locations
# 5,6,7,.. : Semantic Categories
nc = args.num_sem_categories + 4 # num channels
# Calculating full and local map sizes
map_size = args.map_size_cm // args.map_resolution
full_w, full_h = map_size, map_size
local_w = int(full_w / args.global_downscaling)
local_h = int(full_h / args.global_downscaling)
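    # Worked example (illustrative defaults, not read from this file): with 16
    # semantic categories, a 2400 cm map at 5 cm resolution and a global
    # downscaling factor of 2, nc = 20, the full map is 20 x 480 x 480 and each
    # local map is 20 x 240 x 240.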
# Initializing full and local map
full_map = torch.zeros(num_scenes, nc, full_w, full_h).float().to(device)
local_map = torch.zeros(num_scenes, nc, local_w,
local_h).float().to(device)
# Initial full and local pose
full_pose = torch.zeros(num_scenes, 3).float().to(device)
local_pose = torch.zeros(num_scenes, 3).float().to(device)
# Origin of local map
origins = np.zeros((num_scenes, 3))
# Local Map Boundaries
lmb = np.zeros((num_scenes, 4)).astype(int)
# Planner pose inputs has 7 dimensions
# 1-3 store continuous global agent location
# 4-7 store local map boundaries
planner_pose_inputs = np.zeros((num_scenes, 7))
# get local boundary (x1, x2, y1, y2) given local agent position (x, y) and map size
def get_local_map_boundaries(agent_loc, local_sizes, full_sizes):
loc_r, loc_c = agent_loc
local_w, local_h = local_sizes
full_w, full_h = full_sizes
if args.global_downscaling > 1:
gx1, gy1 = loc_r - local_w // 2, loc_c - local_h // 2
gx2, gy2 = gx1 + local_w, gy1 + local_h
if gx1 < 0:
gx1, gx2 = 0, local_w
if gx2 > full_w:
gx1, gx2 = full_w - local_w, full_w
if gy1 < 0:
gy1, gy2 = 0, local_h
if gy2 > full_h:
gy1, gy2 = full_h - local_h, full_h
else:
gx1, gx2, gy1, gy2 = 0, full_w, 0, full_h
return [gx1, gx2, gy1, gy2]
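    # e.g. (illustrative numbers): for a 480x480 full map, a 240x240 local window
    # and an agent at cell (100, 250), the boundaries clamp to [0, 240, 130, 370].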
# initialize global and local maps and poses given that initial position
# is at map center with 0 orientation
def init_map_and_pose():
full_map.fill_(0.)
full_pose.fill_(0.)
full_pose[:, :2] = args.map_size_cm / 100.0 / 2.0
locs = full_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
# 3x3 grid around agent location is considered explored
full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
for e in range(num_scenes):
local_map[e] = full_map[e, :,
lmb[e, 0]:lmb[e, 1],
lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
# identical to above, except for specific environment
def init_map_and_pose_for_env(e):
full_map[e].fill_(0.)
full_pose[e].fill_(0.)
full_pose[e, :2] = args.map_size_cm / 100.0 / 2.0
locs = full_pose[e].cpu().numpy()
planner_pose_inputs[e, :3] = locs
r, c = locs[1], locs[0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
local_map[e] = full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
# reward is the newly explored area in a given step (in m^2)
def update_intrinsic_rew(e):
prev_explored_area = full_map[e, 1].sum(1).sum(0)
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
local_map[e]
curr_explored_area = full_map[e, 1].sum(1).sum(0)
intrinsic_rews[e] = curr_explored_area - prev_explored_area
intrinsic_rews[e] *= (args.map_resolution / 100.)**2 # to m^2
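    # e.g. at a 5 cm map resolution each newly explored cell contributes
    # (5 / 100)^2 = 0.0025 m^2 of intrinsic reward.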
def get_random_goal(e):
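        # sample up to 20 candidate cells and keep the first whose cell is still
        # zero in map channel 1 (the explored-area channel per the layout above);
        # fall back to the last sample otherwise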
for _ in range(20):
goal = np.random.rand(2)
goal = [int(goal[0] * local_w), int(goal[1] * local_w)]
goal = [min(goal[0], int(local_w-1)), min(goal[1], int(local_w-1))]
if not local_map[e, 1, goal[0], goal[1]]: break
return goal
init_map_and_pose()
# Global policy observation space
ngc = 8 + args.num_sem_categories
es = 2
g_observation_space = gym.spaces.Box(0, 1, # binary local map
(ngc,
local_w,
local_h), dtype='uint8')
# Semantic Mapping
sem_map_module = Semantic_Mapping(args).to(device)
sem_map_module.eval()
intrinsic_rews = torch.zeros(num_scenes).to(device)
# Predict semantic map from frame 1
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx in range(num_scenes)])
).float().to(device)
# args (obs, pose_obs, maps_last, poses_last)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
global_goals = [get_random_goal(e) for e in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
episode_data[e]["used_policy"].append(True)
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy() # obstacles
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy() # explored
p_input['pose_pred'] = planner_pose_inputs[e] # global location+local map bounds
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = 1
p_input['found_goal'] = 0
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
            # likely purpose: seed the catch-all last channel with a tiny value so
            # the per-pixel argmax below falls back to it wherever no semantic
            # category was detected (visualization only)
            local_map[e, -1, :, :] = 1e-5
# single channel where each grid loc is cat ID
p_input['sem_map_pred'] = local_map[e, 4:, :, :
].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
start = time.time()
g_reward = 0
torch.set_grad_enabled(False)
spl_per_category = defaultdict(list)
success_per_category = defaultdict(list)
for step in range(args.num_training_frames // args.num_processes + 1):
if finished.sum() == args.num_processes:
break
g_step = (step // args.num_local_steps) % args.num_global_steps # global step num in PPO
l_step = step % args.num_local_steps # local step num in global step
# ------------------------------------------------------------------
# Reinitialize variables when episode ends
l_masks = torch.FloatTensor([0 if x else 1
for x in done]).to(device)
g_masks *= l_masks
for e, x in enumerate(done):
if x:
spl = infos[e]['spl']
success = infos[e]['success']
dist = infos[e]['distance_to_goal']
spl_per_category[infos[e]['goal_name']].append(spl)
success_per_category[infos[e]['goal_name']].append(success)
if args.eval:
episode_success[e].append(success)
episode_spl[e].append(spl)
episode_dist[e].append(dist)
if len(episode_success[e]) == num_episodes:
finished[e] = 1
episode_data[e]["success"] = success
episode_data[e]["spl"] = spl
episode_data[e]["distance_to_goal"] = dist
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = local_map[e]
episode_data[e]["explored_area"] = full_map[e, 1].sum(1).sum(0).item()
scene = episode_data[e]["scene_id"][16:-4]
if args.save_maps:
np.save('{}/maparr_{}_{}'.format(dump_dir, scene, episode_data[e]['episode_id']), full_map[e].cpu().numpy())
full_episode_data.append(episode_data[e])
cInfo = infos[e].copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
else:
episode_success.append(success)
episode_spl.append(spl)
episode_dist.append(dist)
wait_env[e] = 1.
update_intrinsic_rew(e)
init_map_and_pose_for_env(e)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Semantic Mapping Module
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx
in range(num_scenes)])
).float().to(device)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs + origins
local_map[:, 2, :, :].fill_(0.) # Resetting current location channel
# update current location
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 2:loc_r + 3, loc_c - 2:loc_c + 3] = 1.
if args.eval and not wait_env[e]:
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Global Policy
if l_step == args.num_local_steps - 1:
# For every global step, update the full and local maps
for e in range(num_scenes):
if wait_env[e] == 1: # New episode
wait_env[e] = 0.
else:
update_intrinsic_rew(e)
# update global map and pose based on new position in old local frame
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
local_map[e]
full_pose[e] = local_pose[e] + \
torch.from_numpy(origins[e]).to(device).float()
# center the local frame based on new position
locs = full_pose[e].cpu().numpy()
r, c = locs[1], locs[0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
# compute new local map and pose based on new local frame
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
local_map[e] = full_map[e, :,
lmb[e, 0]:lmb[e, 1],
lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
locs = local_pose.cpu().numpy()
# Get exploration reward and metrics
g_reward = torch.from_numpy(np.asarray(
[infos[env_idx]['g_reward'] for env_idx in range(num_scenes)])
).float().to(device)
g_reward += args.intrinsic_rew_coeff * intrinsic_rews.detach()
for e in range(num_scenes):
if args.eval and not wait_env[e]:
episode_data[e]["goal_rewards"].append(infos[e]["g_reward"])
episode_data[e]["explore_rewards"].append(intrinsic_rews[e].item())
g_process_rewards += g_reward.cpu().numpy()
g_total_rewards = g_process_rewards * \
(1 - g_masks.cpu().numpy())
g_process_rewards *= g_masks.cpu().numpy()
per_step_g_rewards.append(np.mean(g_reward.cpu().numpy()))
if np.sum(g_total_rewards) != 0:
for total_rew in g_total_rewards:
if total_rew != 0:
g_episode_rewards.append(total_rew)
global_goals = [get_random_goal(e) for e in range(num_scenes)]
for e in range(num_scenes):
if args.eval and not wait_env[e]:
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
g_reward = 0
g_masks = torch.ones(num_scenes).float().to(device)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Update long-term goal if target object is found
found_goal = [0 for _ in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
# If goal category not found in map, goal is the location sampled by
# policy
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
if args.eval and not wait_env[e]:
episode_data[e]["used_policy"].append(True)
# Else if goal category found in map, use all locations where prob of goal
# obj existing is > 0 as the goal map for planner
for e in range(num_scenes):
cn = infos[e]['goal_cat_id'] + 4
if local_map[e, cn, :, :].sum() != 0.:
cat_semantic_map = local_map[e, cn, :, :].cpu().numpy()
cat_semantic_scores = cat_semantic_map
cat_semantic_scores[cat_semantic_scores > 0] = 1.
goal_maps[e] = cat_semantic_scores
found_goal[e] = 1
if args.eval and not wait_env[e]:
episode_data[e]["used_policy"][-1] = False
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Take action and get next observation
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy()
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy()
p_input['pose_pred'] = planner_pose_inputs[e]
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = l_step == args.num_local_steps - 1
p_input['found_goal'] = found_goal[e]
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
local_map[e, -1, :, :] = 1e-5
p_input['sem_map_pred'] = local_map[e, 4:, :,
:].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
# ------------------------------------------------------------------
# Logging
if len(full_episode_data) % args.episode_save_interval == 0:
with open('{}/{}_episode_data.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(full_episode_data, f)
if step % args.log_interval == 0:
end = time.time()
time_elapsed = time.gmtime(end - start)
log = " ".join([
"Time: {0:0=2d}d".format(time_elapsed.tm_mday - 1),
"{},".format(time.strftime("%Hh %Mm %Ss", time_elapsed)),
"num timesteps {},".format(step * num_scenes),
"FPS {},".format(int(step * num_scenes / (end - start)))
])
log += "\n\tRewards:"
if len(g_episode_rewards) > 0:
log += " ".join([
" Global step mean/med rew:",
"{:.4f}/{:.4f},".format(
np.mean(per_step_g_rewards),
np.median(per_step_g_rewards)),
" Global eps mean/med/min/max eps rew:",
"{:.3f}/{:.3f}/{:.3f}/{:.3f},".format(
np.mean(g_episode_rewards),
np.median(g_episode_rewards),
np.min(g_episode_rewards),
np.max(g_episode_rewards))
])
if args.eval:
total_success = []
total_spl = []
total_dist = []
for e in range(args.num_processes):
for acc in episode_success[e]:
total_success.append(acc)
for dist in episode_dist[e]:
total_dist.append(dist)
for spl in episode_spl[e]:
total_spl.append(spl)
if len(total_spl) > 0:
log += " ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(total_success),
np.mean(total_spl),
np.mean(total_dist),
len(total_spl))
else:
if len(episode_success) > 100:
log += " ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(episode_success),
np.mean(episode_spl),
np.mean(episode_dist),
len(episode_spl))
log += "\n\tLosses:"
if len(g_value_losses) > 0 and not args.eval:
log += " ".join([
" Policy Loss value/action/dist:",
"{:.3f}/{:.3f}/{:.3f},".format(
np.mean(g_value_losses),
np.mean(g_action_losses),
np.mean(g_dist_entropies))
])
print(log)
logging.info(log)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Save best models
if (step * num_scenes) % args.save_interval < \
num_scenes:
if len(g_episode_rewards) >= 1000 and \
(np.mean(g_episode_rewards) >= best_g_reward) \
and not args.eval:
torch.save(g_policy.state_dict(),
os.path.join(log_dir, "model_best.pth"))
best_g_reward = np.mean(g_episode_rewards)
# Save periodic models
if (step * num_scenes) % args.save_periodic < \
num_scenes:
total_steps = step * num_scenes
if not args.eval:
torch.save(g_policy.state_dict(),
os.path.join(dump_dir,
"periodic_{}.pth".format(total_steps)))
# ------------------------------------------------------------------
# Print and save model performance numbers during evaluation
if args.eval:
print("Dumping eval details...")
total_success = []
total_spl = []
total_dist = []
for e in range(args.num_processes):
for acc in episode_success[e]:
total_success.append(acc)
for dist in episode_dist[e]:
total_dist.append(dist)
for spl in episode_spl[e]:
total_spl.append(spl)
if len(total_spl) > 0:
log = "Final ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(total_success),
np.mean(total_spl),
np.mean(total_dist),
len(total_spl))
print(log)
logging.info(log)
# Save the spl per category
log = "Success | SPL per category\n"
for key in success_per_category:
log += "{}: {} | {}\n".format(key,
sum(success_per_category[key]) /
len(success_per_category[key]),
sum(spl_per_category[key]) /
len(spl_per_category[key]))
print(log)
logging.info(log)
with open('{}/{}_spl_per_cat_pred_thr.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(spl_per_category, f)
with open('{}/{}_success_per_cat_pred_thr.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(success_per_category, f)
with open('{}/{}_episode_data.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(full_episode_data, f)
if __name__ == "__main__":
main()
| [((23, 11, 23, 21), 'arguments.get_args', 'get_args', ({}, {}), '()', False, 'from arguments import get_args\n'), ((25, 4, 25, 29), 'numpy.random.seed', 'np.random.seed', ({(25, 19, 25, 28): 'args.seed'}, {}), '(args.seed)', True, 'import numpy as np\n'), ((26, 4, 26, 32), 'torch.manual_seed', 'torch.manual_seed', ({(26, 22, 26, 31): 'args.seed'}, {}), '(args.seed)', False, 'import torch\n'), ((40, 4, 42, 27), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((45, 4, 45, 22), 'logging.info', 'logging.info', ({(45, 17, 45, 21): 'args'}, {}), '(args)', False, 'import logging\n'), ((50, 27, 50, 73), 'torch.device', 'torch.device', ({(50, 40, 50, 72): "'cuda:0' if args.cuda else 'cpu'"}, {}), "('cuda:0' if args.cuda else 'cpu')", False, 'import torch\n'), ((72, 15, 72, 45), 'numpy.zeros', 'np.zeros', ({(72, 25, 72, 43): 'args.num_processes'}, {}), '(args.num_processes)', True, 'import numpy as np\n'), ((73, 15, 73, 45), 'numpy.zeros', 'np.zeros', ({(73, 25, 73, 43): 'args.num_processes'}, {}), '(args.num_processes)', True, 'import numpy as np\n'), ((75, 24, 75, 42), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((77, 21, 77, 39), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((78, 22, 78, 40), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((79, 23, 79, 41), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((81, 25, 81, 43), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((83, 24, 83, 46), 'numpy.zeros', 'np.zeros', ({(83, 34, 83, 44): 'num_scenes'}, {}), '(num_scenes)', True, 'import numpy as np\n'), ((86, 4, 86, 28), 'torch.set_num_threads', 'torch.set_num_threads', ({(86, 26, 86, 27): '(1)'}, {}), '(1)', False, 'import torch\n'), ((87, 11, 87, 30), 'envs.make_vec_envs', 'make_vec_envs', ({(87, 25, 87, 29): 'args'}, {}), '(args)', False, 'from envs import make_vec_envs\n'), ((102, 4, 102, 33), 'torch.set_grad_enabled', 'torch.set_grad_enabled', ({(102, 27, 102, 32): '(False)'}, {}), '(False)', False, 'import torch\n'), ((129, 14, 129, 39), 'numpy.zeros', 'np.zeros', ({(129, 23, 129, 38): '(num_scenes, 3)'}, {}), '((num_scenes, 3))', True, 'import numpy as np\n'), ((137, 26, 137, 51), 'numpy.zeros', 'np.zeros', ({(137, 35, 137, 50): '(num_scenes, 7)'}, {}), '((num_scenes, 7))', True, 'import numpy as np\n'), ((242, 26, 245, 66), 'gym.spaces.Box', 'gym.spaces.Box', (), '', False, 'import gym\n'), ((299, 12, 299, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((302, 4, 302, 33), 'torch.set_grad_enabled', 'torch.set_grad_enabled', ({(302, 27, 302, 32): '(False)'}, {}), '(False)', False, 'import torch\n'), ((303, 23, 303, 40), 'collections.defaultdict', 'defaultdict', ({(303, 35, 303, 39): 'list'}, {}), '(list)', False, 'from collections import deque, defaultdict\n'), ((304, 27, 304, 44), 'collections.defaultdict', 'defaultdict', ({(304, 39, 304, 43): 'list'}, {}), '(list)', False, 'from collections import deque, defaultdict\n'), ((29, 8, 29, 41), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', ({(29, 31, 29, 40): 'args.seed'}, {}), '(args.seed)', False, 'import torch\n'), ((35, 11, 35, 34), 'os.path.exists', 'os.path.exists', ({(35, 26, 35, 33): 'log_dir'}, {}), '(log_dir)', False, 'import os\n'), ((36, 8, 36, 28), 'os.makedirs', 'os.makedirs', ({(36, 20, 36, 27): 'log_dir'}, {}), '(log_dir)', False, 
'import os\n'), ((37, 11, 37, 35), 'os.path.exists', 'os.path.exists', ({(37, 26, 37, 34): 'dump_dir'}, {}), '(dump_dir)', False, 'import os\n'), ((38, 8, 38, 29), 'os.makedirs', 'os.makedirs', ({(38, 20, 38, 28): 'dump_dir'}, {}), '(dump_dir)', False, 'import os\n'), ((68, 26, 68, 44), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((69, 22, 69, 40), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((70, 23, 70, 41), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((275, 17, 275, 45), 'numpy.zeros', 'np.zeros', ({(275, 26, 275, 44): '(local_w, local_h)'}, {}), '((local_w, local_h))', True, 'import numpy as np\n'), ((618, 8, 618, 25), 'logging.info', 'logging.info', ({(618, 21, 618, 24): 'log'}, {}), '(log)', False, 'import logging\n'), ((630, 8, 630, 25), 'logging.info', 'logging.info', ({(630, 21, 630, 24): 'log'}, {}), '(log)', False, 'import logging\n'), ((132, 10, 132, 35), 'numpy.zeros', 'np.zeros', ({(132, 19, 132, 34): '(num_scenes, 4)'}, {}), '((num_scenes, 4))', True, 'import numpy as np\n'), ((231, 19, 231, 36), 'numpy.random.rand', 'np.random.rand', ({(231, 34, 231, 35): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((248, 21, 248, 43), 'model.Semantic_Mapping', 'Semantic_Mapping', ({(248, 38, 248, 42): 'args'}, {}), '(args)', False, 'from model import RL_Policy, Semantic_Mapping\n'), ((251, 21, 251, 44), 'torch.zeros', 'torch.zeros', ({(251, 33, 251, 43): 'num_scenes'}, {}), '(num_scenes)', False, 'import torch\n'), ((457, 21, 457, 49), 'numpy.zeros', 'np.zeros', ({(457, 30, 457, 48): '(local_w, local_h)'}, {}), '((local_w, local_h))', True, 'import numpy as np\n'), ((506, 18, 506, 29), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((507, 27, 507, 51), 'time.gmtime', 'time.gmtime', ({(507, 39, 507, 50): 'end - start'}, {}), '(end - start)', False, 'import time\n'), ((570, 12, 570, 29), 'logging.info', 'logging.info', ({(570, 25, 570, 28): 'log'}, {}), '(log)', False, 'import logging\n'), ((634, 12, 634, 42), 'json.dump', 'json.dump', ({(634, 22, 634, 38): 'spl_per_category', (634, 40, 634, 41): 'f'}, {}), '(spl_per_category, f)', False, 'import json\n'), ((638, 12, 638, 46), 'json.dump', 'json.dump', ({(638, 22, 638, 42): 'success_per_category', (638, 44, 638, 45): 'f'}, {}), '(success_per_category, f)', False, 'import json\n'), ((642, 12, 642, 43), 'json.dump', 'json.dump', ({(642, 22, 642, 39): 'full_episode_data', (642, 41, 642, 42): 'f'}, {}), '(full_episode_data, f)', False, 'import json\n'), ((63, 35, 63, 61), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((64, 31, 64, 57), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((65, 32, 65, 58), 'collections.deque', 'deque', (), '', False, 'from collections import deque, defaultdict\n'), ((315, 18, 316, 52), 'torch.FloatTensor', 'torch.FloatTensor', ({(315, 36, 316, 51): '[(0 if x else 1) for x in done]'}, {}), '([(0 if x else 1) for x in done])', False, 'import torch\n'), ((438, 15, 438, 38), 'numpy.sum', 'np.sum', ({(438, 22, 438, 37): 'g_total_rewards'}, {}), '(g_total_rewards)', True, 'import numpy as np\n'), ((503, 20, 503, 51), 'json.dump', 'json.dump', ({(503, 30, 503, 47): 'full_episode_data', (503, 49, 503, 50): 'f'}, {}), '(full_episode_data, f)', False, 'import json\n'), ((582, 32, 582, 58), 'numpy.mean', 'np.mean', ({(582, 40, 582, 57): 'g_episode_rewards'}, {}), 
'(g_episode_rewards)', True, 'import numpy as np\n'), ((612, 16, 612, 38), 'numpy.mean', 'np.mean', ({(612, 24, 612, 37): 'total_success'}, {}), '(total_success)', True, 'import numpy as np\n'), ((613, 16, 613, 34), 'numpy.mean', 'np.mean', ({(613, 24, 613, 33): 'total_spl'}, {}), '(total_spl)', True, 'import numpy as np\n'), ((614, 16, 614, 35), 'numpy.mean', 'np.mean', ({(614, 24, 614, 34): 'total_dist'}, {}), '(total_dist)', True, 'import numpy as np\n'), ((52, 14, 52, 36), 'torch.ones', 'torch.ones', ({(52, 25, 52, 35): 'num_scenes'}, {}), '(num_scenes)', False, 'import torch\n'), ((120, 15, 120, 58), 'torch.zeros', 'torch.zeros', ({(120, 27, 120, 37): 'num_scenes', (120, 39, 120, 41): 'nc', (120, 43, 120, 49): 'full_w', (120, 51, 120, 57): 'full_h'}, {}), '(num_scenes, nc, full_w, full_h)', False, 'import torch\n'), ((121, 16, 122, 36), 'torch.zeros', 'torch.zeros', ({(121, 28, 121, 38): 'num_scenes', (121, 40, 121, 42): 'nc', (121, 44, 121, 51): 'local_w', (122, 28, 122, 35): 'local_h'}, {}), '(num_scenes, nc, local_w, local_h)', False, 'import torch\n'), ((125, 16, 125, 42), 'torch.zeros', 'torch.zeros', ({(125, 28, 125, 38): 'num_scenes', (125, 40, 125, 41): '3'}, {}), '(num_scenes, 3)', False, 'import torch\n'), ((126, 17, 126, 43), 'torch.zeros', 'torch.zeros', ({(126, 29, 126, 39): 'num_scenes', (126, 41, 126, 42): '3'}, {}), '(num_scenes, 3)', False, 'import torch\n'), ((578, 21, 578, 47), 'numpy.mean', 'np.mean', ({(578, 29, 578, 46): 'g_episode_rewards'}, {}), '(g_episode_rewards)', True, 'import numpy as np\n'), ((581, 27, 581, 66), 'os.path.join', 'os.path.join', ({(581, 40, 581, 47): 'log_dir', (581, 49, 581, 65): '"""model_best.pth"""'}, {}), "(log_dir, 'model_best.pth')", False, 'import os\n'), ((510, 29, 510, 71), 'time.strftime', 'time.strftime', ({(510, 43, 510, 56): '"""%Hh %Mm %Ss"""', (510, 58, 510, 70): 'time_elapsed'}, {}), "('%Hh %Mm %Ss', time_elapsed)", False, 'import time\n'), ((546, 24, 546, 46), 'numpy.mean', 'np.mean', ({(546, 32, 546, 45): 'total_success'}, {}), '(total_success)', True, 'import numpy as np\n'), ((547, 24, 547, 42), 'numpy.mean', 'np.mean', ({(547, 32, 547, 41): 'total_spl'}, {}), '(total_spl)', True, 'import numpy as np\n'), ((548, 24, 548, 43), 'numpy.mean', 'np.mean', ({(548, 32, 548, 42): 'total_dist'}, {}), '(total_dist)', True, 'import numpy as np\n'), ((554, 24, 554, 48), 'numpy.mean', 'np.mean', ({(554, 32, 554, 47): 'episode_success'}, {}), '(episode_success)', True, 'import numpy as np\n'), ((555, 24, 555, 44), 'numpy.mean', 'np.mean', ({(555, 32, 555, 43): 'episode_spl'}, {}), '(episode_spl)', True, 'import numpy as np\n'), ((556, 24, 556, 45), 'numpy.mean', 'np.mean', ({(556, 32, 556, 44): 'episode_dist'}, {}), '(episode_dist)', True, 'import numpy as np\n'), ((218, 12, 218, 40), 'torch.from_numpy', 'torch.from_numpy', ({(218, 29, 218, 39): 'origins[e]'}, {}), '(origins[e])', False, 'import torch\n'), ((450, 22, 450, 44), 'torch.ones', 'torch.ones', ({(450, 33, 450, 43): 'num_scenes'}, {}), '(num_scenes)', False, 'import torch\n'), ((521, 24, 521, 51), 'numpy.mean', 'np.mean', ({(521, 32, 521, 50): 'per_step_g_rewards'}, {}), '(per_step_g_rewards)', True, 'import numpy as np\n'), ((522, 24, 522, 53), 'numpy.median', 'np.median', ({(522, 34, 522, 52): 'per_step_g_rewards'}, {}), '(per_step_g_rewards)', True, 'import numpy as np\n'), ((525, 24, 525, 50), 'numpy.mean', 'np.mean', ({(525, 32, 525, 49): 'g_episode_rewards'}, {}), '(g_episode_rewards)', True, 'import numpy as np\n'), ((526, 24, 526, 52), 'numpy.median', 'np.median', 
({(526, 34, 526, 51): 'g_episode_rewards'}, {}), '(g_episode_rewards)', True, 'import numpy as np\n'), ((527, 24, 527, 49), 'numpy.min', 'np.min', ({(527, 31, 527, 48): 'g_episode_rewards'}, {}), '(g_episode_rewards)', True, 'import numpy as np\n'), ((528, 24, 528, 49), 'numpy.max', 'np.max', ({(528, 31, 528, 48): 'g_episode_rewards'}, {}), '(g_episode_rewards)', True, 'import numpy as np\n'), ((564, 24, 564, 47), 'numpy.mean', 'np.mean', ({(564, 32, 564, 46): 'g_value_losses'}, {}), '(g_value_losses)', True, 'import numpy as np\n'), ((565, 24, 565, 48), 'numpy.mean', 'np.mean', ({(565, 32, 565, 47): 'g_action_losses'}, {}), '(g_action_losses)', True, 'import numpy as np\n'), ((566, 24, 566, 49), 'numpy.mean', 'np.mean', ({(566, 32, 566, 48): 'g_dist_entropies'}, {}), '(g_dist_entropies)', True, 'import numpy as np\n'), ((192, 16, 192, 44), 'torch.from_numpy', 'torch.from_numpy', ({(192, 33, 192, 43): 'origins[e]'}, {}), '(origins[e])', False, 'import torch\n'), ((397, 20, 397, 48), 'torch.from_numpy', 'torch.from_numpy', ({(397, 37, 397, 47): 'origins[e]'}, {}), '(origins[e])', False, 'import torch\n'), ((418, 20, 418, 48), 'torch.from_numpy', 'torch.from_numpy', ({(418, 37, 418, 47): 'origins[e]'}, {}), '(origins[e])', False, 'import torch\n')] |
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection | src/ITN/srmg/core/RiemannianRight.py | 2e35afaa891badf5a235b5d995102e4dc8a4cf0d | #!/usr/bin/env python
# coding=utf-8
'''
Author: Shuangchi He / Yulv
Email: [email protected]
Date: 2022-03-19 10:33:38
Motto: Entities should not be multiplied unnecessarily.
LastEditors: Shuangchi He
LastEditTime: 2022-03-23 00:52:55
FilePath: /Awesome-Ultrasound-Standard-Plane-Detection/src/ITN/srmg/core/RiemannianRight.py
Description: Modify here please
Init from https://github.com/yuanwei1989/plane-detection Author: Yuanwei Li (3 Oct 2018)
# Copyright (c) 2006-2017, Nina Milone, Bishesh Kanal, Benjamin Hou
# Copyright (c) 2006-2017, Imperial College of Science, Technology and Medicine
# Produced at Biomedical Image Analysis Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
Statistics on Riemannian Manifolds and Groups
---------------------------------------------
This is a set of scripts for comparing how the different types of means on Lie groups are computed.
These scripts can be used to reproduce the experiments illustrated in the video developed for the
MICCAI Educational challenge 2014, available at: url of the video.
:Authors:
`Nina Miolane <website>`
`Bishesh Khanal <website>`
:Organization:
Asclepios Team, INRIA Sophia Antipolis.
:Version:
2017.07.05
Requirements
------------
* `Numpy 1.11 <http://www.numpy.org>`_
Notes
-----
----------
(1) Defining a mean on Lie group.
Nina Miolane. Medical Imaging. 2013. <hal-00938320>
'''
import numpy
import math
from srmg.common.group import *
from srmg.common.util import *
EPS = 1e-5
def riemExpR(a,f0,v):
"""
start: TODO
What the function does
clearer function name ?
Inputs description:
Outputs description:
end: TODO
Riemannian exponential and logarithm from any point f0 (for left- and right-invariant metric)
"""
f = grpCompose((riemExpIdR(a, numpy.linalg.lstsq(jR(f0),v)[0])), f0)
return f
def riemExpIdR(a,v):
"""
start: TODO
What the function does
clearer function name ?
Inputs description:
Outputs description:
end: TODO
Riemannian exponential and logarithm from Id (for left- and right-invariant metric)
"""
v=grpReg(-v);
f = numpy.zeros(6)
f[0:3] = v[0:3]
f[3:6] = a * v[3:6]
f = grpInv(f)
return f
def sigma2R(a,m,tabf,tabw):
"""
start: TODO
What the function does
clearer function name ?
Inputs description:
Outputs description:
end: TODO
"""
siz = tabf.shape[0]
if siz < 2:
print('Error: Calculating variance requires at least 2 points')
return 0
s = 0
for i in range(0,siz):
s = s + tabw[i] * normA2R(a,m,riemLogR(a,m,tabf[i,:]));
return s
def riemLogR(a,f0,f):
"""
DESCRIPTION
Attributes:
a: ?????
f0: ????
f: ????
Return:
v: ?????
"""
v=numpy.dot(jR(f0),riemLogIdR(a,grpCompose(f,grpInv(f0))))
return v
def riemLogIdR(a,f):
"""
DESCRIPTION
Attributes:
a: ?????
f: ????
Return:
v: ?????
"""
v = numpy.zeros(6)
v[0:3] = f[0:3]
v[3:6] = numpy.dot(rotMat(-f[0:3]),f[3:6]);
return v
def qR(a,f):
"""
Left- and right- invariant inner product in the principal chart (propagation of Frobenius inner product)
Attributes:
a: ?????
f: ????
Return:
g: ?????
"""
f = grpReg(f)
g0 = numpy.zeros([6,6])
g0[0:3,0:3] = numpy.eye(3)
g0[3:6,3:6] = a * numpy.eye(3)
g = numpy.dot(numpy.dot(numpy.linalg.inv(jR(f).T) , g0) , numpy.linalg.inv(jR(f)))
return g
def jR(f):
"""
Differentials of the left and right translations for SO(3) in the principal chart
Attributes:
r: ?????
Return:
Jl: ?????
"""
#f = makeColVector(f,6); # unnecessary if 1D
f = grpReg(f);
Jr = numpy.zeros([6,6])
Jr[0:3,0:3] = jRotR(f[0:3]);
Jr[3:6,0:3] = -skew(f[3:6]);
Jr[3:6,3:6] = numpy.eye(3);
return Jr
def normA2R(a,f,v):
"""
    This function calculates the squared norm of a tangent vector under the right-invariant metric
    Attributes:
        a: weight of the translation part of the metric (see qR)
        f: SE(3) element, in the principal chart, at which the norm is taken
        v: tangent vector (6-vector) in the principal chart
    Return:
        n: squared norm of v (a scalar)
"""
v=grpReg(v);
n=numpy.dot(numpy.dot(v.T,qR(a,f)),v);
return n
def frechetR(a,tabf,tabw):
"""
    This function computes the Fréchet mean for the right-invariant metric
    Attributes:
        a: weight of the translation part of the metric (see qR)
tabf: SE3 data points (Nx6 vector)
tabw: data point weights (Nx1 vector)
Return:
m: The mean
"""
siz = tabf.shape[0]
if siz < 2:
print('Error: Calculating mean requires at least 2 points')
m = tabf[0,:]
# Iteration 0
mbis=m;
print('mbisR=' + str(mbis))
aux=numpy.zeros(6);
for i in range (0,siz):
aux=aux+tabw[i]*riemLogR(a,mbis,tabf[i,:]);
m=riemExpR(a,mbis,aux);
# Iteration 1 until converges
while (normA2R(a,mbis,riemLogR(a,mbis,m))>EPS*sigma2R(a,mbis,tabf,tabw)):
mbis=m;
print('mbisR=' + str(mbis))
aux=numpy.zeros(6);
for i in range (0,siz):
aux=aux+tabw[i]*riemLogR(a,mbis,tabf[i,:]);
m=riemExpR(a,mbis,aux);
return m
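# Illustrative usage sketch (an assumption, not part of the original module):
# estimating the Fréchet mean of three SE(3) elements given in the principal
# chart (Nx6 rows: rotation vector then translation) with uniform weights and
# metric parameter a = 1.
if __name__ == "__main__":
    tabf = numpy.zeros((3, 6))
    tabf[1, 0] = 0.1   # small rotation about the first axis
    tabf[2, 3] = 2.0   # translation along x
    tabw = numpy.ones(3) / 3.0
    print("Frechet mean:", frechetR(1.0, tabf, tabw))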
| [((99, 8, 99, 22), 'numpy.zeros', 'numpy.zeros', ({(99, 20, 99, 21): '6'}, {}), '(6)', False, 'import numpy\n'), ((152, 8, 152, 22), 'numpy.zeros', 'numpy.zeros', ({(152, 20, 152, 21): '6'}, {}), '(6)', False, 'import numpy\n'), ((170, 9, 170, 27), 'numpy.zeros', 'numpy.zeros', ({(170, 21, 170, 26): '[6, 6]'}, {}), '([6, 6])', False, 'import numpy\n'), ((171, 18, 171, 30), 'numpy.eye', 'numpy.eye', ({(171, 28, 171, 29): '3'}, {}), '(3)', False, 'import numpy\n'), ((188, 9, 188, 27), 'numpy.zeros', 'numpy.zeros', ({(188, 21, 188, 26): '[6, 6]'}, {}), '([6, 6])', False, 'import numpy\n'), ((191, 18, 191, 30), 'numpy.eye', 'numpy.eye', ({(191, 28, 191, 29): '3'}, {}), '(3)', False, 'import numpy\n'), ((235, 8, 235, 22), 'numpy.zeros', 'numpy.zeros', ({(235, 20, 235, 21): '6'}, {}), '(6)', False, 'import numpy\n'), ((172, 22, 172, 34), 'numpy.eye', 'numpy.eye', ({(172, 32, 172, 33): '(3)'}, {}), '(3)', False, 'import numpy\n'), ((244, 12, 244, 26), 'numpy.zeros', 'numpy.zeros', ({(244, 24, 244, 25): '6'}, {}), '(6)', False, 'import numpy\n')] |
StormDev87/VPH_bot_python | v0449gRpc_pb2.py | ae83a0b61e234912c0136ef0f176e7a88603ff28 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v0449gRpc.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0fv0449gRpc.proto\x12\tv0449gRpc\"\x1b\n\x0b\x64\x61taRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\x08\x64\x61ta2Plc\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1f\n\x0cslaveReq2Plc\x12\x0f\n\x07request\x18\x01 \x01(\x05\"\x1a\n\x08\x64\x61ta2Hmi\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1b\n\ndata2PlcJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1b\n\ndata2HmiJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1c\n\ndata2PlcPb\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1d\n\ndataAnswer\x12\x0f\n\x07message\x18\x01 \x01(\t2\x93\x01\n\x0cv0449gRpcSvc\x12=\n\x0bxchRtDataJs\x12\x15.v0449gRpc.data2PlcJs\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x12\x44\n\x10xchRtDataJsSlave\x12\x17.v0449gRpc.slaveReq2Plc\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x62\x06proto3')
_DATAREQUEST = DESCRIPTOR.message_types_by_name['dataRequest']
_DATA2PLC = DESCRIPTOR.message_types_by_name['data2Plc']
_SLAVEREQ2PLC = DESCRIPTOR.message_types_by_name['slaveReq2Plc']
_DATA2HMI = DESCRIPTOR.message_types_by_name['data2Hmi']
_DATA2PLCJS = DESCRIPTOR.message_types_by_name['data2PlcJs']
_DATA2HMIJS = DESCRIPTOR.message_types_by_name['data2HmiJs']
_DATA2PLCPB = DESCRIPTOR.message_types_by_name['data2PlcPb']
_DATAANSWER = DESCRIPTOR.message_types_by_name['dataAnswer']
dataRequest = _reflection.GeneratedProtocolMessageType('dataRequest', (_message.Message,), {
'DESCRIPTOR' : _DATAREQUEST,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.dataRequest)
})
_sym_db.RegisterMessage(dataRequest)
data2Plc = _reflection.GeneratedProtocolMessageType('data2Plc', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLC,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2Plc)
})
_sym_db.RegisterMessage(data2Plc)
slaveReq2Plc = _reflection.GeneratedProtocolMessageType('slaveReq2Plc', (_message.Message,), {
'DESCRIPTOR' : _SLAVEREQ2PLC,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.slaveReq2Plc)
})
_sym_db.RegisterMessage(slaveReq2Plc)
data2Hmi = _reflection.GeneratedProtocolMessageType('data2Hmi', (_message.Message,), {
'DESCRIPTOR' : _DATA2HMI,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2Hmi)
})
_sym_db.RegisterMessage(data2Hmi)
data2PlcJs = _reflection.GeneratedProtocolMessageType('data2PlcJs', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLCJS,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcJs)
})
_sym_db.RegisterMessage(data2PlcJs)
data2HmiJs = _reflection.GeneratedProtocolMessageType('data2HmiJs', (_message.Message,), {
'DESCRIPTOR' : _DATA2HMIJS,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2HmiJs)
})
_sym_db.RegisterMessage(data2HmiJs)
data2PlcPb = _reflection.GeneratedProtocolMessageType('data2PlcPb', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLCPB,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcPb)
})
_sym_db.RegisterMessage(data2PlcPb)
dataAnswer = _reflection.GeneratedProtocolMessageType('dataAnswer', (_message.Message,), {
'DESCRIPTOR' : _DATAANSWER,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.dataAnswer)
})
_sym_db.RegisterMessage(dataAnswer)
_V0449GRPCSVC = DESCRIPTOR.services_by_name['v0449gRpcSvc']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DATAREQUEST._serialized_start=30
_DATAREQUEST._serialized_end=57
_DATA2PLC._serialized_start=59
_DATA2PLC._serialized_end=85
_SLAVEREQ2PLC._serialized_start=87
_SLAVEREQ2PLC._serialized_end=118
_DATA2HMI._serialized_start=120
_DATA2HMI._serialized_end=146
_DATA2PLCJS._serialized_start=148
_DATA2PLCJS._serialized_end=175
_DATA2HMIJS._serialized_start=177
_DATA2HMIJS._serialized_end=204
_DATA2PLCPB._serialized_start=206
_DATA2PLCPB._serialized_end=234
_DATAANSWER._serialized_start=236
_DATAANSWER._serialized_end=265
_V0449GRPCSVC._serialized_start=268
_V0449GRPCSVC._serialized_end=415
# @@protoc_insertion_point(module_scope)
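# Illustrative client sketch (an assumption, not part of this generated module):
# the v0449gRpcSvc service above exposes xchRtDataJs(data2PlcJs) -> data2HmiJs,
# so a client built from the companion stubs (generated separately by
# grpcio-tools as v0449gRpc_pb2_grpc) would look roughly like this:
#
#   import grpc
#   import v0449gRpc_pb2, v0449gRpc_pb2_grpc
#   channel = grpc.insecure_channel("localhost:50051")   # hypothetical address
#   stub = v0449gRpc_pb2_grpc.v0449gRpcSvcStub(channel)
#   reply = stub.xchRtDataJs(v0449gRpc_pb2.data2PlcJs(jsSer='{"speed": 10}'))
#   print(reply.jsSer)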
| [((12, 10, 12, 36), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ({}, {}), '()', True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((29, 14, 33, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(29, 55, 29, 68): '"""dataRequest"""', (29, 70, 29, 89): '(_message.Message,)', (29, 91, 33, 3): "{'DESCRIPTOR': _DATAREQUEST, '__module__': 'v0449gRpc_pb2'}"}, {}), "('dataRequest', (_message.Message,),\n {'DESCRIPTOR': _DATAREQUEST, '__module__': 'v0449gRpc_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((36, 11, 40, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(36, 52, 36, 62): '"""data2Plc"""', (36, 64, 36, 83): '(_message.Message,)', (36, 85, 40, 3): "{'DESCRIPTOR': _DATA2PLC, '__module__': 'v0449gRpc_pb2'}"}, {}), "('data2Plc', (_message.Message,), {\n 'DESCRIPTOR': _DATA2PLC, '__module__': 'v0449gRpc_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((43, 15, 47, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(43, 56, 43, 70): '"""slaveReq2Plc"""', (43, 72, 43, 91): '(_message.Message,)', (43, 93, 47, 3): "{'DESCRIPTOR': _SLAVEREQ2PLC, '__module__': 'v0449gRpc_pb2'}"}, {}), "('slaveReq2Plc', (_message.Message,\n ), {'DESCRIPTOR': _SLAVEREQ2PLC, '__module__': 'v0449gRpc_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((50, 11, 54, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(50, 52, 50, 62): '"""data2Hmi"""', (50, 64, 50, 83): '(_message.Message,)', (50, 85, 54, 3): "{'DESCRIPTOR': _DATA2HMI, '__module__': 'v0449gRpc_pb2'}"}, {}), "('data2Hmi', (_message.Message,), {\n 'DESCRIPTOR': _DATA2HMI, '__module__': 'v0449gRpc_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((57, 13, 61, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(57, 54, 57, 66): '"""data2PlcJs"""', (57, 68, 57, 87): '(_message.Message,)', (57, 89, 61, 3): "{'DESCRIPTOR': _DATA2PLCJS, '__module__': 'v0449gRpc_pb2'}"}, {}), "('data2PlcJs', (_message.Message,),\n {'DESCRIPTOR': _DATA2PLCJS, '__module__': 'v0449gRpc_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((64, 13, 68, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(64, 54, 64, 66): '"""data2HmiJs"""', (64, 68, 64, 87): '(_message.Message,)', (64, 89, 68, 3): "{'DESCRIPTOR': _DATA2HMIJS, '__module__': 'v0449gRpc_pb2'}"}, {}), "('data2HmiJs', (_message.Message,),\n {'DESCRIPTOR': _DATA2HMIJS, '__module__': 'v0449gRpc_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((71, 13, 75, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(71, 54, 71, 66): '"""data2PlcPb"""', (71, 68, 71, 87): '(_message.Message,)', (71, 89, 75, 3): "{'DESCRIPTOR': _DATA2PLCPB, '__module__': 'v0449gRpc_pb2'}"}, {}), "('data2PlcPb', (_message.Message,),\n {'DESCRIPTOR': _DATA2PLCPB, '__module__': 'v0449gRpc_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((78, 13, 82, 4), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', ({(78, 54, 78, 66): '"""dataAnswer"""', (78, 68, 78, 87): 
'(_message.Message,)', (78, 89, 82, 3): "{'DESCRIPTOR': _DATAANSWER, '__module__': 'v0449gRpc_pb2'}"}, {}), "('dataAnswer', (_message.Message,),\n {'DESCRIPTOR': _DATAANSWER, '__module__': 'v0449gRpc_pb2'})", True, 'from google.protobuf import reflection as _reflection\n'), ((17, 13, 17, 39), 'google.protobuf.descriptor_pool.Default', '_descriptor_pool.Default', ({}, {}), '()', True, 'from google.protobuf import descriptor_pool as _descriptor_pool\n')] |
AlexsLemonade/resources-portal | api/resources_portal/test/views/test_search_endpoint.py | d91c6c8d6135461faccbc78ef2b0be3f9b358f21 | import datetime
from django.core.management import call_command
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from resources_portal.management.commands.populate_dev_database import populate_dev_database
from resources_portal.models import Material, Organization, User
class SearchMaterialsEndpointTestCase(APITestCase):
"""
Tests /search/materials operations.
"""
@classmethod
def setUpClass(cls):
super(SearchMaterialsEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
cls.secondary_prof = User.objects.get(username="SecondaryProf")
cls.post_doc = User.objects.get(username="PostDoc")
cls.primary_lab = Organization.objects.get(name="PrimaryLab")
cls.material1 = Material.objects.get(title="Melanoma Reduction Plasmid")
cls.material2 = Material.objects.get(title="Allele Extraction Protocol")
@classmethod
def tearDownClass(cls):
super(SearchMaterialsEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_title_finds_a_given_material(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?search=" + self.material1.title
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = int(response.json()["results"][0]["id"])
self.assertEqual(first_result_id, self.material1.id)
def test_filter_on_organization_retrieves_all_organization_materials(self):
# Archive one material to make sure it goes to the bottom of the list.
archived_material = Material.objects.first()
archived_material.is_archived = True
archived_material.save()
self.client.force_authenticate(user=self.primary_prof)
search_url = (
reverse("search-materials-list")
+ "?organization="
+ self.primary_lab.name
+ "&limit=25"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_json = response.json()
material_count = int(response_json["count"])
# Make sure archived materials are last:
self.assertEqual(response_json["results"][-1]["id"], archived_material.id)
material_titles = []
for material in response_json["results"]:
material_titles.append(material["title"])
self.assertEqual(material_count, len(self.primary_lab.materials.all()))
for title in material_titles:
self.assertTrue(
Material.objects.filter(title=title, organization=self.primary_lab).exists()
)
def test_filter_on_category_retrieves_all_materials_of_a_given_category(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?category=" + "MODEL_ORGANISM"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
self.assertEqual(material_count, len(Material.objects.filter(category="MODEL_ORGANISM")))
for title in material_titles:
self.assertTrue(
Material.objects.filter(title=title, category="MODEL_ORGANISM").exists()
)
def test_filter_on_organisms_retrieves_all_materials_with_one_organism(self):
self.client.force_authenticate(user=self.primary_prof)
# Search with one organism name
search_url = reverse("search-materials-list") + "?organisms=" + "danio rerio"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organism_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
database_organism_count = 0
database_titles = []
for material in Material.objects.all():
if material.organisms:
if "Danio rerio" in material.organisms:
database_organism_count += 1
database_titles.append(material.title)
self.assertEqual(organism_count, database_organism_count)
for title in material_titles:
self.assertTrue(title in database_titles)
def test_filter_on_organisms_retrieves_all_materials_with_multiple_organisms(self):
self.client.force_authenticate(user=self.primary_prof)
        # Search with two organism names
search_url = (
reverse("search-materials-list")
+ "?organisms="
+ "danio rerio"
+ "&organisms="
+ "mus musculus"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organism_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
database_organism_count = 0
database_titles = []
for material in Material.objects.all():
if material.organisms:
if ("Danio rerio" in material.organisms) or ("Mus musculus" in material.organisms):
database_organism_count += 1
database_titles.append(material.title)
self.assertEqual(organism_count, database_organism_count)
for title in material_titles:
self.assertTrue(title in database_titles)
def test_ordering_on_updated_at_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?ordering=" + "updated_at"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_dates = []
for material in response.json()["results"]:
date = datetime.datetime.strptime(
material["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z"
).date()
material_dates.append(date)
self.assertEqual(material_dates, sorted(material_dates))
def test_combine_search_and_filter_and_ordering_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = (
            reverse("search-materials-list")
            + "?search=MODEL_ORGANISM"
            + "&ordering=updated_at"
            + "&has_pre_print=true"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_dates = []
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
date = datetime.datetime.strptime(
material["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z"
).date()
material_dates.append(date)
self.assertEqual(material_dates, sorted(material_dates))
for title in material_titles:
self.assertTrue(
Material.objects.filter(
title=title, category="MODEL_ORGANISM", has_pre_print=True
).exists()
)
def test_facets_return_number_of_materials(self):
self.client.force_authenticate(user=self.primary_prof)
# Search with no params
search_url = reverse("search-materials-list")
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
model_organism_count = int(response.json()["facets"]["category"]["MODEL_ORGANISM"])
self.assertEqual(
model_organism_count, len(Material.objects.filter(category="MODEL_ORGANISM"))
)
# Search for only danio rerio organisms
search_url = reverse("search-materials-list") + "?search=danio rerio"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
model_organism_count = int(response.json()["facets"]["category"]["MODEL_ORGANISM"])
database_count = 0
for material in Material.objects.all():
if material.organisms:
if ("Danio rerio" in material.organisms) and (
material.category == "MODEL_ORGANISM"
):
database_count += 1
self.assertEqual(model_organism_count, database_count)
def test_empty_search_returns_no_results(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?search="
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_count = int(response.json()["count"])
self.assertEqual(material_count, 0)
class SearchUsersEndpointTestCase(APITestCase):
"""
Tests /search/users operations.
"""
@classmethod
def setUpClass(cls):
super(SearchUsersEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
@classmethod
def tearDownClass(cls):
super(SearchUsersEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_name_returns_given_user(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = (
reverse("search-users-list")
+ "?search="
+ self.primary_prof.first_name
+ " "
+ self.primary_prof.last_name
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = response.json()["results"][0]["id"]
self.assertEqual(first_result_id, str(self.primary_prof.id))
def test_order_by_published_name_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-users-list") + "?ordering=published_name"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user_published_names = []
for user in response.json()["results"]:
if user["published_name"]:
user_published_names.append(user["published_name"])
self.assertEqual(user_published_names, sorted(user_published_names))
def test_empty_search_returns_no_results(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-users-list") + "?search="
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user_count = int(response.json()["count"])
self.assertEqual(user_count, 0)
class SearchOrganizationsEndpointTestCase(APITestCase):
"""
Tests /search/organizations operations.
"""
@classmethod
def setUpClass(cls):
super(SearchOrganizationsEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
cls.primary_lab = Organization.objects.get(name="PrimaryLab")
@classmethod
def tearDownClass(cls):
super(SearchOrganizationsEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_organization_name_returns_given_organization(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?search=" + self.primary_lab.name
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = int(response.json()["results"][0]["id"])
self.assertEqual(first_result_id, self.primary_lab.id)
def test_search_for_owner_attribute_returns_related_organizations(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?search=" + self.primary_prof.email
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organization_count = int(response.json()["count"])
organization_names = []
for org in response.json()["results"]:
organization_names.append(org["name"])
self.assertEqual(
organization_count, len(Organization.objects.filter(owner=self.primary_prof))
)
for name in organization_names:
self.assertTrue(
Organization.objects.filter(name=name, owner=self.primary_prof).exists()
)
def test_ordering_on_updated_at_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?ordering=" + "updated_at"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organization_dates = []
for org in response.json()["results"]:
date = datetime.datetime.strptime(org["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z").date()
organization_dates.append(date)
self.assertEqual(organization_dates, sorted(organization_dates))
| [((21, 8, 21, 31), 'resources_portal.management.commands.populate_dev_database.populate_dev_database', 'populate_dev_database', ({}, {}), '()', False, 'from resources_portal.management.commands.populate_dev_database import populate_dev_database\n'), ((24, 8, 24, 55), 'django.core.management.call_command', 'call_command', ({(24, 21, 24, 35): '"""search_index"""', (24, 37, 24, 41): '"""-f"""', (24, 43, 24, 54): '"""--rebuild"""'}, {}), "('search_index', '-f', '--rebuild')", False, 'from django.core.management import call_command\n'), ((26, 27, 26, 67), 'resources_portal.models.User.objects.get', 'User.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((27, 29, 27, 71), 'resources_portal.models.User.objects.get', 'User.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((28, 23, 28, 59), 'resources_portal.models.User.objects.get', 'User.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((30, 26, 30, 69), 'resources_portal.models.Organization.objects.get', 'Organization.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((32, 24, 32, 80), 'resources_portal.models.Material.objects.get', 'Material.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((33, 24, 33, 80), 'resources_portal.models.Material.objects.get', 'Material.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((40, 8, 40, 55), 'django.core.management.call_command', 'call_command', ({(40, 21, 40, 35): '"""search_index"""', (40, 37, 40, 41): '"""-f"""', (40, 43, 40, 54): '"""--rebuild"""'}, {}), "('search_index', '-f', '--rebuild')", False, 'from django.core.management import call_command\n'), ((56, 28, 56, 52), 'resources_portal.models.Material.objects.first', 'Material.objects.first', ({}, {}), '()', False, 'from resources_portal.models import Material, Organization, User\n'), ((127, 24, 127, 46), 'resources_portal.models.Material.objects.all', 'Material.objects.all', ({}, {}), '()', False, 'from resources_portal.models import Material, Organization, User\n'), ((161, 24, 161, 46), 'resources_portal.models.Material.objects.all', 'Material.objects.all', ({}, {}), '()', False, 'from resources_portal.models import Material, Organization, User\n'), ((225, 21, 225, 53), 'django.urls.reverse', 'reverse', ({(225, 29, 225, 52): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((245, 24, 245, 46), 'resources_portal.models.Material.objects.all', 'Material.objects.all', ({}, {}), '()', False, 'from resources_portal.models import Material, Organization, User\n'), ((276, 8, 276, 31), 'resources_portal.management.commands.populate_dev_database.populate_dev_database', 'populate_dev_database', ({}, {}), '()', False, 'from resources_portal.management.commands.populate_dev_database import populate_dev_database\n'), ((279, 8, 279, 55), 'django.core.management.call_command', 'call_command', ({(279, 21, 279, 35): '"""search_index"""', (279, 37, 279, 41): '"""-f"""', (279, 43, 279, 54): '"""--rebuild"""'}, {}), "('search_index', '-f', '--rebuild')", False, 'from django.core.management import call_command\n'), ((281, 27, 281, 67), 'resources_portal.models.User.objects.get', 'User.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((288, 8, 288, 55), 
'django.core.management.call_command', 'call_command', ({(288, 21, 288, 35): '"""search_index"""', (288, 37, 288, 41): '"""-f"""', (288, 43, 288, 54): '"""--rebuild"""'}, {}), "('search_index', '-f', '--rebuild')", False, 'from django.core.management import call_command\n'), ((345, 8, 345, 31), 'resources_portal.management.commands.populate_dev_database.populate_dev_database', 'populate_dev_database', ({}, {}), '()', False, 'from resources_portal.management.commands.populate_dev_database import populate_dev_database\n'), ((348, 8, 348, 55), 'django.core.management.call_command', 'call_command', ({(348, 21, 348, 35): '"""search_index"""', (348, 37, 348, 41): '"""-f"""', (348, 43, 348, 54): '"""--rebuild"""'}, {}), "('search_index', '-f', '--rebuild')", False, 'from django.core.management import call_command\n'), ((350, 27, 350, 67), 'resources_portal.models.User.objects.get', 'User.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((351, 26, 351, 69), 'resources_portal.models.Organization.objects.get', 'Organization.objects.get', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((358, 8, 358, 55), 'django.core.management.call_command', 'call_command', ({(358, 21, 358, 35): '"""search_index"""', (358, 37, 358, 41): '"""-f"""', (358, 43, 358, 54): '"""--rebuild"""'}, {}), "('search_index', '-f', '--rebuild')", False, 'from django.core.management import call_command\n'), ((237, 21, 237, 53), 'django.urls.reverse', 'reverse', ({(237, 29, 237, 52): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((257, 21, 257, 53), 'django.urls.reverse', 'reverse', ({(257, 29, 257, 52): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((311, 21, 311, 49), 'django.urls.reverse', 'reverse', ({(311, 29, 311, 48): '"""search-users-list"""'}, {}), "('search-users-list')", False, 'from django.urls import reverse\n'), ((326, 21, 326, 49), 'django.urls.reverse', 'reverse', ({(326, 29, 326, 48): '"""search-users-list"""'}, {}), "('search-users-list')", False, 'from django.urls import reverse\n'), ((45, 21, 45, 53), 'django.urls.reverse', 'reverse', ({(45, 29, 45, 52): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((92, 21, 92, 53), 'django.urls.reverse', 'reverse', ({(92, 29, 92, 52): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((103, 45, 103, 95), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((114, 21, 114, 53), 'django.urls.reverse', 'reverse', ({(114, 29, 114, 52): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((175, 21, 175, 53), 'django.urls.reverse', 'reverse', ({(175, 29, 175, 52): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((233, 38, 233, 88), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((363, 21, 363, 57), 'django.urls.reverse', 'reverse', ({(363, 29, 363, 56): '"""search-organizations-list"""'}, {}), "('search-organizations-list')", False, 'from django.urls import reverse\n'), ((375, 21, 375, 57), 
'django.urls.reverse', 'reverse', ({(375, 29, 375, 56): '"""search-organizations-list"""'}, {}), "('search-organizations-list')", False, 'from django.urls import reverse\n'), ((387, 36, 387, 88), 'resources_portal.models.Organization.objects.filter', 'Organization.objects.filter', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((398, 21, 398, 57), 'django.urls.reverse', 'reverse', ({(398, 29, 398, 56): '"""search-organizations-list"""'}, {}), "('search-organizations-list')", False, 'from django.urls import reverse\n'), ((63, 12, 63, 44), 'django.urls.reverse', 'reverse', ({(63, 20, 63, 43): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((182, 19, 184, 13), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(183, 16, 183, 38): "material['updated_at']", (183, 40, 183, 64): '"""%Y-%m-%dT%H:%M:%S.%f%z"""'}, {}), "(material['updated_at'], '%Y-%m-%dT%H:%M:%S.%f%z')", False, 'import datetime\n'), ((193, 12, 193, 44), 'django.urls.reverse', 'reverse', ({(193, 20, 193, 43): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((207, 19, 209, 13), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(208, 16, 208, 38): "material['updated_at']", (208, 40, 208, 64): '"""%Y-%m-%dT%H:%M:%S.%f%z"""'}, {}), "(material['updated_at'], '%Y-%m-%dT%H:%M:%S.%f%z')", False, 'import datetime\n'), ((405, 19, 405, 90), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(405, 46, 405, 63): "org['updated_at']", (405, 65, 405, 89): '"""%Y-%m-%dT%H:%M:%S.%f%z"""'}, {}), "(org['updated_at'], '%Y-%m-%dT%H:%M:%S.%f%z')", False, 'import datetime\n'), ((86, 16, 86, 83), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((107, 16, 107, 79), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((143, 12, 143, 44), 'django.urls.reverse', 'reverse', ({(143, 20, 143, 43): '"""search-materials-list"""'}, {}), "('search-materials-list')", False, 'from django.urls import reverse\n'), ((216, 16, 218, 17), 'resources_portal.models.Material.objects.filter', 'Material.objects.filter', (), '', False, 'from resources_portal.models import Material, Organization, User\n'), ((294, 12, 294, 40), 'django.urls.reverse', 'reverse', ({(294, 20, 294, 39): '"""search-users-list"""'}, {}), "('search-users-list')", False, 'from django.urls import reverse\n'), ((392, 16, 392, 79), 'resources_portal.models.Organization.objects.filter', 'Organization.objects.filter', (), '', False, 'from resources_portal.models import Material, Organization, User\n')] |
hboshnak/mindarmour | mindarmour/utils/logger.py | 0609a4eaea875a84667bed279add9305752880cc | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Util for log module. """
import logging
_LOGGER = logging.getLogger('MA')
def _find_caller():
"""
Bind findCaller() method, which is used to find the stack frame of the
caller so that we can note the source file name, line number and
function name.
"""
return _LOGGER.findCaller()
class LogUtil:
"""
Logging module.
Raises:
SyntaxError: If create this class.
"""
_instance = None
_logger = None
_extra_fmt = ' [%s] [%s] '
def __init__(self):
raise SyntaxError('can not instance, please use get_instance.')
@staticmethod
def get_instance():
"""
Get instance of class `LogUtil`.
Returns:
Object, instance of class `LogUtil`.
"""
if LogUtil._instance is None:
LogUtil._instance = object.__new__(LogUtil)
LogUtil._logger = _LOGGER
LogUtil._init_logger()
return LogUtil._instance
@staticmethod
def _init_logger():
"""
Initialize logger.
"""
LogUtil._logger.setLevel(logging.WARNING)
log_fmt = '[%(levelname)s] %(name)s(%(process)d:%(thread)d,' \
'%(processName)s):%(asctime)s%(message)s'
log_fmt = logging.Formatter(log_fmt)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_fmt)
# add the handlers to the logger
LogUtil._logger.handlers = []
LogUtil._logger.addHandler(console_handler)
LogUtil._logger.propagate = False
def set_level(self, level):
"""
Set the logging level of this logger, level must be an integer or a
string. Supported levels are 'NOTSET'(integer: 0), 'ERROR'(integer: 1-40),
'WARNING'('WARN', integer: 1-30), 'INFO'(integer: 1-20) and 'DEBUG'(integer: 1-10).
For example, if logger.set_level('WARNING') or logger.set_level(21), then
logger.warn() and logger.error() in scripts would be printed while running,
while logger.info() or logger.debug() would not be printed.
Args:
level (Union[int, str]): Level of logger.
"""
self._logger.setLevel(level)
def add_handler(self, handler):
"""
Add other handler supported by logging module.
Args:
handler (logging.Handler): Other handler supported by logging module.
Raises:
ValueError: If handler is not an instance of logging.Handler.
"""
if isinstance(handler, logging.Handler):
self._logger.addHandler(handler)
else:
raise ValueError('handler must be an instance of logging.Handler,'
' but got {}'.format(type(handler)))
def debug(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'DEBUG'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.debug(self._extra_fmt + msg, file_info, tag, *args)
def info(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'INFO'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.info(self._extra_fmt + msg, file_info, tag, *args)
def warn(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'WARNING'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.warning(self._extra_fmt + msg, file_info, tag, *args)
def error(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'ERROR'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.error(self._extra_fmt + msg, file_info, tag, *args)
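# Added usage sketch (hedged; not part of the original module). The tag and
# message below are illustrative only: with the default console handler set up
# in _init_logger(), the INFO record prints and the DEBUG record is filtered.
if __name__ == '__main__':
    demo_logger = LogUtil.get_instance()
    demo_logger.set_level(logging.INFO)
    demo_logger.info('Demo', 'loaded %d samples', 128)
    demo_logger.debug('Demo', 'this message is suppressed at INFO level')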
| [((17, 10, 17, 33), 'logging.getLogger', 'logging.getLogger', ({(17, 28, 17, 32): '"""MA"""'}, {}), "('MA')", False, 'import logging\n'), ((66, 18, 66, 44), 'logging.Formatter', 'logging.Formatter', ({(66, 36, 66, 43): 'log_fmt'}, {}), '(log_fmt)', False, 'import logging\n'), ((69, 26, 69, 49), 'logging.StreamHandler', 'logging.StreamHandler', ({}, {}), '()', False, 'import logging\n')] |
teodoramilcheva/softuni-software-engineering | Python/Programming Basics/Simple Calculations/17. Daily Earnings.py | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | workdays = float(input())
daily_tips = float(input())
exchange_rate = float(input())
salary = workdays * daily_tips
annual_income = salary * 12 + salary * 2.5
net_income = annual_income - annual_income * 25 / 100
result = net_income / 365 * exchange_rate
print('%.2f' % result)
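# Added worked example (illustrative numbers only, not part of the exercise):
# with 21 workdays, 100 in daily tips and an exchange rate of 1.95, the
# monthly salary is 2100, the annual income is 2100 * 12 + 2100 * 2.5 = 30450,
# the net income after the 25% deduction is 22837.50, and
# 22837.50 / 365 * 1.95 prints 122.01.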
| [] |
satya77/transformer_rankers | bert_rerannker_eval.py | 0d2c20bd26041d887fb65102020a0b609ec967fc | from transformer_rankers.trainers import transformer_trainer
from transformer_rankers.datasets import dataset, preprocess_scisumm_ranked
from transformer_rankers.eval import results_analyses_tools
from transformers import BertTokenizer, BertForSequenceClassification
from sacred.observers import FileStorageObserver
from sacred import Experiment
import numpy as np
import torch
import pandas as pd
import argparse
import logging
import sys
ex = Experiment('BERT-ranker experiment')
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.StreamHandler(sys.stdout)
]
)
@ex.main
def run_experiment(args):
args.run_id = str(ex.current_run._id)
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
train, valid, test = preprocess_scisumm_ranked.transform_to_dfs(
args.path_to_ranked_file,args.path_to_ranked_test,args.path_to_ranked_dev)
# Choose the negative candidate sampler
ns_train=None
ns_val=None
# Create the loaders for the datasets, with the respective negative samplers
dataloader = dataset.QueryDocumentDataLoader(train, valid, test,
tokenizer, ns_train, ns_val,
'classification', args.val_batch_size,
args.val_batch_size, 512,
0, args.data_folder + "/scisumm_ranked")
with_ranked_list=True
train_loader, val_loader, test_loader = dataloader.get_pytorch_dataloaders(with_ranked_list)
# Instantiate transformer model to be used
model = BertForSequenceClassification.from_pretrained('bert-base-cased')
model.resize_token_embeddings(len(dataloader.tokenizer))
e = torch.load(args.model_dir)
model.load_state_dict(e)
model.eval()
# Instantiate trainer that handles fitting.
trainer = transformer_trainer.TransformerTrainer(model, train_loader, val_loader, test_loader,
0, "classification", tokenizer,
False, 0,
0 ,0, 0)
# Predict for test
logging.info("Predicting")
preds, labels, doc_ids, all_queries, preds_without_acc = trainer.test()
# res = results_analyses_tools.evaluate_and_aggregate(preds, labels, ['R_10@1',
# 'R_10@2',
# 'R_10@5',
# 'R_2@1',
# 'accuracy_0.3',
# 'accuracy_0.3_upto_1',
# 'precision_0.3',
# 'recall_0.3',
# 'f_score_0.3',
# 'accuracy_0.4',
# 'accuracy_0.4_upto_1',
# 'precision_0.4',
# 'recall_0.4',
# 'f_score_0.4',
# 'accuracy_0.5',
# 'accuracy_0.5_upto_1',
# 'precision_0.5',
# 'recall_0.5',
# 'f_score_0.5'
# ])
# for metric, v in res.items():
# logging.info("Test {} : {:4f}".format(metric, v))
# # Saving predictions and labels to a file
# max_preds_column = max([len(l) for l in preds])
# preds_df = pd.DataFrame(preds, columns=["prediction_" + str(i) for i in range(max_preds_column)])
# preds_df.to_csv(args.output_dir + "/" + args.run_id + "/predictions.csv", index=False)
#
# labels_df = pd.DataFrame(labels, columns=["label_" + str(i) for i in range(max_preds_column)])
# labels_df.to_csv(args.output_dir + "/" + args.run_id + "/labels.csv", index=False)
# # predict on the test set
# preds, labels, doc_ids, all_queries, preds_without_acc = trainer.test()
new_preds=list((np.array(preds_without_acc)> 0.4).astype(int))
d = {'query': all_queries, 'doc_id': doc_ids,'label': new_preds, 'similiarity':preds_without_acc}
df_doc_ids = pd.DataFrame(d)
import pdb
pdb.set_trace()
df_doc_ids = df_doc_ids.groupby('query').agg(list).reset_index()
# df_doc_ids_ones = df_doc_ids[df_doc_ids['label']==1]
# df_doc_ids_ones = df_doc_ids_ones.groupby('query').agg(list).reset_index()
# df_doc_ids_non_ones = df_doc_ids.groupby('query').agg(list).reset_index()
# new_df=[]
# for i,row in df_doc_ids_non_ones.iterrows():
# if all([v == 0 for v in row['label']]):
# highest_value=[x for _, x in sorted(zip(row['similiarity'], row['doc_id']), key=lambda pair: pair[0])]
# highest_value_sim=[x for x in sorted(row['similiarity'])]
#
# row['label'] = [1]
# row[ 'doc_id'] = [highest_value[0]]
# row[ 'similiarity'] = [highest_value_sim[0]]
#
# new_df.append(row)
# result = pd.concat([df_doc_ids,pd.DataFrame(new_df)])
df_doc_ids.to_csv(args.output_dir + "/" + args.run_id + "/doc_ids_test_all_results.csv", index=False, sep='\t')
return trainer.best_ndcg
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data_folder", default=None, type=str, required=True,
help="the folder containing data")
parser.add_argument("--model_dir", default=None, type=str, required=True,
help="the folder that the model is saved in.")
parser.add_argument("--val_batch_size", default=32, type=int, required=False,
help="Validation and test batch size.")
parser.add_argument("--path_to_ranked_file", default=None, type=str, required=False,
help="if there is a ranked file this will be the path to it. ")
parser.add_argument("--path_to_ranked_test", default=None, type=str, required=False,
help="if there is a ranked test file this will be the path to it. ")
parser.add_argument("--path_to_ranked_dev", default=None, type=str, required=False,
help="if there is a ranked test file this will be the path to it. ")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="the folder to output predictions")
args = parser.parse_args()
args.sacred_ex = ex
ex.observers.append(FileStorageObserver(args.output_dir))
ex.add_config({'args': args})
return ex.run()
if __name__ == "__main__":
main() | [((15, 5, 15, 41), 'sacred.Experiment', 'Experiment', ({(15, 16, 15, 40): '"""BERT-ranker experiment"""'}, {}), "('BERT-ranker experiment')", False, 'from sacred import Experiment\n'), ((30, 16, 30, 64), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', ({(30, 46, 30, 63): '"""bert-base-cased"""'}, {}), "('bert-base-cased')", False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((32, 25, 33, 82), 'transformer_rankers.datasets.preprocess_scisumm_ranked.transform_to_dfs', 'preprocess_scisumm_ranked.transform_to_dfs', ({(33, 8, 33, 32): 'args.path_to_ranked_file', (33, 33, 33, 57): 'args.path_to_ranked_test', (33, 58, 33, 81): 'args.path_to_ranked_dev'}, {}), '(args.path_to_ranked_file, args.\n path_to_ranked_test, args.path_to_ranked_dev)', False, 'from transformer_rankers.datasets import dataset, preprocess_scisumm_ranked\n'), ((41, 17, 45, 91), 'transformer_rankers.datasets.dataset.QueryDocumentDataLoader', 'dataset.QueryDocumentDataLoader', ({(41, 49, 41, 54): 'train', (41, 56, 41, 61): 'valid', (41, 63, 41, 67): 'test', (42, 49, 42, 58): 'tokenizer', (42, 60, 42, 68): 'ns_train', (42, 70, 42, 76): 'ns_val', (43, 49, 43, 65): '"""classification"""', (43, 67, 43, 86): 'args.val_batch_size', (44, 49, 44, 68): 'args.val_batch_size', (44, 70, 44, 73): '512', (45, 49, 45, 50): '0', (45, 52, 45, 90): "args.data_folder + '/scisumm_ranked'"}, {}), "(train, valid, test, tokenizer, ns_train,\n ns_val, 'classification', args.val_batch_size, args.val_batch_size, 512,\n 0, args.data_folder + '/scisumm_ranked')", False, 'from transformer_rankers.datasets import dataset, preprocess_scisumm_ranked\n'), ((50, 12, 50, 76), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', ({(50, 58, 50, 75): '"""bert-base-cased"""'}, {}), "('bert-base-cased')", False, 'from transformers import BertTokenizer, BertForSequenceClassification\n'), ((52, 8, 52, 34), 'torch.load', 'torch.load', ({(52, 19, 52, 33): 'args.model_dir'}, {}), '(args.model_dir)', False, 'import torch\n'), ((58, 14, 61, 61), 'transformer_rankers.trainers.transformer_trainer.TransformerTrainer', 'transformer_trainer.TransformerTrainer', ({(58, 53, 58, 58): 'model', (58, 60, 58, 72): 'train_loader', (58, 74, 58, 84): 'val_loader', (58, 86, 58, 97): 'test_loader', (59, 53, 59, 54): '0', (59, 56, 59, 72): '"""classification"""', (59, 74, 59, 83): 'tokenizer', (60, 53, 60, 58): 'False', (60, 60, 60, 61): '0', (61, 53, 61, 54): '0', (61, 56, 61, 57): '0', (61, 59, 61, 60): '0'}, {}), "(model, train_loader, val_loader,\n test_loader, 0, 'classification', tokenizer, False, 0, 0, 0, 0)", False, 'from transformer_rankers.trainers import transformer_trainer\n'), ((63, 4, 63, 30), 'logging.info', 'logging.info', ({(63, 17, 63, 29): '"""Predicting"""'}, {}), "('Predicting')", False, 'import logging\n'), ((102, 17, 102, 32), 'pandas.DataFrame', 'pd.DataFrame', ({(102, 30, 102, 31): 'd'}, {}), '(d)', True, 'import pandas as pd\n'), ((104, 4, 104, 19), 'pdb.set_trace', 'pdb.set_trace', ({}, {}), '()', False, 'import pdb\n'), ((130, 13, 130, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((150, 24, 150, 60), 'sacred.observers.FileStorageObserver', 'FileStorageObserver', ({(150, 44, 150, 59): 'args.output_dir'}, {}), '(args.output_dir)', False, 'from sacred.observers import FileStorageObserver\n'), ((21, 8, 21, 41), 'logging.StreamHandler', 'logging.StreamHandler', ({(21, 30, 21, 40): 
'sys.stdout'}, {}), '(sys.stdout)', False, 'import logging\n'), ((99, 20, 99, 47), 'numpy.array', 'np.array', ({(99, 29, 99, 46): 'preds_without_acc'}, {}), '(preds_without_acc)', True, 'import numpy as np\n')] |
tonyfg/project_euler | python/p21.py | 3a9e6352a98faaa506056b42160c91bffe93838c | #Q: Evaluate the sum of all the amicable numbers under 10000.
#A: 31626
def divisor_sum(n):
return sum([i for i in xrange (1, n//2+1) if not n%i])
def sum_amicable(start, end):
sum = 0
for i in xrange(start, end):
tmp = divisor_sum(i)
if i == divisor_sum(tmp) and i != tmp:
sum += i+tmp
return sum/2 #each pair is found twice, so divide by 2 ;)
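# Added note (illustrative): the smallest amicable pair is 220 and 284, since
# divisor_sum(220) == 284 and divisor_sum(284) == 220, so that pair alone
# contributes 220 + 284 = 504 to the total below.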
print sum_amicable(1,10000)
| [] |
Dysoncat/student-services-slas-chat-bot | check.py | 5d9c7105cef640c34018d260249b6a05b959e73f | import long_responses as long
# Returns the probability of a message matching the responses that we have
def messageProb(userMessage, recognizedWords, isSingleResponse=False, requiredWords=[]):
messageCertainty = 0
hasRequiredWords = True
# Counts how many words are present in each predefined message
for word in userMessage:
if word in recognizedWords:
messageCertainty += 1
# Calculates the percent of recognized words in a user message
percentage = float(messageCertainty) / float(len(recognizedWords))
# Checks that the required words are in the string
for word in requiredWords:
if word not in userMessage:
hasRequiredWords = False
break
# Must either have the required words, or be a single response
if hasRequiredWords or isSingleResponse:
return int(percentage * 100)
else:
return 0
# Checks all the responses using the probability of the messages
def checkAllMesages(message):
highest_prob_list = {}
ignore_list = {}
def ignoreResponse(bot_response, list_of_words, single_response=False, required_words=[]):
nonlocal ignore_list
ignore_list[bot_response] = messageProb(
message, list_of_words, single_response, required_words)
# Simplifies response creation / adds it to the dict
def response(bot_response, list_of_words, single_response=False, required_words=[]):
nonlocal highest_prob_list
highest_prob_list[bot_response] = messageProb(
message, list_of_words, single_response, required_words)
# Responses -------------------------------------------------------------------------------------------------------
response('Hello!', ['hello', 'hi', 'hey',
'sup', 'heyo'], single_response=True)
response('See you!', ['bye', 'goodbye'], single_response=True)
response('I\'m doing fine, and you?', [
'how', 'are', 'you', 'doing'], required_words=['how', "you"])
response('You\'re welcome!', ['thank', 'thanks'], single_response=True)
response("You can borrow a computer from room 315", ["how", "do", "i", "borrow", "a", "computer"], required_words=["borrow", "computer"])
    response("You can apply for a new locker key in room 310", ["how", "can", "i", "apply", "for", "a", "new", "locker", "key"], required_words=["new", "locker", "key"])
response("The guidance office is on the third floor", [
"where", "is", "the", "guidance", "office"], required_words=["guidance", "office"])
    response("You can apply for the ID in room 310", [
             "how", "can", "i", "get", "new", "id"], required_words=["new", "id"])
    response("A student ID costs 25 RMB, and it has to be in cash", [
             "how", "much", "does", "a", "new", "id", "cost"], required_words=["id", "cost"])
    response("The secondary computer classroom is on the fifth floor, and is number 521", [
             "where", "is", "the", "secondary", "computer", "classroom"], required_words=["secondary", "computer"])
    response("Don't worry about it.", ["sorry", "sry"], required_words=["sorry", "sry"])
# Ignored Responses
ignoreResponse("Good to hear", [
"i", "doing", "good", "fine", "ok"], required_words=["i", "good"])
best_ignore_match = max(ignore_list, key=ignore_list.get)
# Longer responses
response(long.R_ADVICE, ['give', 'advice'], required_words=['advice'])
response(long.R_EATING, ['what', 'you', 'eat'],
required_words=['you', 'eat'])
response(long.R_SWEARING, [
"fuck", "shit", "motherfucker", "fuck", "you"])
best_match = max(highest_prob_list, key=highest_prob_list.get)
# DEBUGGING TOOLS IF NEEDED
print(highest_prob_list)
print("")
print(
f'Best match = {best_match} | Score: {highest_prob_list[best_match]}')
if highest_prob_list[best_match] < ignore_list[best_ignore_match]:
return best_ignore_match
elif highest_prob_list[best_match] < 1:
return long.unknown()
else:
return best_match
| [((97, 15, 97, 29), 'long_responses.unknown', 'long.unknown', ({}, {}), '()', True, 'import long_responses as long\n')] |
jdalzatec/streamlit-manizales-tech-talks | image_predictor/utils.py | 619af5edc79a22ed4cc9f50dd2d0379399357549 | from io import StringIO
import numpy as np
from h5py import File
from keras.models import load_model as keras_load_model
from PIL import Image, ImageOps
def predict(image, model):
# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# Replace this with the path to your image
image = Image.open(image)
# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array
image_array = np.asarray(image)
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
return prediction[0]
def read_labels(labels_file):
labels = []
lines = StringIO(labels_file.getvalue().decode()).readlines()
for line in lines:
_, *remaining = line.split()
label = " ".join(remaining).strip()
labels.append(label)
return labels
def load_model(model_file):
return keras_load_model(File(model_file))
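# Added usage sketch (hedged; not part of the original module). The file names
# below are placeholders; the real app passes uploaded file objects (e.g. from
# Streamlit), which is why read_labels() expects something with .getvalue().
if __name__ == "__main__":
    from io import BytesIO
    with open("keras_model.h5", "rb") as model_file:
        model = load_model(model_file)
    with open("labels.txt", "rb") as f:
        labels = read_labels(BytesIO(f.read()))
    scores = predict("example.jpg", model)
    # Pair every label with its score and report the most likely class.
    best_label, best_score = max(zip(labels, scores), key=lambda pair: pair[1])
    print(f"{best_label}: {best_score:.3f}")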
| [((13, 11, 13, 63), 'numpy.ndarray', 'np.ndarray', (), '', True, 'import numpy as np\n'), ((15, 12, 15, 29), 'PIL.Image.open', 'Image.open', ({(15, 23, 15, 28): 'image'}, {}), '(image)', False, 'from PIL import Image, ImageOps\n'), ((19, 12, 19, 54), 'PIL.ImageOps.fit', 'ImageOps.fit', ({(19, 25, 19, 30): 'image', (19, 32, 19, 36): 'size', (19, 38, 19, 53): 'Image.ANTIALIAS'}, {}), '(image, size, Image.ANTIALIAS)', False, 'from PIL import Image, ImageOps\n'), ((22, 18, 22, 35), 'numpy.asarray', 'np.asarray', ({(22, 29, 22, 34): 'image'}, {}), '(image)', True, 'import numpy as np\n'), ((44, 28, 44, 44), 'h5py.File', 'File', ({(44, 33, 44, 43): 'model_file'}, {}), '(model_file)', False, 'from h5py import File\n')] |
emilywoods/docker-workshop | client/setup.py | 46fef25ed06ab33f653bebffdd837ee4cc31c373 | from setuptools import setup
setup(
name="workshop-client",
install_requires=["flask==1.1.1", "requests==2.22.0"],
python_requires=">=3.7",
    classifiers=[
        "Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| [((3, 0, 14, 1), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n')] |
Socian-Ltd/python-facebook-1 | tests/facebook/models/test_photo.py | e9a4f626b37541103c9534a29342ef6033c09c06 | import json
import unittest
import pyfacebook.models as models
class PhotoModelTest(unittest.TestCase):
BASE_PATH = "testdata/facebook/models/photos/"
with open(BASE_PATH + 'photo.json', 'rb') as f:
PHOTO_INFO = json.loads(f.read().decode('utf-8'))
def testPhoto(self):
m = models.Photo.new_from_json_dict(self.PHOTO_INFO)
self.assertEqual(m.id, "166370841591183")
self.assertEqual(m.album.id, "108824087345859")
self.assertEqual(len(m.images), 8)
self.assertEqual(m.webp_images[0].height, 800)
| [((14, 12, 14, 60), 'pyfacebook.models.Photo.new_from_json_dict', 'models.Photo.new_from_json_dict', ({(14, 44, 14, 59): 'self.PHOTO_INFO'}, {}), '(self.PHOTO_INFO)', True, 'import pyfacebook.models as models\n')] |
curanaj/airbyte-dbt-demo | airbyte-integrations/connectors/source-scaffold-source-python/source_scaffold_source_python/source.py | f6b8ccd8f8e57b7ea84caf814b14d836338e8007 | # MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from datetime import datetime
from typing import Dict, Generator
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
from airbyte_cdk.sources import Source
class SourceScaffoldSourcePython(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the integration
e.g: if a provided Stripe API token can be used to connect to the Stripe API.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
# Not Implemented
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}")
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in this integration.
For example, given valid credentials to a Postgres database,
returns an Airbyte catalog where each postgres table is a stream, and each table column is a field.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteCatalog is an object describing a list of all available streams in this source.
A stream is an AirbyteStream object that includes:
- its stream name (or table name in the case of Postgres)
- json_schema providing the specifications of expected schema for this stream (a list of columns described
by their names and types)
"""
streams = []
stream_name = "TableName" # Example
json_schema = { # Example
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {"columnName": {"type": "string"}},
}
# Not Implemented
streams.append(AirbyteStream(name=stream_name, json_schema=json_schema))
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
"""
Returns a generator of the AirbyteMessages generated by reading the source with the given configuration,
catalog, and state.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:param catalog: The input catalog is a ConfiguredAirbyteCatalog which is almost the same as AirbyteCatalog
returned by discover(), but
in addition, it's been configured in the UI! For each particular stream and field, there may have been provided
with extra modifications such as: filtering streams and/or columns out, renaming some entities, etc
:param state: When a Airbyte reads data from a source, it might need to keep a checkpoint cursor to resume
replication in the future from that saved checkpoint.
This is the object that is provided with state from previous runs and avoid replicating the entire set of
data everytime.
:return: A generator that produces a stream of AirbyteRecordMessage contained in AirbyteMessage object.
"""
stream_name = "TableName" # Example
data = {"columnName": "Hello World"} # Example
# Not Implemented
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=stream_name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
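# Added note (hedged): in a generated Airbyte connector this class is normally
# wired up in a separate main.py, roughly as sketched below, so that
# `python main.py discover --config secrets/config.json` ends up calling the
# methods above. This is a sketch of the usual scaffold, not part of this file.
#
#   import sys
#   from airbyte_cdk.entrypoint import launch
#
#   if __name__ == "__main__":
#       launch(SourceScaffoldSourcePython(), sys.argv[1:])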
| [((91, 15, 91, 46), 'airbyte_cdk.models.AirbyteCatalog', 'AirbyteCatalog', (), '', False, 'from airbyte_cdk.models import AirbyteCatalog, AirbyteConnectionStatus, AirbyteMessage, AirbyteRecordMessage, AirbyteStream, ConfiguredAirbyteCatalog, Status, Type\n'), ((58, 19, 58, 67), 'airbyte_cdk.models.AirbyteConnectionStatus', 'AirbyteConnectionStatus', (), '', False, 'from airbyte_cdk.models import AirbyteCatalog, AirbyteConnectionStatus, AirbyteMessage, AirbyteRecordMessage, AirbyteStream, ConfiguredAirbyteCatalog, Status, Type\n'), ((90, 23, 90, 79), 'airbyte_cdk.models.AirbyteStream', 'AirbyteStream', (), '', False, 'from airbyte_cdk.models import AirbyteCatalog, AirbyteConnectionStatus, AirbyteMessage, AirbyteRecordMessage, AirbyteStream, ConfiguredAirbyteCatalog, Status, Type\n'), ((122, 86, 122, 100), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
SimplyVC/panic | alerter/src/monitorables/nodes/chainlink_node.py | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | from datetime import datetime
from typing import Optional, Dict, List, Union
from schema import Schema, Or
from src.monitorables.nodes.node import Node
from src.utils.exceptions import InvalidDictSchemaException
class ChainlinkNode(Node):
def __init__(self, node_name: str, node_id: str, parent_id: str) -> None:
super().__init__(node_name, node_id, parent_id)
# Metrics
self._went_down_at_prometheus = None
self._current_height = None
self._total_block_headers_received = None
self._max_pending_tx_delay = None
self._process_start_time_seconds = None
self._total_gas_bumps = None
self._total_gas_bumps_exceeds_limit = None
self._no_of_unconfirmed_txs = None
self._total_errored_job_runs = None
self._current_gas_price_info = {
'percentile': None,
'price': None,
}
self._eth_balance_info = {}
# This variable stores the url of the source used to get prometheus node
# data. Note that this had to be done because multiple prometheus
# sources can be associated with the same node, where at the same time
# only one source is available, and sources switch from time to time.
self._last_prometheus_source_used = None
# This stores the timestamp of the last successful monitoring round.
self._last_monitored_prometheus = None
@property
def is_down_prometheus(self) -> bool:
return self._went_down_at_prometheus is not None
@property
def went_down_at_prometheus(self) -> Optional[float]:
return self._went_down_at_prometheus
@property
def current_height(self) -> Optional[int]:
return self._current_height
@property
def total_block_headers_received(self) -> Optional[int]:
return self._total_block_headers_received
@property
def max_pending_tx_delay(self) -> Optional[int]:
return self._max_pending_tx_delay
@property
def process_start_time_seconds(self) -> Optional[float]:
return self._process_start_time_seconds
@property
def total_gas_bumps(self) -> Optional[int]:
return self._total_gas_bumps
@property
def total_gas_bumps_exceeds_limit(self) -> Optional[int]:
return self._total_gas_bumps_exceeds_limit
@property
def no_of_unconfirmed_txs(self) -> Optional[int]:
return self._no_of_unconfirmed_txs
@property
def total_errored_job_runs(self) -> Optional[int]:
return self._total_errored_job_runs
@property
def current_gas_price_info(self) -> Dict[str, Optional[float]]:
return self._current_gas_price_info
@property
def eth_balance_info(self) -> Dict[str, Union[str, float]]:
return self._eth_balance_info
@property
def last_prometheus_source_used(self) -> Optional[str]:
return self._last_prometheus_source_used
@property
def last_monitored_prometheus(self) -> Optional[float]:
return self._last_monitored_prometheus
@staticmethod
def get_int_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing integer prometheus
: metrics.
"""
return [
'current_height',
'total_block_headers_received',
'max_pending_tx_delay', 'total_gas_bumps',
'total_gas_bumps_exceeds_limit', 'no_of_unconfirmed_txs',
'total_errored_job_runs'
]
@staticmethod
def get_float_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing float prometheus
: metrics.
"""
return [
'went_down_at_prometheus', 'process_start_time_seconds',
'last_monitored_prometheus'
]
@staticmethod
def get_dict_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing dict prometheus
: metrics.
"""
return ['current_gas_price_info', 'eth_balance_info']
@staticmethod
def get_str_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing string prometheus
: metrics.
"""
return ['last_prometheus_source_used']
def get_all_prometheus_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing prometheus metrics
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [
*str_prometheus_metric_attributes,
*int_prometheus_metric_attributes,
*float_prometheus_metric_attributes,
*dict_prometheus_metric_attributes
]
def get_int_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing int metrics.
"""
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
return [*int_prometheus_metric_attributes]
def get_float_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing float metrics.
"""
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
return [*float_prometheus_metric_attributes]
def get_dict_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing dict metrics.
"""
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [*dict_prometheus_metric_attributes]
def get_str_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing str metrics.
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
return [*str_prometheus_metric_attributes]
def get_all_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing metrics
"""
prometheus_metric_attributes = \
self.get_all_prometheus_metric_attributes()
return [*prometheus_metric_attributes]
def set_went_down_at_prometheus(
self, went_down_at_prometheus: Optional[float]) -> None:
self._went_down_at_prometheus = went_down_at_prometheus
def set_prometheus_as_down(self, downtime: Optional[float]) -> None:
"""
This function sets the node's prometheus interface as down. It sets the
time that the interface was initially down to the parameter 'downtime'
if it is not None, otherwise it sets it to the current timestamp.
:param downtime:
:return:
"""
if downtime is None:
self.set_went_down_at_prometheus(datetime.now().timestamp())
else:
self.set_went_down_at_prometheus(downtime)
def set_prometheus_as_up(self) -> None:
"""
This function sets a node's prometheus interface as up. A node's
interface is said to be up if went_down_at_prometheus is None.
:return: None
"""
self.set_went_down_at_prometheus(None)
def set_current_height(self, new_height: Optional[int]) -> None:
self._current_height = new_height
def set_total_block_headers_received(
self, new_total_block_headers_received: Optional[int]) -> None:
self._total_block_headers_received = new_total_block_headers_received
def set_max_pending_tx_delay(
self, new_max_pending_tx_delay: Optional[int]) -> None:
self._max_pending_tx_delay = new_max_pending_tx_delay
def set_process_start_time_seconds(
self, new_process_start_time_seconds: Optional[float]) -> None:
self._process_start_time_seconds = new_process_start_time_seconds
def set_total_gas_bumps(self, new_total_gas_bumps: Optional[int]) -> None:
self._total_gas_bumps = new_total_gas_bumps
def set_total_gas_bumps_exceeds_limit(
self, new_total_gas_bumps_exceeds_limit: Optional[int]) -> None:
self._total_gas_bumps_exceeds_limit = new_total_gas_bumps_exceeds_limit
def set_no_of_unconfirmed_txs(
self, new_no_of_unconfirmed_txs: Optional[int]) -> None:
self._no_of_unconfirmed_txs = new_no_of_unconfirmed_txs
def set_total_errored_job_runs(
self, new_total_errored_job_runs: Optional[int]) -> None:
self._total_errored_job_runs = new_total_errored_job_runs
def set_current_gas_price_info(self, new_percentile: Optional[float],
new_price: Optional[float]) -> None:
"""
This method sets the current_gas_price_info dict based on the new
percentile and price. This is done in this way to protect the Dict
schema.
:param new_percentile: The new percentile to be stored
        :param new_price: The new gas price to be stored
:return: None
"""
self._current_gas_price_info['percentile'] = new_percentile
self._current_gas_price_info['price'] = new_price
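    # Editor's illustration (hedged, not part of the original class): after
    # calling set_current_gas_price_info(20.0, 30.5) the stored dict becomes
    # {'percentile': 20.0, 'price': 30.5}. The numeric values are hypothetical;
    # the two keys are the ones fixed by the method above.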
@staticmethod
def _new_eth_balance_info_valid(new_eth_balance_info: Dict) -> bool:
"""
This method checks that the new eth_balance_info dict obeys the required
schema.
:param new_eth_balance_info: The dict to check
:return: True if the dict obeys the required schema
: False otherwise
"""
schema = Schema(Or({
'address': str,
'balance': float,
'latest_usage': float,
}, {}))
return schema.is_valid(new_eth_balance_info)
def set_eth_balance_info(
self, new_eth_balance_info: Dict[str, Union[str, float]]) -> None:
"""
This method sets the new_eth_balance_info. It first checks that the new
dict obeys the required schema. If not, an InvalidDictSchemaException is
raised.
:param new_eth_balance_info: The new eth_balance_info to store.
:return: None
"""""
if self._new_eth_balance_info_valid(new_eth_balance_info):
self._eth_balance_info = new_eth_balance_info
else:
raise InvalidDictSchemaException('new_eth_balance_info')
def set_last_prometheus_source_used(
self, new_last_prometheus_source_used: Optional[str]) -> None:
self._last_prometheus_source_used = new_last_prometheus_source_used
def set_last_monitored_prometheus(
self, new_last_monitored_prometheus: Optional[float]) -> None:
self._last_monitored_prometheus = new_last_monitored_prometheus
def reset(self) -> None:
"""
This method resets all metrics to their initial state
:return: None
"""
self.set_went_down_at_prometheus(None)
self.set_current_height(None)
self.set_total_block_headers_received(None)
self.set_max_pending_tx_delay(None)
self.set_process_start_time_seconds(None)
self.set_total_gas_bumps(None)
self.set_total_gas_bumps_exceeds_limit(None)
self.set_no_of_unconfirmed_txs(None)
self.set_total_errored_job_runs(None)
self.set_current_gas_price_info(None, None)
self.set_eth_balance_info({})
self.set_last_prometheus_source_used(None)
self.set_last_monitored_prometheus(None)
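# Editor's sketch (hedged): the helper below is NOT part of the original module.
# It shows, in self-contained form, the `schema` validation pattern used by
# _new_eth_balance_info_valid above (a complete dict or an empty dict is accepted).
# The address and numbers are made-up example values.
def _demo_eth_balance_schema() -> None:
    from schema import Or, Schema

    eth_balance_schema = Schema(Or({
        'address': str,
        'balance': float,
        'latest_usage': float,
    }, {}))

    # A complete dict and an empty dict are both valid ...
    assert eth_balance_schema.is_valid(
        {'address': '0xabc', 'balance': 1.5, 'latest_usage': 0.2})
    assert eth_balance_schema.is_valid({})
    # ... while a partially filled dict is rejected.
    assert not eth_balance_schema.is_valid({'balance': 1.5})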
| [((272, 24, 276, 14), 'schema.Or', 'Or', ({(272, 27, 276, 9): "{'address': str, 'balance': float, 'latest_usage': float}", (276, 11, 276, 13): '{}'}, {}), "({'address': str, 'balance': float, 'latest_usage': float}, {})", False, 'from schema import Schema, Or\n'), ((291, 18, 291, 68), 'src.utils.exceptions.InvalidDictSchemaException', 'InvalidDictSchemaException', ({(291, 45, 291, 67): '"""new_eth_balance_info"""'}, {}), "('new_eth_balance_info')", False, 'from src.utils.exceptions import InvalidDictSchemaException\n'), ((208, 45, 208, 59), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
petrapoklukar/DCA | experiments/vgg16/VGG16_utils.py | e5b3f3481433306a4b33e712272f8bbf5e9d05ce | import pickle
import numpy as np
import os
def _analyze_query_point_assignment(
query_data_dict: dict,
init_Rdata_dict: dict,
init_Edata_dict: dict,
num_R: int,
query_point_assignment_array: np.ndarray,
root: str,
n_points_to_copy=50,
):
"""
Analyzes and visualizes qDCA results.
:param query_data_dict: raw query data.
:param init_Rdata_dict: raw R data.
:param init_Edata_dict: raw E data.
:param num_R: total number of R points.
:param query_point_assignment_array: query point assignments results.
:param root: root directory of the experiment.
:param n_points_to_copy: number of images to save.
:return: accuracy of qDCA assignments; list of (R, query) points with same label;
list of (R, query) points with different label
"""
true_query_data_labels = query_data_dict["labels"]
assigned_R = query_point_assignment_array[
query_point_assignment_array[:, 1] < num_R, 1
]
assigned_E = query_point_assignment_array[
query_point_assignment_array[:, 1] >= num_R, 1
]
assigned_R_labels = init_Rdata_dict["labels"][assigned_R]
assigned_E_labels = init_Edata_dict["labels"][assigned_E - num_R]
assigned_query_data_labels = np.empty(
shape=query_point_assignment_array.shape[0]
).astype(np.int32)
assigned_query_data_labels[
query_point_assignment_array[:, 1] < num_R
] = assigned_R_labels
assigned_query_data_labels[
query_point_assignment_array[:, 1] >= num_R
] = assigned_E_labels
accuracy = (
true_query_data_labels == assigned_query_data_labels
).sum() / assigned_query_data_labels.shape[0]
same_label_idx = np.where(true_query_data_labels == assigned_query_data_labels)[0]
wrong_label_idx = np.where(true_query_data_labels != assigned_query_data_labels)[0]
correct_pairs = []
for i in query_point_assignment_array[same_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
wrong_pairs = []
for i in query_point_assignment_array[wrong_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
with open(
os.path.join(root, "logs", "analyzed_query_point_assignments.pkl"), "wb"
) as f:
pickle.dump(
{
"accuracy": accuracy,
"same_label_idx": same_label_idx,
"wrong_label_idx": wrong_label_idx,
"correct_pairs": correct_pairs,
"wrong_pairs": wrong_pairs,
"query_point_assignment_array": query_point_assignment_array,
},
f,
)
same_label_image_path = os.path.join(root, "visualization", "same_label_images")
wrong_label_image_path = os.path.join(root, "visualization", "wrong_label_images")
if not os.path.exists(wrong_label_image_path):
os.mkdir(wrong_label_image_path)
if not os.path.exists(same_label_image_path):
os.mkdir(same_label_image_path)
for i in range(n_points_to_copy):
query_image_path, init_image_path, query_label, init_label = correct_pairs[i]
path_to_copy = os.path.join(
same_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(init_image_path, path_to_copy))
path_to_copy2 = os.path.join(
same_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(query_image_path, path_to_copy2))
(
w_query_image_path,
w_init_image_path,
w_query_label,
w_init_label,
) = wrong_pairs[i]
path_to_copy_w = os.path.join(
wrong_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(w_query_label), str(w_init_label)
),
)
os.system("cp {0} {1}".format(w_init_image_path, path_to_copy_w))
path_to_copy_w2 = os.path.join(
wrong_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
i, w_query_label, w_init_label
),
)
os.system("cp {0} {1}".format(w_query_image_path, path_to_copy_w2))
return accuracy, correct_pairs, wrong_pairs
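# Editor's sketch (hedged): the helper below is NOT part of the original file. It
# is a tiny, self-contained example of the label-assignment and accuracy logic
# used above, with two R points, two E points (num_R = 2) and three query points.
# All labels and indices are made up.
def _demo_assignment_accuracy() -> float:
    num_R = 2
    R_labels = np.array([0, 1])
    E_labels = np.array([1, 0])
    true_query_labels = np.array([0, 1, 0])
    # column 0: query index, column 1: assigned init index (R points first, then E)
    assignments = np.array([[0, 0], [1, 2], [2, 2]])

    assigned = np.empty(assignments.shape[0], dtype=np.int32)
    is_R = assignments[:, 1] < num_R
    assigned[is_R] = R_labels[assignments[is_R, 1]]
    assigned[~is_R] = E_labels[assignments[~is_R, 1] - num_R]
    # queries 0 and 1 receive the correct label, query 2 does not, so accuracy is 2/3
    return (true_query_labels == assigned).sum() / assigned.shape[0]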
def _generate_query_sets(version: str, N: int = 5000):
"""
Generates query sets for qDCA experiment in Section 4.3.
:param version: either version1 (dogs vs kitchen utils) or version2 (random).
:param N: number of points to sample for R used in DCA.
"""
with open(f"representations/vgg16/{version}/Rfeatures.pkl", "rb") as f:
Rdata_v1 = pickle.load(f)
with open(f"representations/vgg16/{version}/Efeatures.pkl", "rb") as f:
Edata_v1 = pickle.load(f)
init_Ridxs = np.random.choice(
np.arange(len(Rdata_v1["feat_lin1"])), size=N, replace=False
)
query_Ridxs = np.setdiff1d(np.arange(len(Rdata_v1["feat_lin1"])), init_Ridxs)
init_Eidxs = np.random.choice(
np.arange(len(Edata_v1["feat_lin1"])), size=N, replace=False
)
query_Eidxs = np.setdiff1d(np.arange(len(Edata_v1["feat_lin1"])), init_Eidxs)
with open(f"representations/vgg16/{version}/sampled_Rfeatures.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": Rdata_v1["feat_lin1"][init_Ridxs],
"feat_lin2": Rdata_v1["feat_lin2"][init_Ridxs],
"labels": Rdata_v1["labels"][init_Ridxs],
"paths": np.array(Rdata_v1["paths"])[init_Ridxs],
"init_Ridx": init_Ridxs,
"query_Ridx": query_Ridxs,
},
f,
)
with open(f"representations/vgg16/{version}/sampled_Efeatures.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": Edata_v1["feat_lin1"][init_Eidxs],
"feat_lin2": Edata_v1["feat_lin2"][init_Eidxs],
"labels": Edata_v1["labels"][init_Eidxs],
"paths": np.array(Edata_v1["paths"])[init_Eidxs],
"init_Eidx": init_Eidxs,
"query_Eidx": query_Eidxs,
},
f,
)
with open(f"representations/vgg16/{version}/query_features.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": np.concatenate(
[
Rdata_v1["feat_lin1"][query_Ridxs],
Edata_v1["feat_lin1"][query_Eidxs],
]
),
"feat_lin2": np.concatenate(
[
Rdata_v1["feat_lin2"][query_Ridxs],
Edata_v1["feat_lin2"][query_Eidxs],
]
),
"labels": np.concatenate(
[Rdata_v1["labels"][query_Ridxs], Edata_v1["labels"][query_Eidxs]]
),
"paths": np.concatenate(
[
np.array(Rdata_v1["paths"])[query_Ridxs],
np.array(Edata_v1["paths"])[query_Eidxs],
]
),
"init_Eidxs": init_Eidxs,
"query_Eidxs": query_Eidxs,
"init_Ridxs": init_Ridxs,
"query_Ridxs": query_Ridxs,
},
f,
)
| [((113, 28, 113, 84), 'os.path.join', 'os.path.join', ({(113, 41, 113, 45): 'root', (113, 47, 113, 62): '"""visualization"""', (113, 64, 113, 83): '"""same_label_images"""'}, {}), "(root, 'visualization', 'same_label_images')", False, 'import os\n'), ((114, 29, 114, 86), 'os.path.join', 'os.path.join', ({(114, 42, 114, 46): 'root', (114, 48, 114, 63): '"""visualization"""', (114, 65, 114, 85): '"""wrong_label_images"""'}, {}), "(root, 'visualization', 'wrong_label_images')", False, 'import os\n'), ((51, 21, 51, 83), 'numpy.where', 'np.where', ({(51, 30, 51, 82): '(true_query_data_labels == assigned_query_data_labels)'}, {}), '(true_query_data_labels == assigned_query_data_labels)', True, 'import numpy as np\n'), ((52, 22, 52, 84), 'numpy.where', 'np.where', ({(52, 31, 52, 83): '(true_query_data_labels != assigned_query_data_labels)'}, {}), '(true_query_data_labels != assigned_query_data_labels)', True, 'import numpy as np\n'), ((101, 8, 111, 9), 'pickle.dump', 'pickle.dump', ({(102, 12, 109, 13): "{'accuracy': accuracy, 'same_label_idx': same_label_idx, 'wrong_label_idx':\n wrong_label_idx, 'correct_pairs': correct_pairs, 'wrong_pairs':\n wrong_pairs, 'query_point_assignment_array': query_point_assignment_array}", (110, 12, 110, 13): 'f'}, {}), "({'accuracy': accuracy, 'same_label_idx': same_label_idx,\n 'wrong_label_idx': wrong_label_idx, 'correct_pairs': correct_pairs,\n 'wrong_pairs': wrong_pairs, 'query_point_assignment_array':\n query_point_assignment_array}, f)", False, 'import pickle\n'), ((115, 11, 115, 49), 'os.path.exists', 'os.path.exists', ({(115, 26, 115, 48): 'wrong_label_image_path'}, {}), '(wrong_label_image_path)', False, 'import os\n'), ((116, 8, 116, 40), 'os.mkdir', 'os.mkdir', ({(116, 17, 116, 39): 'wrong_label_image_path'}, {}), '(wrong_label_image_path)', False, 'import os\n'), ((118, 11, 118, 48), 'os.path.exists', 'os.path.exists', ({(118, 26, 118, 47): 'same_label_image_path'}, {}), '(same_label_image_path)', False, 'import os\n'), ((119, 8, 119, 39), 'os.mkdir', 'os.mkdir', ({(119, 17, 119, 38): 'same_label_image_path'}, {}), '(same_label_image_path)', False, 'import os\n'), ((172, 19, 172, 33), 'pickle.load', 'pickle.load', ({(172, 31, 172, 32): 'f'}, {}), '(f)', False, 'import pickle\n'), ((175, 19, 175, 33), 'pickle.load', 'pickle.load', ({(175, 31, 175, 32): 'f'}, {}), '(f)', False, 'import pickle\n'), ((37, 33, 39, 5), 'numpy.empty', 'np.empty', (), '', True, 'import numpy as np\n'), ((99, 8, 99, 74), 'os.path.join', 'os.path.join', ({(99, 21, 99, 25): 'root', (99, 27, 99, 33): '"""logs"""', (99, 35, 99, 73): '"""analyzed_query_point_assignments.pkl"""'}, {}), "(root, 'logs', 'analyzed_query_point_assignments.pkl')", False, 'import os\n'), ((216, 29, 221, 17), 'numpy.concatenate', 'np.concatenate', ({(217, 20, 220, 21): "[Rdata_v1['feat_lin1'][query_Ridxs], Edata_v1['feat_lin1'][query_Eidxs]]"}, {}), "([Rdata_v1['feat_lin1'][query_Ridxs], Edata_v1['feat_lin1'][\n query_Eidxs]])", True, 'import numpy as np\n'), ((222, 29, 227, 17), 'numpy.concatenate', 'np.concatenate', ({(223, 20, 226, 21): "[Rdata_v1['feat_lin2'][query_Ridxs], Edata_v1['feat_lin2'][query_Eidxs]]"}, {}), "([Rdata_v1['feat_lin2'][query_Ridxs], Edata_v1['feat_lin2'][\n query_Eidxs]])", True, 'import numpy as np\n'), ((228, 26, 230, 17), 'numpy.concatenate', 'np.concatenate', ({(229, 20, 229, 86): "[Rdata_v1['labels'][query_Ridxs], Edata_v1['labels'][query_Eidxs]]"}, {}), "([Rdata_v1['labels'][query_Ridxs], Edata_v1['labels'][\n query_Eidxs]])", True, 'import numpy as np\n'), ((193, 25, 193, 52), 'numpy.array', 'np.array', ({(193, 34, 193, 51): "Rdata_v1['paths']"}, {}), "(Rdata_v1['paths'])", True, 'import numpy as np\n'), ((206, 25, 206, 52), 'numpy.array', 'np.array', ({(206, 34, 206, 51): "Edata_v1['paths']"}, {}), "(Edata_v1['paths'])", True, 'import numpy as np\n'), ((233, 24, 233, 51), 'numpy.array', 'np.array', ({(233, 33, 233, 50): "Rdata_v1['paths']"}, {}), "(Rdata_v1['paths'])", True, 'import numpy as np\n'), ((234, 24, 234, 51), 'numpy.array', 'np.array', ({(234, 33, 234, 50): "Edata_v1['paths']"}, {}), "(Edata_v1['paths'])", True, 'import numpy as np\n')]
Coldarra/RawFishSheep | back-end/RawFishSheep/app_cart/views.py | 266bd9d8d9832d5c692b63e7515d45fdc4f6acc4 | from .models import *
from decorator import *
from app_goods.views import getGoodsByID
# Query all cart entries for the current user
def getCartByUser(user_id=None):
if user_id == None:
raise ParamException()
return Cart.objects.filter(user_id=user_id)
def getSelectedCart(user_id=None):
if user_id == None:
raise ParamException()
return Cart.objects.filter(user_id=user_id, selection="1")
def getCartByGoods(user_id=None, goods_id=None):
if None in [user_id, goods_id]:
raise ParamException()
if Cart.objects.filter(user_id=user_id, goods_id=goods_id).count() <= 0:
raise RFSException("40012", "无效购物车商品")
return Cart.objects.get(user_id=user_id, goods_id=goods_id)
def checkCartByGoods(user_id, goods_id):
return Cart.objects.filter(user_id=user_id, goods_id=goods_id).count() > 0
def createCart(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
if checkCartByGoods(user_id, goods_id):
        return appendToCart(user_id, goods_id, amount)
return Cart.objects.create(
user_id=user_id, goods_id=goods_id, amount=amount)
def appendToCart(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
amount = int(amount)
if getGoodsByID(goods_id).remain < amount:
raise RFSException("40013", "商品余辆不足")
if checkCartByGoods(user_id, goods_id):
cart_obj = getCartByGoods(user_id, goods_id)
cart_obj.amount += amount
cart_obj.save()
return cart_obj
else:
return createCart(user_id, goods_id, amount)
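# Editor's note (hedged, not in the original module): appendToCart() behaves as an
# "upsert": it first checks the remaining stock via getGoodsByID(), then either
# increments the amount on an existing cart row or falls back to createCart().
# A typical call would be appendToCart(user_id=1, goods_id=3, amount=2); the
# concrete ids and amount here are hypothetical.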
def deleteCartByGoods(user_id=None, goods_id=None):
if None in [user_id, goods_id]:
raise ParamException()
Cart.objects.filter(user_id=user_id,
goods_id=goods_id).delete()
def deleteCartByUser(user_id=None):
    if user_id == None:
raise ParamException()
Cart.objects.filter(user_id=user_id).delete()
def deleteSelectedCart(user_id=None):
if user_id == None:
raise ParamException()
Cart.objects.filter(user_id=user_id, selection="1").delete()
def setCartAmount(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
amount = int(amount)
cart = getCartByGoods(user_id, goods_id)
if amount <= 0:
raise RFSException("40033", "购物车商品数量非法")
cart.amount = amount
cart.save()
return cart
def setCartSelection(user_id=None, goods_id=None, selection=None):
    # Check that the parameters are valid
    if None in [user_id, goods_id, selection]:
        raise ParamException()
    cart = getCartByGoods(user_id, goods_id)
    # Check that the requested selection state is valid
    if selection != "0" and selection != "1":
        raise RFSException("40033", "Invalid selection state")
    # Update the selection state
cart.selection = selection
cart.save()
return cart
| [((46, 7, 46, 29), 'app_goods.views.getGoodsByID', 'getGoodsByID', ({(46, 20, 46, 28): 'goods_id'}, {}), '(goods_id)', False, 'from app_goods.views import getGoodsByID\n')] |
johannesgiorgis/my-timewarrior-extensions | extensions/catsum.py | 1a8b83359298d3cbf002148f02b5ef6f1693a797 | #!/usr/bin/env python3
###############################################################################
#
# Category Summaries
#
#
###############################################################################
import datetime
import io
import json
import logging
import pprint
import sys
from typing import Dict, Any
from dateutil import tz
# set logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create handler
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
# Create formatters and add it to handlers
LOG_FORMAT = "[%(asctime)s - %(levelname)-8s - %(module)s:%(name)s ] %(message)s"
c_format = logging.Formatter(LOG_FORMAT)
c_handler.setFormatter(c_format)
# Add handlers to the logger
logger.addHandler(c_handler)
DATE_FORMAT = "%Y%m%dT%H%M%SZ"
# TODO: Convert to defaultdict
# https://www.accelebrate.com/blog/using-defaultdict-python
# https://stackoverflow.com/questions/9358983/dictionaries-and-default-values
# https://docs.python.org/2/library/collections.html#collections.defaultdict
CATEGORIES: dict = {
"PT": "Personal Time",
"PW": "Planned Work",
"UW": "Unplanned Work",
"OW": "Other Work",
}
def main():
print("~" * 100)
totals = calculate_totals(sys.stdin)
# print(totals)
if not totals:
sys.exit(0)
categories_total = extract_categories(totals)
# All Categories Statistics
category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_category_breakdown = format_category_breakdown(category_percent_breakdown)
display_category_breakdown(formatted_category_breakdown)
# remove personal category
categories_total.pop("Personal Time", None)
work_category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_work_category_breakdown = format_category_breakdown(work_category_percent_breakdown)
display_category_breakdown(formatted_work_category_breakdown)
# formatted_category_breakdown.pop("Personal Time", None)
# formatted
# print(type(formatted_category_breakdown))
# print(formatted_category_breakdown.keys())
def format_seconds(seconds: int) -> str:
"""
Convert seconds to a formatted string
Convert seconds: 3661
To formatted: " 1:01:01"
"""
# print(seconds, type(seconds))
hours = seconds // 3600
minutes = seconds % 3600 // 60
seconds = seconds % 60
return f"{hours:4d}:{minutes:02d}:{seconds:02d}"
def calculate_totals(input_stream: io.TextIOWrapper) -> Dict[str, datetime.timedelta]:
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
# Extract the configuration settings.
header = 1
configuration = dict()
body = ""
for line in input_stream:
if header:
if line == "\n":
header = 0
else:
fields = line.strip().split(": ", 2)
if len(fields) == 2:
configuration[fields[0]] = fields[1]
else:
configuration[fields[0]] = ""
else:
body += line
# Sum the seconds tracked by tag
totals = dict()
untagged = None
j = json.loads(body)
for object in j:
start = datetime.datetime.strptime(object["start"], DATE_FORMAT)
if "end" in object:
end = datetime.datetime.strptime(object["end"], DATE_FORMAT)
else:
end = datetime.datetime.utcnow()
tracked = end - start
if "tags" not in object or object["tags"] == []:
if untagged is None:
untagged = tracked
else:
untagged += tracked
else:
for tag in object["tags"]:
if tag in totals:
totals[tag] += tracked
else:
totals[tag] = tracked
if "temp.report.start" not in configuration:
print("There is no data in the database")
return totals
start_utc = datetime.datetime.strptime(configuration["temp.report.start"], DATE_FORMAT)
start_utc = start_utc.replace(tzinfo=from_zone)
start = start_utc.astimezone(to_zone)
if "temp.report.end" in configuration:
end_utc = datetime.datetime.strptime(configuration["temp.report.end"], DATE_FORMAT)
end_utc = end_utc.replace(tzinfo=from_zone)
end = end_utc.astimezone(to_zone)
else:
end = datetime.datetime.now()
if len(totals) == 0 and untagged is None:
print(f"No data in the range {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
print(f"\nCategory Summary Data for {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
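# Editor's sketch (hedged): the helper below is NOT part of the original extension.
# It feeds calculate_totals() a minimal timewarrior-style export (header lines,
# a blank line, then a JSON body) through an in-memory stream. All timestamps
# and tags are made up.
def _demo_calculate_totals() -> None:
    sample = (
        "temp.report.start: 20240101T000000Z\n"
        "temp.report.end: 20240101T120000Z\n"
        "\n"
        '[{"start": "20240101T090000Z", "end": "20240101T100000Z", "tags": ["PW"]}]\n'
    )
    totals = calculate_totals(io.StringIO(sample))
    print(totals)  # expected: {'PW': datetime.timedelta(seconds=3600)}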
def extract_categories(totals: Dict[str, datetime.timedelta]) -> Dict[str, datetime.timedelta]:
categories_total = {}
for category, category_full_name in CATEGORIES.items():
categories_total[category_full_name] = totals.get(category, datetime.timedelta(0))
return categories_total
def get_category_percent_breakdown(
category_run_times: Dict[str, datetime.timedelta]
) -> Dict[str, Any]:
logger.debug("Getting category percentage breakdown...")
total_time = sum([run_time.total_seconds() for run_time in category_run_times.values()])
logger.debug(f"Total Time:{total_time}")
category_percentage_breakdown: dict = {}
for category, run_time in category_run_times.items():
category_percent = run_time.total_seconds() / total_time
category_percentage_breakdown[category] = {
"percent": category_percent,
"duration": run_time.total_seconds() / 60,
"run_time": format_seconds(int(run_time.total_seconds())),
}
# add total time statistics
category_percentage_breakdown["Total"] = {
"percent": total_time / total_time,
"duration": total_time / 60,
"run_time": format_seconds(int(total_time)),
}
logger.debug(pprint.pformat(category_percentage_breakdown))
return category_percentage_breakdown
def format_category_breakdown(category_breakdown: dict) -> Dict[str, Any]:
# print(type(category_breakdown))
# pprint.pprint(category_breakdown)
formatted_category_breakdown = {}
for category, category_statistics in category_breakdown.items():
formatted_category_breakdown[category] = {
# convert duration to mins
"duration": round(category_statistics["duration"], 2),
"percent": round(category_statistics["percent"] * 100, 2),
"run_time": category_statistics["run_time"],
}
return formatted_category_breakdown
def display_category_breakdown(category_breakdown: dict, title: str = "Category Breakdown"):
# Determine largest width
max_width = len("Category")
    for category in category_breakdown:
        if len(category) > max_width:
            max_width = len(category)
print_dotted_line()
print(f"\t\t{title.capitalize():>{max_width}}")
print(
f"{'Category':{max_width}}\t"
f"{'Duration':{max_width}}\t"
f"{'Run_Time':>{max_width + 2}}\t"
f"{'Percent':{max_width + 1}}"
)
for category, category_statistics in category_breakdown.items():
print(
f"{category:{max_width}}\t"
f"{category_statistics['duration']:{max_width}}\t"
f"{category_statistics['run_time']:}\t"
f"{category_statistics['percent']}%"
)
print_dotted_line()
def print_dotted_line(width: int = 72):
"""Print a dotted (rather 'dashed') line"""
print("-" * width)
if __name__ == "__main__":
main()
| [((22, 9, 22, 36), 'logging.getLogger', 'logging.getLogger', ({(22, 27, 22, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((26, 12, 26, 35), 'logging.StreamHandler', 'logging.StreamHandler', ({}, {}), '()', False, 'import logging\n'), ((31, 11, 31, 40), 'logging.Formatter', 'logging.Formatter', ({(31, 29, 31, 39): 'LOG_FORMAT'}, {}), '(LOG_FORMAT)', False, 'import logging\n'), ((93, 16, 93, 26), 'dateutil.tz.tzutc', 'tz.tzutc', ({}, {}), '()', False, 'from dateutil import tz\n'), ((94, 14, 94, 26), 'dateutil.tz.tzlocal', 'tz.tzlocal', ({}, {}), '()', False, 'from dateutil import tz\n'), ((116, 8, 116, 24), 'json.loads', 'json.loads', ({(116, 19, 116, 23): 'body'}, {}), '(body)', False, 'import json\n'), ((143, 16, 143, 91), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(143, 43, 143, 77): "configuration['temp.report.start']", (143, 79, 143, 90): 'DATE_FORMAT'}, {}), "(configuration['temp.report.start'], DATE_FORMAT)", False, 'import datetime\n'), ((57, 8, 57, 19), 'sys.exit', 'sys.exit', ({(57, 17, 57, 18): '(0)'}, {}), '(0)', False, 'import sys\n'), ((118, 16, 118, 72), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(118, 43, 118, 58): "object['start']", (118, 60, 118, 71): 'DATE_FORMAT'}, {}), "(object['start'], DATE_FORMAT)", False, 'import datetime\n'), ((148, 18, 148, 91), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(148, 45, 148, 77): "configuration['temp.report.end']", (148, 79, 148, 90): 'DATE_FORMAT'}, {}), "(configuration['temp.report.end'], DATE_FORMAT)", False, 'import datetime\n'), ((152, 14, 152, 37), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((193, 17, 193, 62), 'pprint.pformat', 'pprint.pformat', ({(193, 32, 193, 61): 'category_percentage_breakdown'}, {}), '(category_percentage_breakdown)', False, 'import pprint\n'), ((121, 18, 121, 72), 'datetime.datetime.strptime', 'datetime.datetime.strptime', ({(121, 45, 121, 58): "object['end']", (121, 60, 121, 71): 'DATE_FORMAT'}, {}), "(object['end'], DATE_FORMAT)", False, 'import datetime\n'), ((123, 18, 123, 44), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n'), ((166, 68, 166, 89), 'datetime.timedelta', 'datetime.timedelta', ({(166, 87, 166, 88): '0'}, {}), '(0)', False, 'import datetime\n')] |
jnascimentocode/REST-API-COM-PYTHON-E-FLASK | resources/hotel.py | c55dca53f3a864c6c1aba8bbde63dcadc3c19347 | from typing import ParamSpecArgs
from flask_restful import Resource, reqparse
from models.hotel import HotelModel
from flask_jwt_extended import jwt_required
from models.site import SiteModel
from resources.filtros import *
import sqlite3
path_params = reqparse.RequestParser()
path_params.add_argument('cidade', type=str)
path_params.add_argument('estrelas_min', type=float)
path_params.add_argument('estrelas_max', type=float)
path_params.add_argument('diaria_min', type=float)
path_params.add_argument('diaria_max', type=float)
path_params.add_argument('limit', type=float)
path_params.add_argument('offset', type=float)
class Hoteis(Resource):
def get(self):
connection = sqlite3.connect('banco.db')
cursor = connection.cursor()
dados = path_params.parse_args()
dados_validos = {chave:dados[chave] for chave in dados if dados[chave] is not None}
parametros = normalize_path_params(**dados_validos)
if not parametros.get('cidade'):
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_sem_cidade, tupla)
else:
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_com_cidade, tupla)
hoteis = []
for linha in resultado:
hoteis.append({
'hotel_id': linha[0],
'nome': linha[1],
'estrelas': linha[2],
'diaria': linha[3],
'cidade': linha[4],
'site_id': linha[5]
})
return {'hoteis': hoteis}
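# Editor's note (hedged illustration, not in the original resource): with the path
# params declared above, a client could filter the listing with a request such as
# GET /hoteis?cidade=Rio&estrelas_min=3&diaria_max=400&limit=20&offset=0. The route
# prefix and the query values are hypothetical; the SQL templates
# consulta_com_cidade / consulta_sem_cidade come from resources.filtros.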
class Hotel(Resource):
argumentos = reqparse.RequestParser()
argumentos.add_argument('nome', type=str, required=True, help="The field 'nome' cannot be left blank")
argumentos.add_argument('estrelas', type=float, required=True, help="The field 'estrelas' cannot be left blank")
argumentos.add_argument('diaria')
argumentos.add_argument('cidade')
argumentos.add_argument('site_id', type=int, required=True, help="Every hotel needs to be linked with site")
def get(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
return hotel.json()
return {'message': 'Hotel not found.'}, 404
@jwt_required()
def post(self, hotel_id):
if HotelModel.find_hotel(hotel_id):
return {"message": "Hotel id '{}' already exists.".format(hotel_id)}, 400
dados = Hotel.argumentos.parse_args()
hotel = HotelModel(hotel_id, **dados)
if not SiteModel.find_by_id(dados.get('site_id')):
return {'message': 'The hotel must be associated to a valid site id'}, 400
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json()
@jwt_required()
def put(self, hotel_id):
dados = Hotel.argumentos.parse_args()
hotel_encontrado = HotelModel.find_hotel(hotel_id)
if hotel_encontrado:
hotel_encontrado.update_hotel(**dados)
hotel_encontrado.save_hotel()
return hotel_encontrado.json(), 200
hotel = HotelModel(hotel_id, **dados)
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json(), 201 #created
@jwt_required()
def delete(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
try:
hotel.delete_hotel()
except:
return {'message': 'An error occurred trying to delete hotel.'}, 500
return {'message': 'Hotel deleted.'}
return {'message': 'Hotel not found.'}, 404
| [((9, 14, 9, 38), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ({}, {}), '()', False, 'from flask_restful import Resource, reqparse\n'), ((51, 17, 51, 41), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ({}, {}), '()', False, 'from flask_restful import Resource, reqparse\n'), ((64, 5, 64, 19), 'flask_jwt_extended.jwt_required', 'jwt_required', ({}, {}), '()', False, 'from flask_jwt_extended import jwt_required\n'), ((82, 5, 82, 19), 'flask_jwt_extended.jwt_required', 'jwt_required', ({}, {}), '()', False, 'from flask_jwt_extended import jwt_required\n'), ((99, 5, 99, 19), 'flask_jwt_extended.jwt_required', 'jwt_required', ({}, {}), '()', False, 'from flask_jwt_extended import jwt_required\n'), ((21, 21, 21, 48), 'sqlite3.connect', 'sqlite3.connect', ({(21, 37, 21, 47): '"""banco.db"""'}, {}), "('banco.db')", False, 'import sqlite3\n'), ((59, 16, 59, 47), 'models.hotel.HotelModel.find_hotel', 'HotelModel.find_hotel', ({(59, 38, 59, 46): 'hotel_id'}, {}), '(hotel_id)', False, 'from models.hotel import HotelModel\n'), ((67, 11, 67, 42), 'models.hotel.HotelModel.find_hotel', 'HotelModel.find_hotel', ({(67, 33, 67, 41): 'hotel_id'}, {}), '(hotel_id)', False, 'from models.hotel import HotelModel\n'), ((71, 16, 71, 45), 'models.hotel.HotelModel', 'HotelModel', ({(71, 27, 71, 35): 'hotel_id'}, {}), '(hotel_id, **dados)', False, 'from models.hotel import HotelModel\n'), ((86, 27, 86, 58), 'models.hotel.HotelModel.find_hotel', 'HotelModel.find_hotel', ({(86, 49, 86, 57): 'hotel_id'}, {}), '(hotel_id)', False, 'from models.hotel import HotelModel\n'), ((92, 16, 92, 45), 'models.hotel.HotelModel', 'HotelModel', ({(92, 27, 92, 35): 'hotel_id'}, {}), '(hotel_id, **dados)', False, 'from models.hotel import HotelModel\n'), ((102, 16, 102, 47), 'models.hotel.HotelModel.find_hotel', 'HotelModel.find_hotel', ({(102, 38, 102, 46): 'hotel_id'}, {}), '(hotel_id)', False, 'from models.hotel import HotelModel\n')] |
dmgolembiowski/magic-wormhole | src/wormhole/__main__.py | d517a10282d5e56f300db462b1a6eec517202af7 | from __future__ import absolute_import, print_function, unicode_literals
if __name__ == "__main__":
from .cli import cli
cli.wormhole()
else:
# raise ImportError('this module should not be imported')
pass
| [] |
Eliezer-Beczi/CNDP | testing/berge_equilibrium_cndp.py | 73decdfaef1c9e546ad94dd7448c89078af27034 | import networkx as nx
import utils.connectivity_metrics as connectivity_metric
from platypus import NSGAII, EpsMOEA, NSGAIII, EpsNSGAII, Problem, Dominance, Subset, TournamentSelector, \
HypervolumeFitnessEvaluator, Archive
import statistics
import multiprocessing as mp
G = nx.read_adjlist("input/Ventresca/BarabasiAlbert_n500m1.txt")
k = 50
num_of_tests = 10
def get_pairwise_connectivity(exclude=None):
if exclude is None:
exclude = {}
S = set(exclude)
subgraph = nx.subgraph_view(G, filter_node=lambda n: n not in S)
return connectivity_metric.pairwise_connectivity(subgraph)
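# Editor's note (hedged, not in the original script): get_pairwise_connectivity()
# scores how connected the graph stays once the nodes in `exclude` are removed.
# nx.subgraph_view() builds a read-only view of G without those nodes, and the
# helper in utils.connectivity_metrics is assumed to count the node pairs that can
# still reach each other. The critical-node problem below minimises this quantity
# over subsets of k nodes.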
class CNDP(Problem):
def __init__(self):
super(CNDP, self).__init__(1, 1)
self.types[:] = Subset(list(G), k)
def evaluate(self, solution):
solution.objectives[0] = get_pairwise_connectivity(solution.variables[0])
class BergeDominance(Dominance):
def __init__(self):
super(BergeDominance, self).__init__()
def compare(self, x, y):
k1 = 0
k2 = 0
nodes_x = x.variables[0][:]
nodes_y = y.variables[0][:]
metric_x = x.objectives[0]
metric_y = y.objectives[0]
for i in range(k):
tmp = nodes_y[i]
nodes_y[i] = nodes_x[i]
if get_pairwise_connectivity(nodes_y) < metric_x:
k1 += 1
nodes_y[i] = tmp
for i in range(k):
tmp = nodes_x[i]
nodes_x[i] = nodes_y[i]
if get_pairwise_connectivity(nodes_x) < metric_y:
k2 += 1
nodes_x[i] = tmp
if k1 < k2:
return -1
elif k1 > k2:
return 1
else:
return 0
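# Editor's note (hedged, not in the original class): in compare() above, k1 counts
# the positions where copying x's node into y makes the modified y score better
# than x, and k2 counts the symmetric case with the roles swapped. compare()
# returns -1 (x preferred) when k1 < k2, 1 when k2 < k1, and 0 on a tie, acting as
# a Berge-style dominance test between the two node sets.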
class BergeArchive(Archive):
def __init__(self):
super(BergeArchive, self).__init__(dominance=BergeDominance())
def get_critical_nodes():
algorithm = NSGAII(CNDP(), selector=TournamentSelector(dominance=BergeDominance()), archive=BergeArchive())
algorithm.run(1000)
fitness = algorithm.result[0].objectives[0]
print(fitness)
return fitness
if __name__ == '__main__':
pool = mp.Pool(mp.cpu_count())
samples = pool.starmap_async(get_critical_nodes, [() for _ in range(num_of_tests)]).get()
pool.close()
avg = sum(samples) / len(samples)
stdev = statistics.stdev(samples)
print(f"Average: {avg}")
print(f"Standard Deviation: {stdev}")
| [((8, 4, 8, 64), 'networkx.read_adjlist', 'nx.read_adjlist', ({(8, 20, 8, 63): '"""input/Ventresca/BarabasiAlbert_n500m1.txt"""'}, {}), "('input/Ventresca/BarabasiAlbert_n500m1.txt')", True, 'import networkx as nx\n'), ((18, 15, 18, 68), 'networkx.subgraph_view', 'nx.subgraph_view', (), '', True, 'import networkx as nx\n'), ((19, 11, 19, 62), 'utils.connectivity_metrics.pairwise_connectivity', 'connectivity_metric.pairwise_connectivity', ({(19, 53, 19, 61): 'subgraph'}, {}), '(subgraph)', True, 'import utils.connectivity_metrics as connectivity_metric\n'), ((92, 12, 92, 37), 'statistics.stdev', 'statistics.stdev', ({(92, 29, 92, 36): 'samples'}, {}), '(samples)', False, 'import statistics\n'), ((87, 19, 87, 33), 'multiprocessing.cpu_count', 'mp.cpu_count', ({}, {}), '()', True, 'import multiprocessing as mp\n')] |