| repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40-40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k) |
|---|---|---|---|---|
BrightLamp/PyLearningCodes | ros_tf_publisher.py | ed237528c41ab2a9832b88806732097ffae0a0ed | # encoding=utf-8
import rospy
import tf
if __name__ == '__main__':
    rospy.init_node('py_tf_broadcaster')
    br = tf.TransformBroadcaster()
    x = 0.0
    y = 0.0
    z = 0.0
    roll = 0
    pitch = 0
    yaw = 1.57
    rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        yaw = yaw + 0.1
        roll = roll + 0.1
        br.sendTransform((x, y, z),
                         tf.transformations.quaternion_from_euler(roll, pitch, yaw),
                         rospy.Time.now(),
                         "base_link",
                         "front_caster")  # publish the translation and rotation from base_link to link1
        rate.sleep()
| [((7, 4, 7, 40), 'rospy.init_node', 'rospy.init_node', ({(7, 20, 7, 39): '"""py_tf_broadcaster"""'}, {}), "('py_tf_broadcaster')", False, 'import rospy\n'), ((8, 9, 8, 34), 'tf.TransformBroadcaster', 'tf.TransformBroadcaster', ({}, {}), '()', False, 'import tf\n'), ((16, 11, 16, 24), 'rospy.Rate', 'rospy.Rate', ({(16, 22, 16, 23): '1'}, {}), '(1)', False, 'import rospy\n'), ((17, 14, 17, 33), 'rospy.is_shutdown', 'rospy.is_shutdown', ({}, {}), '()', False, 'import rospy\n'), ((21, 25, 21, 83), 'tf.transformations.quaternion_from_euler', 'tf.transformations.quaternion_from_euler', ({(21, 66, 21, 70): 'roll', (21, 72, 21, 77): 'pitch', (21, 79, 21, 82): 'yaw'}, {}), '(roll, pitch, yaw)', False, 'import tf\n'), ((22, 25, 22, 41), 'rospy.Time.now', 'rospy.Time.now', ({}, {}), '()', False, 'import rospy\n')] |
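The row above broadcasts a transform once per second; such a broadcaster is normally paired with a listener that looks the transform up. A minimal listener sketch, assuming a ROS 1 setup where `rospy` and `tf` are importable (the node name is invented, the frame names are taken from the code above, and everything else is illustrative):

```python
# Minimal tf listener sketch for the broadcaster above (assumes ROS 1 + tf).
import rospy
import tf

if __name__ == '__main__':
    rospy.init_node('py_tf_listener')
    listener = tf.TransformListener()
    rate = rospy.Rate(1)
    while not rospy.is_shutdown():
        try:
            # Look up the latest transform between the two frames used above.
            (trans, rot) = listener.lookupTransform("front_caster", "base_link", rospy.Time(0))
            rospy.loginfo("translation=%s rotation=%s", trans, rot)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            pass
        rate.sleep()
```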
NightingaleV/bakalarska_prace-ann-algotrading | dataset_manager/technical_indicators.py | 07866e092cb527a7e1d9d7050790d9ffd611dc83 | # Imports
import numpy as np
class TechnicalIndicators:
cci_constant = 0.015
def __init__(self):
self.df = None
# Exponentially-weighted moving average
def ewma(self, periods):
indicator = 'EWMA{}'.format(periods)
self.df[indicator] = self.df['close'].ewm(span=periods).mean()
return self
# Stochastic Oscillator
def stochastic_oscilator(self, k_period, d_period, smooth=1):
lows = 'l{}'.format(k_period)
highs = 'h{}'.format(k_period)
self.df = self.calc_roll_min(self.df, k_period)
self.df = self.calc_roll_max(self.df, k_period)
self.df = self.stok(self.df, k_period)
if smooth >= 1:
self.df = self.smooth_stok(self.df, smooth)
self.df = self.stod(self.df, d_period)
self.df.drop([lows, highs], axis=1, inplace=True)
return self
@staticmethod
def calc_roll_min(dataset, k_period):
lows = 'l{}'.format(k_period)
dataset[lows] = dataset['low'].rolling(window=k_period).min()
return dataset
@staticmethod
def calc_roll_max(dataset, k_period):
highs = 'h{}'.format(k_period)
dataset[highs] = dataset['high'].rolling(window=k_period).max()
return dataset
@staticmethod
def stok(dataset, k_period):
lows = 'l{}'.format(k_period)
highs = 'h{}'.format(k_period)
dataset['%k'] = ((dataset['close'] - dataset[lows]) / (
dataset[highs] - dataset[lows])) * 100
return dataset
@staticmethod
def smooth_stok(dataset, smooth):
dataset['%k'] = dataset['%k'].rolling(window=smooth).mean()
return dataset
@staticmethod
def stod(dataset, d_period):
dataset['%d'] = dataset['%k'].rolling(window=d_period).mean()
return dataset
# RSI - Relative Strength Index
def rsi_indicator(self, period):
rsi = 'rsi{}'.format(period)
# Calculate differences between prices
deltas = np.diff(self.df['close'])
# For every row calculate rsi
for i, row in self.df.iterrows():
if i < period:
self.df.loc[i, rsi] = 0
else:
self.df.loc[i, rsi] = self.calc_rsi(i, period, deltas)
return self
@staticmethod
def calc_rsi(index, period, deltas):
seed = deltas[index - period:index]
average_gain = seed[seed >= 0].sum() / period
average_loss = seed[seed < 0].sum() / period
if abs(average_loss) == 0:
rs = 0
else:
rs = average_gain / abs(average_loss)
rsi = 100. - (100. / (1 + rs))
return rsi
| [((65, 17, 65, 42), 'numpy.diff', 'np.diff', ({(65, 25, 65, 41): "self.df['close']"}, {}), "(self.df['close'])", True, 'import numpy as np\n')] |
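A quick usage sketch for the `TechnicalIndicators` class in this row; the `close`/`low`/`high` column names follow the method bodies, while the toy price series and the chosen periods are made up for illustration:

```python
# Hypothetical usage of TechnicalIndicators on a tiny OHLC frame.
import pandas as pd

prices = pd.DataFrame({
    'close': [10, 11, 12, 11, 13, 14, 13, 15],
    'low':   [9, 10, 11, 10, 12, 13, 12, 14],
    'high':  [11, 12, 13, 12, 14, 15, 14, 16],
})

ti = TechnicalIndicators()               # class defined in the row above
ti.df = prices                           # the methods operate on ti.df
ti.ewma(3).stochastic_oscilator(3, 2)    # methods return self, so calls chain
print(ti.df[['EWMA3', '%k', '%d']].tail())
```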
makutas/CocktailWebsite | users/models.py | c5192e5fc2b750a32500f5c3421ed07e89c9c7e1 | from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    user_description = models.CharField(max_length=200, null=True)
    user_avatar = models.ImageField(null=True, blank=True)
    user_uploaded_recipes = models.IntegerField()  # Increment by 1 on upload

    def __str__(self):
        return f"{self.user.username}"
| [((6, 11, 6, 63), 'django.db.models.OneToOneField', 'models.OneToOneField', (), '', False, 'from django.db import models\n'), ((8, 23, 8, 66), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((9, 18, 9, 58), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import models\n'), ((10, 28, 10, 49), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n')] |
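A minimal sketch of how this profile model might be used from a Django shell, assuming a configured project with migrations applied; the import path and all field values are illustrative only:

```python
# Hypothetical usage of UserProfile (assumes a configured Django project).
from django.contrib.auth.models import User
from users.models import UserProfile  # import path assumed from the repo layout

user = User.objects.create_user(username="alice", password="not-a-real-password")
profile = UserProfile.objects.create(
    user=user,
    user_description="Enjoys experimenting with new cocktail recipes.",
    user_uploaded_recipes=0,
)
print(profile)  # __str__ returns the username, i.e. "alice"
```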
Samyak005/Multi-Hop-QG | deploy/trained_model.py | 15cc794a48ac9df058689c410007ea52b0e12a6a |
import torch
import logging
# Transformer version 4.9.1 - Newer versions may not work.
from transformers import AutoTokenizer
from trained_gpt_model import get_inference2
def t5_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def t5_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
# if __name__ == "__main__":
# review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subject–verb–object)."
# t5_supp_inference(review_text, md2, device)
def get_inference(answer, context, model_name):
valuation_text = "<answer> " + answer + " <context> " + context
if model_name == 't5_supp':
return t5_supp_inference(valuation_text)
elif model_name == 't5_full':
return t5_full_inference(valuation_text)
elif model_name == 'bart_supp':
return bart_supp_inference(valuation_text)
elif model_name == 'bart_full':
return bart_full_inference(valuation_text)
elif model_name == 'gpt2':
return get_inference2(answer, context)
| [((15, 16, 15, 63), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(15, 46, 15, 62): 'PRETRAINED_MODEL'}, {}), '(PRETRAINED_MODEL)', False, 'from transformers import AutoTokenizer\n'), ((21, 12, 21, 85), 'torch.load', 'torch.load', ({(21, 23, 21, 84): '"""../trained_models/t5_model_hotpot_supporting_facts_last.pth"""'}, {}), "('../trained_models/t5_model_hotpot_supporting_facts_last.pth')", False, 'import torch\n'), ((35, 4, 35, 52), 'logging.debug', 'logging.debug', ({(35, 18, 35, 51): "('Decoded string' + decoded_string)"}, {}), "('Decoded string' + decoded_string)", False, 'import logging\n'), ((48, 16, 48, 63), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(48, 46, 48, 62): 'PRETRAINED_MODEL'}, {}), '(PRETRAINED_MODEL)', False, 'from transformers import AutoTokenizer\n'), ((54, 12, 54, 81), 'torch.load', 'torch.load', ({(54, 23, 54, 80): '"""../trained_models/t5_model_hotpot_full_context_last.pth"""'}, {}), "('../trained_models/t5_model_hotpot_full_context_last.pth')", False, 'import torch\n'), ((68, 4, 68, 52), 'logging.debug', 'logging.debug', ({(68, 18, 68, 51): "('Decoded string' + decoded_string)"}, {}), "('Decoded string' + decoded_string)", False, 'import logging\n'), ((81, 16, 81, 63), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(81, 46, 81, 62): 'PRETRAINED_MODEL'}, {}), '(PRETRAINED_MODEL)', False, 'from transformers import AutoTokenizer\n'), ((87, 12, 87, 87), 'torch.load', 'torch.load', ({(87, 23, 87, 86): '"""../trained_models/bart_model_hotpot_supporting_facts_last.pth"""'}, {}), "('../trained_models/bart_model_hotpot_supporting_facts_last.pth')", False, 'import torch\n'), ((101, 4, 101, 52), 'logging.debug', 'logging.debug', ({(101, 18, 101, 51): "('Decoded string' + decoded_string)"}, {}), "('Decoded string' + decoded_string)", False, 'import logging\n'), ((114, 16, 114, 63), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', ({(114, 46, 114, 62): 'PRETRAINED_MODEL'}, {}), '(PRETRAINED_MODEL)', False, 'from transformers import AutoTokenizer\n'), ((120, 12, 120, 83), 'torch.load', 'torch.load', ({(120, 23, 120, 82): '"""../trained_models/bart_model_hotpot_full_context_last.pth"""'}, {}), "('../trained_models/bart_model_hotpot_full_context_last.pth')", False, 'import torch\n'), ((134, 4, 134, 52), 'logging.debug', 'logging.debug', ({(134, 18, 134, 51): "('Decoded string' + decoded_string)"}, {}), "('Decoded string' + decoded_string)", False, 'import logging\n'), ((32, 9, 32, 24), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((65, 9, 65, 24), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((98, 9, 98, 24), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((131, 9, 131, 24), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((10, 36, 10, 61), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((43, 36, 43, 61), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((76, 36, 76, 61), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((109, 36, 109, 61), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((158, 15, 158, 46), 'trained_gpt_model.get_inference2', 'get_inference2', ({(158, 30, 158, 36): 'answer', (158, 38, 158, 45): 'context'}, {}), '(answer, 
context)', False, 'from trained_gpt_model import get_inference2\n')] |
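A short sketch of calling the `get_inference` dispatcher defined in this row; the answer/context pair mirrors the commented-out example in the file, and the call only works if the corresponding checkpoint under `../trained_models/` actually exists:

```python
# Hypothetical call into get_inference (assumes the .pth checkpoints are present).
answer = "a fusional language"
context = ("Typologically, Estonian represents a transitional form from an "
           "agglutinating language to a fusional language.")

question = get_inference(answer, context, model_name="t5_supp")
print(question)
```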
shagunsodhani/ParlAI | parlai/agents/drqa/config.py | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import sys
import logging
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_cmdline_args(parser):
# Runtime environment
agent = parser.add_argument_group('DrQA Arguments')
agent.add_argument('--no_cuda', type='bool', default=False)
agent.add_argument('--gpu', type=int, default=-1)
agent.add_argument('--random_seed', type=int, default=1013)
# Basics
agent.add_argument('--embedding_file', type=str, default=None,
help='File of space separated embeddings: w e1 ... ed')
agent.add_argument('--pretrained_model', type=str, default=None,
help='Load dict/features/weights/opts from this file')
agent.add_argument('--log_file', type=str, default=None)
# Model details
agent.add_argument('--fix_embeddings', type='bool', default=True)
agent.add_argument('--tune_partial', type=int, default=0,
help='Train the K most frequent word embeddings')
agent.add_argument('--embedding_dim', type=int, default=300,
help=('Default embedding size if '
'embedding_file is not given'))
agent.add_argument('--hidden_size', type=int, default=128,
help='Hidden size of RNN units')
agent.add_argument('--doc_layers', type=int, default=3,
help='Number of RNN layers for passage')
agent.add_argument('--question_layers', type=int, default=3,
help='Number of RNN layers for question')
agent.add_argument('--rnn_type', type=str, default='lstm',
help='RNN type: lstm (default), gru, or rnn')
# Optimization details
agent.add_argument('--valid_metric', type=str,
choices=['accuracy', 'f1'], default='f1',
help='Metric for choosing best valid model')
agent.add_argument('--max_len', type=int, default=15,
help='The max span allowed during decoding')
agent.add_argument('--rnn_padding', type='bool', default=False)
agent.add_argument('--display_iter', type=int, default=10,
help='Print train error after every \
<display_iter> epoches (default 10)')
agent.add_argument('--dropout_emb', type=float, default=0.4,
help='Dropout rate for word embeddings')
agent.add_argument('--dropout_rnn', type=float, default=0.4,
help='Dropout rate for RNN states')
agent.add_argument('--dropout_rnn_output', type='bool', default=True,
help='Whether to dropout the RNN output')
agent.add_argument('--optimizer', type=str, default='adamax',
help='Optimizer: sgd or adamax (default)')
agent.add_argument('--learning_rate', '-lr', type=float, default=0.1,
help='Learning rate for SGD (default 0.1)')
agent.add_argument('--grad_clipping', type=float, default=10,
help='Gradient clipping (default 10.0)')
agent.add_argument('--weight_decay', type=float, default=0,
help='Weight decay (default 0)')
agent.add_argument('--momentum', type=float, default=0,
help='Momentum (default 0)')
# Model-specific
agent.add_argument('--concat_rnn_layers', type='bool', default=True)
agent.add_argument('--question_merge', type=str, default='self_attn',
help='The way of computing question representation')
agent.add_argument('--use_qemb', type='bool', default=True,
help='Whether to use weighted question embeddings')
agent.add_argument('--use_in_question', type='bool', default=True,
help='Whether to use in_question features')
agent.add_argument('--use_tf', type='bool', default=True,
help='Whether to use tf features')
agent.add_argument('--use_time', type=int, default=0,
help='Time features marking how recent word was said')
def set_defaults(opt):
# Embeddings options
if opt.get('embedding_file'):
if not os.path.isfile(opt['embedding_file']):
raise IOError('No such file: %s' % opt['embedding_file'])
with open(opt['embedding_file']) as f:
dim = len(f.readline().strip().split(' ')) - 1
opt['embedding_dim'] = dim
elif not opt.get('embedding_dim'):
raise RuntimeError(('Either embedding_file or embedding_dim '
'needs to be specified.'))
# Make sure tune_partial and fix_embeddings are consistent
if opt['tune_partial'] > 0 and opt['fix_embeddings']:
print('Setting fix_embeddings to False as tune_partial > 0.')
opt['fix_embeddings'] = False
# Make sure fix_embeddings and embedding_file are consistent
if opt['fix_embeddings']:
if not opt.get('embedding_file') and not opt.get('pretrained_model'):
print('Setting fix_embeddings to False as embeddings are random.')
opt['fix_embeddings'] = False
def override_args(opt, override_opt):
# Major model args are reset to the values in override_opt.
# Non-architecture args (like dropout) are kept.
args = set(['embedding_file', 'embedding_dim', 'hidden_size', 'doc_layers',
'question_layers', 'rnn_type', 'optimizer', 'concat_rnn_layers',
'question_merge', 'use_qemb', 'use_in_question', 'use_tf',
'vocab_size', 'num_features', 'use_time'])
for k, v in override_opt.items():
if k in args:
opt[k] = v
| [((87, 15, 87, 52), 'os.path.isfile', 'os.path.isfile', ({(87, 30, 87, 51): "opt['embedding_file']"}, {}), "(opt['embedding_file'])", False, 'import os\n')] |
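A small illustration of what `override_args` from this row does when given plain dicts; the option values are invented:

```python
# override_args replaces architecture keys and leaves other options untouched.
opt = {'hidden_size': 128, 'doc_layers': 3, 'dropout_emb': 0.4}
pretrained_opt = {'hidden_size': 256, 'doc_layers': 5, 'dropout_emb': 0.1}

override_args(opt, pretrained_opt)
print(opt)  # {'hidden_size': 256, 'doc_layers': 5, 'dropout_emb': 0.4}
```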
yongli82/CodeGenerator | gen4service/gen4bean.py | 4ca9255c3c4c5392e45815fd20f605ccbbfd2325 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
reload(sys)
sys.path.append("..")
sys.setdefaultencoding('utf-8')
from jinja2 import Environment
from jinja2 import Template
import re
from sqlalchemy import schema, types
from sqlalchemy.engine import create_engine
import yyutil
import CodeGen
project_name = "budget"
data_name = "BudgetReport"
table_name = "FC_BudgetBaseInfo"
searchBeanPackage="com.dianping.ba.finance.budget.api.beans"
searchBeanName="BudgetReportSearchBean"
searchBeanField="""
private int budgetTypeId;
private int costDepartmentId;
private String budgetOwnerNo;
private boolean exceedBudget;
private boolean withExpenseType;
private int beginYear;
private int beginMonth;
private int endYear;
private int endMonth;
"""
dataBeanPackage="com.dianping.ba.finance.budget.api.beans"
dataBeanName="BudgetYearReportDisplayBean"
dataBeanField="""
private int budgetYear;
private int budgetTypeId;
private String budgetTypeNo;
private String budgetTypeName;
private int costDepartmentId;
private String costDepartmentName;
private String budgetOwnerNo;
private String budgetOwnerName;
private int budgetStatus;
private String budgetStatusName;
private int budgetPlanId;
private String budgetPlanNo;
private int strategyId;
private int strategyPeriodType;
private String strategyPeriodTypeName;
private BigDecimal yearTotalAmount;
private BigDecimal yearAvailableAmount;
private BigDecimal yearUsedAmount;
private BigDecimal yearFrozenAmount;
private BigDecimal quarterTotalAmount1;
private BigDecimal quarterAvailableAmount1;
private BigDecimal quarterUsedAmount1;
private BigDecimal quarterFrozenAmount1;
private BigDecimal quarterTotalAmount2;
private BigDecimal quarterAvailableAmount2;
private BigDecimal quarterUsedAmount2;
private BigDecimal quarterFrozenAmount2;
private BigDecimal quarterTotalAmount3;
private BigDecimal quarterAvailableAmount3;
private BigDecimal quarterUsedAmount3;
private BigDecimal quarterFrozenAmount3;
private BigDecimal quarterTotalAmount4;
private BigDecimal quarterAvailableAmount4;
private BigDecimal quarterUsedAmount4;
private BigDecimal quarterFrozenAmount4;
private BigDecimal monthTotalAmount1;
private BigDecimal monthAvailableAmount1;
private BigDecimal monthUsedAmount1;
private BigDecimal monthFrozenAmount1;
private BigDecimal monthTotalAmount2;
private BigDecimal monthAvailableAmount2;
private BigDecimal monthUsedAmount2;
private BigDecimal monthFrozenAmount2;
private BigDecimal monthTotalAmount3;
private BigDecimal monthAvailableAmount3;
private BigDecimal monthUsedAmount3;
private BigDecimal monthFrozenAmount3;
private BigDecimal monthTotalAmount4;
private BigDecimal monthAvailableAmount4;
private BigDecimal monthUsedAmount4;
private BigDecimal monthFrozenAmount4;
private BigDecimal monthTotalAmount5;
private BigDecimal monthAvailableAmount5;
private BigDecimal monthUsedAmount5;
private BigDecimal monthFrozenAmount5;
private BigDecimal monthTotalAmount6;
private BigDecimal monthAvailableAmount6;
private BigDecimal monthUsedAmount6;
private BigDecimal monthFrozenAmount6;
private BigDecimal monthTotalAmount7;
private BigDecimal monthAvailableAmount7;
private BigDecimal monthUsedAmount7;
private BigDecimal monthFrozenAmount7;
private BigDecimal monthTotalAmount8;
private BigDecimal monthAvailableAmount8;
private BigDecimal monthUsedAmount8;
private BigDecimal monthFrozenAmount8;
private BigDecimal monthTotalAmount9;
private BigDecimal monthAvailableAmount9;
private BigDecimal monthUsedAmount9;
private BigDecimal monthFrozenAmount9;
private BigDecimal monthTotalAmount10;
private BigDecimal monthAvailableAmount10;
private BigDecimal monthUsedAmount10;
private BigDecimal monthFrozenAmount10;
private BigDecimal monthTotalAmount11;
private BigDecimal monthAvailableAmount11;
private BigDecimal monthUsedAmount11;
private BigDecimal monthFrozenAmount11;
private BigDecimal monthTotalAmount12;
private BigDecimal monthAvailableAmount12;
private BigDecimal monthUsedAmount12;
private BigDecimal monthFrozenAmount12;
"""
columns = yyutil.convert_bean_to_columns(dataBeanField)
search_columns = yyutil.convert_bean_to_columns(searchBeanField)
jinja2_env = CodeGen.getEnvironment("gen4service")
template = jinja2_env.get_template("bean_code_template.md")
#snippet = template.render(table_name=table_name, data_name=data_name, columns=columns)
snippet = template.render(locals())
print snippet
with open(data_name + "_generate.md", 'wb') as f:
f.write(snippet)
f.flush()
f.close()
os.system("open " + data_name + "_generate.md")
| [] |
zigzax/Basic_Python | Log_tao.py | d9d3256f2ac627e6e98991f73ab67ef8fcc4172d | Python 3.9.0 (tags/v3.9.0:9cf6752, Oct 5 2020, 15:34:40) [MSC v.1927 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import turtle
>>> tao = turtle.Turtle()
>>> tao.shape('turtle')
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.forward(100)
>>> tao.left(90)
>>> tao.reset
<bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>>
>>> tao.reset()
>>> for i in range(4)
SyntaxError: invalid syntax
>>> for i in range(4):
tao.forward(100)tao.left(90)
SyntaxError: invalid syntax
>>> for i in range(4):
tao.forward(100)
tao.left(90)
>>> range (4)
range(0, 4)
>>> list (range(4))
[0, 1, 2, 3]
>>> for i in range(5)
SyntaxError: invalid syntax
>>> for i in range(5):
print(i)
0
1
2
3
4
\
>>> for i in range(5):
print(i)
0
1
2
3
4
>>> for i in range[10,50,90]:
print(i)
Traceback (most recent call last):
File "<pyshell#28>", line 1, in <module>
for i in range[10,50,90]:
TypeError: 'type' object is not subscriptable
>>> for i in[10,50,90]:
print(i)
10
50
90
>>> range (1,10)
range(1, 10)
>>> list (range(1,10))
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> tao.reset()
>>> for i in range (4):
tao.forward(100)
tao.left(90)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
>>> tao.reset
<bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>>
>>> tao.reset()
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> tao.left(45)
>>> tao.forward(100)
>>> for i in range (8):
tao.forward(100)
tao.left(45)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
No. 4
No. 5
No. 6
No. 7
>>> tao.reset()
>>> for i in range (8):
tao.forward(100)
tao.left(45)
print('No.',i)
No. 0
No. 1
No. 2
No. 3
No. 4
No. 5
No. 6
No. 7
>>> tao.reset()
>>> def regtangle():
for i in range(4):
tao.forward(100)
tao.left(90)
>>> regtangle()
>>> tao.reset()
>>> for i in range(10):
regtangle()
tao.left(36)
>>> tao.reset()
>>> | [] |
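The interactive session above works its way up to drawing a square ten times while rotating 36 degrees between repetitions; the same pattern as a standalone script would look roughly like this:

```python
# Standalone version of the pattern built up interactively above.
import turtle

tao = turtle.Turtle()
tao.shape('turtle')

def regtangle():
    for i in range(4):
        tao.forward(100)
        tao.left(90)

for i in range(10):
    regtangle()
    tao.left(36)

turtle.done()  # keep the window open
```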
pome-ta/CodeMirror | run.py | ef39c3032ea128d988c263ed97851860db9f977c | """
Pythonista3 app CodeMirror
"""
import pythonista.wkwebview as wkwebview
import ui
import pathlib
uri = pathlib.Path('./main_index.html')
class View(ui.View):
    def __init__(self):
        self.wv = wkwebview.WKWebView(flex='WH')
        self.wv.load_url(str(uri))
        self.add_subview(self.wv)

    def will_close(self):
        self.wv.clear_cache()
_view = View()
_view.present(style='fullscreen', orientations=['portrait'])
| [((9, 6, 9, 39), 'pathlib.Path', 'pathlib.Path', ({(9, 19, 9, 38): '"""./main_index.html"""'}, {}), "('./main_index.html')", False, 'import pathlib\n'), ((14, 14, 14, 44), 'pythonista.wkwebview.WKWebView', 'wkwebview.WKWebView', (), '', True, 'import pythonista.wkwebview as wkwebview\n')] |
NPCai/graphene-py | gen_cnn_dataset.py | 50163eb65f55c25a3d090bad03e34304b1cb3037 | import wrapper as w
from multiprocessing import Process
from queue import Queue
import concurrent.futures
import atexit
import time

''' 8 Processes, 24 threads per process = 192 threads '''
NUM_PROCESSES = 8
workerList = []  # Worker processes

class Worker(Process):  # Need multiple threads or else it takes forever
    def __init__(self, queue):  # queue holds the sentences this worker is responsible for
        super().__init__()
        self.queue = queue
        self.outQueue = Queue()

    def run(self):
        def loadUrl():
            while not self.queue.empty():
                sentence = self.queue.get()
                ex = w.GrapheneExtract(sentence)
                self.outQueue.put(sentence.strip() + "\t" + str(ex.json) + "\n")

        with concurrent.futures.ThreadPoolExecutor(max_workers=24) as executor:
            for _ in range(24):
                executor.submit(loadUrl)

queues = []  # Use separate queues to avoid waiting for locks
with open("data/all_news.txt", "r") as news:
    lines = news.readlines()
    step = max(1, len(lines) // NUM_PROCESSES)
    for i in range(NUM_PROCESSES):
        queue = Queue()
        chunk = lines[i * step:] if i == NUM_PROCESSES - 1 else lines[i * step:(i + 1) * step]
        for line in chunk:
            queue.put(line.strip())
        queues.append(queue)
print("Queue populated")

for i in range(NUM_PROCESSES):
    worker = Worker(queues[i])
    worker.daemon = True
    worker.start()
    workerList.append(worker)

def close_running_threads():
    for thread in workerList:
        thread.join()

atexit.register(close_running_threads)
print("All threads registered and working.")

while True:
    print(queue.qsize(), "sentences remaining to be requested")
    time.sleep(2)  # Print every two seconds
EikanWang/pytorch | torch/_prims/context.py | 823ddb6e87e8111c9b5a99523503172e5bf62c49 | from typing import Callable, Sequence, Any, Dict
import functools
import torch
import torch.overrides
from torch._prims.utils import torch_function_passthrough
import torch._refs as refs
import torch._refs
import torch._refs.nn
import torch._refs.nn.functional
import torch._refs.special
import torch._prims
# TODO: automap torch operations to references
# (need to throw a good assertion if the mapping doesn't exist)
_torch_to_reference_map = {
torch.add: refs.add,
# torch.div: refs.div,
torch.mul: refs.mul,
torch.ge: refs.ge,
torch.gt: refs.gt,
torch.le: refs.le,
torch.lt: refs.lt,
}
@functools.lru_cache(None)
def torch_to_refs_map():
"""
Mapping of torch API functions to torch._refs functions.
E.g. torch_to_refs_map()[torch.add] == torch._refs.add
"""
modules = [
(torch, torch._refs),
(torch.nn, torch._refs.nn),
(torch.nn.functional, torch._refs.nn.functional),
(torch.special, torch._refs.special),
]
r = {}
for mod_torch, mod_refs in modules:
for s in mod_refs.__all__: # type: ignore[attr-defined]
r[mod_torch.__dict__.get(s)] = mod_refs.__dict__.get(s)
return r
@functools.lru_cache(None)
def all_prims():
"""
Set of all prim functions, e.g., torch._prims.add in all_prims()
"""
return {torch._prims.__dict__.get(s) for s in torch._prims.__all__}
class TorchRefsMode(torch.overrides.TorchFunctionMode):
"""
Switches the interpretation of torch.* functions and Tensor methods to
use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.)
>>> with TorchRefsMode.push():
... torch.add(x, y) # calls torch._refs.add(x, y)
By default, this context manager will fall back on the torch.* if the
ref does not exist; set strict=True to error if this occurs.
"""
def __init__(self, strict=False):
self.strict = strict
def __torch_function__(
self,
orig_func: Callable,
types: Sequence,
args: Sequence[Any] = (),
kwargs: Dict = None,
):
if kwargs is None:
kwargs = {}
# For primitive operations, run them as is without interception
if orig_func in torch_function_passthrough or orig_func in all_prims():
return orig_func(*args, **kwargs)
mapping = torch_to_refs_map()
func = mapping.get(orig_func, None)
if func is not None:
return func(*args, **kwargs)
if self.strict:
raise RuntimeError(
f"no _refs support for {torch.overrides.resolve_name(orig_func)}"
)
return orig_func(*args, **kwargs)
| [((32, 1, 32, 26), 'functools.lru_cache', 'functools.lru_cache', ({(32, 21, 32, 25): 'None'}, {}), '(None)', False, 'import functools\n'), ((51, 1, 51, 26), 'functools.lru_cache', 'functools.lru_cache', ({(51, 21, 51, 25): 'None'}, {}), '(None)', False, 'import functools\n'), ((56, 12, 56, 40), 'torch._prims.__dict__.get', 'torch._prims.__dict__.get', ({(56, 38, 56, 39): 's'}, {}), '(s)', False, 'import torch\n'), ((92, 40, 92, 79), 'torch.overrides.resolve_name', 'torch.overrides.resolve_name', ({(92, 69, 92, 78): 'orig_func'}, {}), '(orig_func)', False, 'import torch\n')] |
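A usage sketch based on the docstring of `TorchRefsMode` above; whether the mode is entered via `.push()` or used directly as a context manager has changed across PyTorch versions, so treat the exact entry point as an assumption:

```python
# Hypothetical usage: inside the mode, torch.add is answered by torch._refs.add.
import torch

x = torch.randn(3)
y = torch.randn(3)

with TorchRefsMode():
    z = torch.add(x, y)  # dispatched through __torch_function__ to the ref
print(z)
```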
cotsog/pathways-backend | search/tests/test_read_similarities.py | 9231731359fc97833dbdbca33ac23eebeac4f715 | from django.test import TestCase
from search.read_similarities import build_manual_similarity_map
from common.testhelpers.random_test_values import a_string, a_float
class TestReadingManualTaskSimilarities(TestCase):
def test_convert_matrix_to_map_from_topic_to_array_of_services(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_multiple_services_for_a_topic(self):
data = [
['topic1', ],
['service1'],
['service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service2', 'service3'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_different_numbers_of_services_for_different_topics(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['service3'],
]
expected_result = {
'topic1': ['service1', 'service3'],
'topic2': ['service2'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
def test_can_handle_empty_entries(self):
data = [
['topic1', 'topic2'],
['service1', 'service2'],
['', 'service3'],
[None, 'service4'],
]
expected_result = {
'topic1': ['service1'],
'topic2': ['service2', 'service3', 'service4'],
}
result = build_manual_similarity_map(data)
self.assertEqual(result, expected_result)
| [((16, 17, 16, 50), 'search.read_similarities.build_manual_similarity_map', 'build_manual_similarity_map', ({(16, 45, 16, 49): 'data'}, {}), '(data)', False, 'from search.read_similarities import build_manual_similarity_map\n'), ((29, 17, 29, 50), 'search.read_similarities.build_manual_similarity_map', 'build_manual_similarity_map', ({(29, 45, 29, 49): 'data'}, {}), '(data)', False, 'from search.read_similarities import build_manual_similarity_map\n'), ((42, 17, 42, 50), 'search.read_similarities.build_manual_similarity_map', 'build_manual_similarity_map', ({(42, 45, 42, 49): 'data'}, {}), '(data)', False, 'from search.read_similarities import build_manual_similarity_map\n'), ((56, 17, 56, 50), 'search.read_similarities.build_manual_similarity_map', 'build_manual_similarity_map', ({(56, 45, 56, 49): 'data'}, {}), '(data)', False, 'from search.read_similarities import build_manual_similarity_map\n')] |
Zabamund/HackCPH18 | fortuna/fortuna.py | 3855547824c6277ca6f4e7b97c3ad0b3829e266b | """
Fortuna
Python project to visualize uncertainty in probabilistic exploration models.
Created on 09/06/2018
@authors: Natalia Shchukina, Graham Brew, Marco van Veen, Behrooz Bashokooh, Tobias Stål, Robert Leckenby
"""
# Import libraries
import numpy as np
import glob
from matplotlib import pyplot as plt
import pandas as pd
import xarray as xr
import pyproj as proj
from scipy.stats import norm
class Fortuna(object):
"""
Class to load the fortuna dataset and call different methods for visualization in a web frontend.
Args:
There are no required arguments at the moment. Input files could be defined.
"""
def __init__(self, **kwargs):
"""
Method that is called when a object of the class Fortuna is initiated, it imports the data and directly creates some important variables.
"""
# hardcode geometry
self.size_raster = (250,162)
self.X_corner = 390885
self.Y_corner = 7156947
self.dx, self.dy, self.dz = 25, 25, 100
self.top_model = 950
self.bottom_model = 1050
self.base_cube = None
self.top_cube = None
self.base_n = None
self.top_n = None
self.vol = None
# Create empty xarray dataset
self.ds = xr.Dataset()
self.xx = None
self.yy = None
self.zz = None
self.model = None
self.base_mean = None
self.base_std = None
self.top_mean = None
self.top_std = None
## Initial methods to load
self.import_data()
self.calc_xarray()
self.calc_stat()
### Methods for initiating the object
def folder2cube(self, files):
"""
Method to read a file.
"""
base_set = glob.glob(files)
cube = np.zeros(self.size_raster + (len(base_set),))
for i, model in enumerate(base_set):
cube[:, :, i] = np.loadtxt(model, skiprows=1).reshape(self.size_raster)
return cube, len(base_set)
def import_data(self):
"""
Method to load different data objects from files.
"""
self.base_cube, self.base_n = self.folder2cube('data/Hackaton/BaseSet/MapSimu__*.data')
self.top_cube, self.top_n = self.folder2cube('data/Hackaton/TopSet/MapSimu__*.data')
self.vol = pd.read_csv('data/Hackaton/VolumeDistribution/Volumes', delim_whitespace=True)
def calc_xarray (self):
self.xx = np.linspace(self.X_corner, self.X_corner + self.size_raster[0] * self.dx, self.size_raster[0])
self.yy = np.linspace(self.Y_corner, self.Y_corner + self.size_raster[1] * self.dy, self.size_raster[1])
self.zz = np.linspace(self.top_model, self.bottom_model, self.dz)
self.model = np.linspace(0, self.top_model, self.base_n)
self.ds.coords['X'] = self.xx
self.ds.coords['Y'] = self.yy
self.ds.coords['Z'] = self.zz
self.ds.coords['MODEL'] = self.model
self.ds['BASE'] = (('X', 'Y', 'MODEL'), self.base_cube)
self.ds['TOP'] = (('X', 'Y', 'MODEL'), self.top_cube)
def calc_stat (self):
self.base_mean = self.ds['BASE'].mean(dim='MODEL')
self.base_std = self.ds['BASE'].std(dim='MODEL')
self.top_mean = self.ds['TOP'].mean(dim='MODEL')
self.top_std = self.ds['TOP'].std(dim='MODEL')
## Data Management methods
def load_pickle(self, path):
return np.load(path)
## Methods to compute different uncertainty cubes --> cubes to be displayed in the frontend
def calc_lithology(self, iterations = 2):
"""
Sample from both distributions and fill each z-stack accordingly
"""
# create empty array
block = np.zeros((iterations, self.size_raster[0], self.size_raster[1], self.zz.size), dtype='int8')
for i in range(iterations):
for j in range(self.size_raster[0]): # size_raster[0]
for k in range(self.size_raster[1]):
# sample from top and base distributions for specific x,y position
top = np.random.normal(self.top_mean[j, k], self.top_std[j, k])
base = np.random.normal(self.base_mean[j, k], self.base_std[j, k])
# iterate over vertical z-stack
for l in range(self.zz.size):
if self.zz[l] <= top:
block[i, j, k, l] = 1
elif self.zz[l] > base:
block[i, j, k, l] = 3
elif ((self.zz[l] > top) and (l <= base)):
block[i, j, k, l] = 2
return block
def calc_lithology_vect(self, iterations=2):
"""
Resample from z value statistics and fill each z-stack in a lithology block accordingly.
This is the new method with vectorized operations to speed up calculations.
"""
# create empty array
block = np.zeros((iterations, self.xx.size, self.yy.size, self.zz.size), dtype='int8')
for i in range(iterations):
# create meshgrids grid for coordinate-wise iterations
mesh_x, mesh_y, mesh_z = np.meshgrid(np.arange(self.xx.size),
np.arange(self.yy.size),
np.arange(self.zz.size))
# sample from top and base distributions for specific x,y position
top = np.zeros([self.xx.size, self.yy.size])
base = np.zeros([self.xx.size, self.yy.size])
top[mesh_x, mesh_y] = np.random.normal(self.top_mean.values[mesh_x, mesh_y],
self.top_std.values[mesh_x, mesh_y])
base[mesh_x, mesh_y] = np.random.normal(self.top_mean.values[mesh_x, mesh_y],
self.top_std.values[mesh_x, mesh_y])
# compare each cell to resampled reference values
# TODO generalize for any number of lithologies
block[i, mesh_x, mesh_y, mesh_z] = np.where(self.zz < top[mesh_x, mesh_y], 1,
np.where(self.zz < base[mesh_x, mesh_y], 2, 3))
return block
### Modified from GemPy!
def calc_probability_lithology(self, cube):
"""Blocks must be just the lith blocks!"""
lith_blocks = cube.reshape([cube.shape[0], (self.xx.size * self.yy.size * self.zz.size)])
lith_id = np.unique(lith_blocks)
# lith_count = np.zeros_like(lith_blocks[0:len(lith_id)])
lith_count = np.zeros((len(np.unique(lith_blocks)), lith_blocks.shape[1]))
for i, l_id in enumerate(lith_id):
lith_count[i] = np.sum(lith_blocks == l_id, axis=0)
lith_prob = lith_count / len(lith_blocks)
return lith_prob
### Modified from GemPy!
def calc_information_entropy(self, lith_prob):
"""Calculates information entropy for the given probability array."""
cube = np.zeros_like(lith_prob[0])
for l in lith_prob:
pm = np.ma.masked_equal(l, 0) # mask where layer prob is 0
cube -= (pm * np.ma.log2(pm)).filled(0)
return cube.reshape([self.xx.size, self.yy.size, self.zz.size])
# Try numpy.flatten and numpy.ravel
## Simple plotting methods
def plot_entropy(self, cube, slice=10):
plt.imshow(cube[slice, :, :].T, origin='upperleft', cmap='viridis')
plt.show() | [((54, 18, 54, 30), 'xarray.Dataset', 'xr.Dataset', ({}, {}), '()', True, 'import xarray as xr\n'), ((80, 19, 80, 35), 'glob.glob', 'glob.glob', ({(80, 29, 80, 34): 'files'}, {}), '(files)', False, 'import glob\n'), ((94, 19, 94, 97), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((98, 18, 98, 112), 'numpy.linspace', 'np.linspace', ({(98, 30, 98, 43): 'self.X_corner', (98, 45, 98, 90): 'self.X_corner + self.size_raster[0] * self.dx', (98, 92, 98, 111): 'self.size_raster[0]'}, {}), '(self.X_corner, self.X_corner + self.size_raster[0] * self.dx,\n self.size_raster[0])', True, 'import numpy as np\n'), ((99, 18, 99, 112), 'numpy.linspace', 'np.linspace', ({(99, 30, 99, 43): 'self.Y_corner', (99, 45, 99, 90): 'self.Y_corner + self.size_raster[1] * self.dy', (99, 92, 99, 111): 'self.size_raster[1]'}, {}), '(self.Y_corner, self.Y_corner + self.size_raster[1] * self.dy,\n self.size_raster[1])', True, 'import numpy as np\n'), ((100, 18, 100, 73), 'numpy.linspace', 'np.linspace', ({(100, 30, 100, 44): 'self.top_model', (100, 46, 100, 63): 'self.bottom_model', (100, 65, 100, 72): 'self.dz'}, {}), '(self.top_model, self.bottom_model, self.dz)', True, 'import numpy as np\n'), ((102, 21, 102, 64), 'numpy.linspace', 'np.linspace', ({(102, 33, 102, 34): '0', (102, 36, 102, 50): 'self.top_model', (102, 52, 102, 63): 'self.base_n'}, {}), '(0, self.top_model, self.base_n)', True, 'import numpy as np\n'), ((125, 15, 125, 28), 'numpy.load', 'np.load', ({(125, 23, 125, 27): 'path'}, {}), '(path)', True, 'import numpy as np\n'), ((135, 16, 135, 108), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((165, 16, 165, 94), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((195, 18, 195, 40), 'numpy.unique', 'np.unique', ({(195, 28, 195, 39): 'lith_blocks'}, {}), '(lith_blocks)', True, 'import numpy as np\n'), ((207, 15, 207, 42), 'numpy.zeros_like', 'np.zeros_like', ({(207, 29, 207, 41): 'lith_prob[0]'}, {}), '(lith_prob[0])', True, 'import numpy as np\n'), ((217, 8, 217, 75), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'from matplotlib import pyplot as plt\n'), ((218, 8, 218, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((175, 18, 175, 56), 'numpy.zeros', 'np.zeros', ({(175, 27, 175, 55): '[self.xx.size, self.yy.size]'}, {}), '([self.xx.size, self.yy.size])', True, 'import numpy as np\n'), ((176, 19, 176, 57), 'numpy.zeros', 'np.zeros', ({(176, 28, 176, 56): '[self.xx.size, self.yy.size]'}, {}), '([self.xx.size, self.yy.size])', True, 'import numpy as np\n'), ((178, 34, 179, 87), 'numpy.random.normal', 'np.random.normal', ({(178, 51, 178, 87): 'self.top_mean.values[mesh_x, mesh_y]', (179, 51, 179, 86): 'self.top_std.values[mesh_x, mesh_y]'}, {}), '(self.top_mean.values[mesh_x, mesh_y], self.top_std.values[\n mesh_x, mesh_y])', True, 'import numpy as np\n'), ((180, 35, 181, 88), 'numpy.random.normal', 'np.random.normal', ({(180, 52, 180, 88): 'self.top_mean.values[mesh_x, mesh_y]', (181, 52, 181, 87): 'self.top_std.values[mesh_x, mesh_y]'}, {}), '(self.top_mean.values[mesh_x, mesh_y], self.top_std.values[\n mesh_x, mesh_y])', True, 'import numpy as np\n'), ((199, 28, 199, 63), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((209, 17, 209, 41), 'numpy.ma.masked_equal', 'np.ma.masked_equal', ({(209, 36, 209, 37): 'l', (209, 39, 209, 40): '0'}, {}), '(l, 0)', True, 'import numpy as np\n'), ((170, 49, 170, 72), 'numpy.arange', 'np.arange', ({(170, 59, 170, 
71): 'self.xx.size'}, {}), '(self.xx.size)', True, 'import numpy as np\n'), ((171, 49, 171, 72), 'numpy.arange', 'np.arange', ({(171, 59, 171, 71): 'self.yy.size'}, {}), '(self.yy.size)', True, 'import numpy as np\n'), ((172, 49, 172, 72), 'numpy.arange', 'np.arange', ({(172, 59, 172, 71): 'self.zz.size'}, {}), '(self.zz.size)', True, 'import numpy as np\n'), ((186, 56, 186, 102), 'numpy.where', 'np.where', ({(186, 65, 186, 95): 'self.zz < base[mesh_x, mesh_y]', (186, 97, 186, 98): '2', (186, 100, 186, 101): '3'}, {}), '(self.zz < base[mesh_x, mesh_y], 2, 3)', True, 'import numpy as np\n'), ((83, 28, 83, 57), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((142, 26, 142, 83), 'numpy.random.normal', 'np.random.normal', ({(142, 43, 142, 62): 'self.top_mean[j, k]', (142, 64, 142, 82): 'self.top_std[j, k]'}, {}), '(self.top_mean[j, k], self.top_std[j, k])', True, 'import numpy as np\n'), ((143, 27, 143, 86), 'numpy.random.normal', 'np.random.normal', ({(143, 44, 143, 64): 'self.base_mean[j, k]', (143, 66, 143, 85): 'self.base_std[j, k]'}, {}), '(self.base_mean[j, k], self.base_std[j, k])', True, 'import numpy as np\n'), ((197, 35, 197, 57), 'numpy.unique', 'np.unique', ({(197, 45, 197, 56): 'lith_blocks'}, {}), '(lith_blocks)', True, 'import numpy as np\n'), ((210, 26, 210, 40), 'numpy.ma.log2', 'np.ma.log2', ({(210, 37, 210, 39): 'pm'}, {}), '(pm)', True, 'import numpy as np\n')] |
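`calc_information_entropy` above applies the cell-wise formula H = -sum_i p_i * log2(p_i) to lithology probabilities; a tiny standalone check of that formula, independent of the class and its data files (the probability values are made up):

```python
# Minimal check of the entropy formula used in calc_information_entropy.
import numpy as np

# Probabilities of three lithologies in four cells (each column sums to 1).
lith_prob = np.array([
    [1.0, 0.5, 0.25, 1 / 3],
    [0.0, 0.5, 0.25, 1 / 3],
    [0.0, 0.0, 0.50, 1 / 3],
])

cube = np.zeros_like(lith_prob[0])
for l in lith_prob:
    pm = np.ma.masked_equal(l, 0)            # mask cells where this lithology has probability 0
    cube -= (pm * np.ma.log2(pm)).filled(0)
print(cube)  # [0.  1.  1.5  ~1.585] -> higher entropy means more uncertainty
```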
Linx3/6.867-Final-Project | resize.py | 374d7093159be0bc524b291bacad52741f6bdc95 | from PIL import Image
# open an image file (.bmp,.jpg,.png,.gif) you have in the working folder
# //imageFile = "03802.png"
import os
arr=os.listdir()
for imageFile in arr:
    if "png" in imageFile:
        im1 = Image.open(imageFile)
        # adjust width and height to your needs
        width = 416
        height = 416
        # use one of these filter options to resize the image
        im2 = im1.resize((width, height), Image.NEAREST)      # use nearest neighbour
        # im3 = im1.resize((width, height), Image.BILINEAR)   # linear interpolation in a 2x2 environment
        # im4 = im1.resize((width, height), Image.BICUBIC)    # cubic spline interpolation in a 4x4 environment
        # im5 = im1.resize((width, height), Image.ANTIALIAS)  # best down-sizing filter
        ext = ".png"
        # print(imageFile.split(".")[0])
        num = imageFile.split(".")[0]
        print(num)
        print(type(num))
        im2.save(imageFile)
        # im2.save(imageFile + ext)
        # im3.save("BILINEAR" + ext)
        # im4.save("BICUBIC" + ext)
        # im5.save("ANTIALIAS" + ext)
| [((7, 4, 7, 16), 'os.listdir', 'os.listdir', ({}, {}), '()', False, 'import os\n'), ((11, 14, 11, 35), 'PIL.Image.open', 'Image.open', ({(11, 25, 11, 34): 'imageFile'}, {}), '(imageFile)', False, 'from PIL import Image\n')] |
UnBParadigmas/2020.1_G2_SMA_DarwInPython | src/game/exceptions.py | 34cdc979a95f827f230bd4f13442f6c67d81ba2b | class InvalidMovementException(Exception):
    pass

class InvalidMovementTargetException(InvalidMovementException):
    pass

class InvalidMovimentOriginException(InvalidMovementException):
    pass
simonepri/fever-transformers | src/pipeline/sentence-retrieval/run.py | 3e9c57b0b4e781f318438d48589a56db709124c4 | #!/usr/bin/env python3
import argparse
import bisect
import csv
import json
import os
from collections import defaultdict
from functools import reduce
from tqdm import tqdm
def get_best_evidence(scores_file, max_sentences_per_claim):
weighted_claim_evidence = defaultdict(lambda: [])
with open(scores_file, "r") as f:
nlines = reduce(lambda a, b: a + b, map(lambda x: 1, f.readlines()), 0)
f.seek(0)
lines = csv.reader(f, delimiter="\t")
for line in tqdm(lines, desc="Score", total=nlines):
claim_id, claim, page, sent_id, sent, score = line
claim_id, sent_id, score = int(claim_id), int(sent_id), float(score)
evid = (page, sent_id, sent)
bisect.insort(weighted_claim_evidence[claim_id], (-score, evid))
if len(weighted_claim_evidence[claim_id]) > max_sentences_per_claim:
weighted_claim_evidence[claim_id].pop()
for claim_id in weighted_claim_evidence:
for i, (score, evid) in enumerate(weighted_claim_evidence[claim_id]):
weighted_claim_evidence[claim_id][i] = (-score, evid)
return weighted_claim_evidence
def main(scores_file, in_file, out_file, max_sentences_per_claim=None):
path = os.getcwd()
scores_file = os.path.join(path, scores_file)
in_file = os.path.join(path, in_file)
out_file = os.path.join(path, out_file)
best_evidence = get_best_evidence(scores_file, max_sentences_per_claim)
with open(out_file, "w+") as fout:
with open(in_file, "r") as fin:
nlines = reduce(lambda a, b: a + b, map(lambda x: 1, fin.readlines()), 0)
fin.seek(0)
lines = map(json.loads, fin.readlines())
for line in tqdm(lines, desc="Claim", total=nlines):
claim_id = line["id"]
line["predicted_sentences"] = best_evidence[claim_id]
fout.write(json.dumps(line) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scores-file", type=str)
parser.add_argument("--in-file", type=str, help="input dataset")
parser.add_argument("--out-file", type=str,
help="path to save output dataset")
parser.add_argument("--max-sentences-per-claim", type=int,
help="number of top sentences to return for each claim")
args = parser.parse_args()
main(args.scores_file, args.in_file, args.out_file, max_sentences_per_claim=args.max_sentences_per_claim)
| [((15, 30, 15, 53), 'collections.defaultdict', 'defaultdict', ({(15, 42, 15, 52): 'lambda : []'}, {}), '(lambda : [])', False, 'from collections import defaultdict\n'), ((34, 11, 34, 22), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((35, 18, 35, 49), 'os.path.join', 'os.path.join', ({(35, 31, 35, 35): 'path', (35, 37, 35, 48): 'scores_file'}, {}), '(path, scores_file)', False, 'import os\n'), ((36, 14, 36, 41), 'os.path.join', 'os.path.join', ({(36, 27, 36, 31): 'path', (36, 33, 36, 40): 'in_file'}, {}), '(path, in_file)', False, 'import os\n'), ((37, 15, 37, 43), 'os.path.join', 'os.path.join', ({(37, 28, 37, 32): 'path', (37, 34, 37, 42): 'out_file'}, {}), '(path, out_file)', False, 'import os\n'), ((53, 13, 53, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((19, 16, 19, 45), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((20, 20, 20, 59), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((24, 12, 24, 76), 'bisect.insort', 'bisect.insort', ({(24, 26, 24, 59): 'weighted_claim_evidence[claim_id]', (24, 61, 24, 75): '(-score, evid)'}, {}), '(weighted_claim_evidence[claim_id], (-score, evid))', False, 'import bisect\n'), ((46, 24, 46, 63), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((49, 27, 49, 43), 'json.dumps', 'json.dumps', ({(49, 38, 49, 42): 'line'}, {}), '(line)', False, 'import json\n')] |
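`get_best_evidence` above keeps only the top sentences per claim by inserting `(-score, evidence)` tuples with `bisect.insort` and popping the tail; a small self-contained illustration of that trick (scores and page names are made up):

```python
# Minimal illustration of the top-k trick used in get_best_evidence.
import bisect

max_keep = 3
best = []  # kept sorted ascending by -score, i.e. highest score first
for page, sent_id, score in [("A", 0, 0.2), ("B", 1, 0.9), ("C", 2, 0.5), ("D", 3, 0.7)]:
    bisect.insort(best, (-score, (page, sent_id)))
    if len(best) > max_keep:
        best.pop()  # drop the current worst entry
print([(evid, -neg) for neg, evid in best])
# [(('B', 1), 0.9), (('D', 3), 0.7), (('C', 2), 0.5)]
```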
KOTBOTS/Telegram-CloneBot | bot/__main__.py | 446d66ba46817f784e8de2b8bd2966865ee1965f | from telegram.ext import CommandHandler, run_async
from bot.gDrive import GoogleDriveHelper
from bot.fs_utils import get_readable_file_size
from bot import LOGGER, dispatcher, updater, bot
from bot.config import BOT_TOKEN, OWNER_ID, GDRIVE_FOLDER_ID
from bot.decorators import is_authorised, is_owner
from telegram.error import TimedOut, BadRequest
from bot.clone_status import CloneStatus
from bot.msg_utils import deleteMessage, sendMessage
import time
REPO_LINK = "https://t.me/KOT_BOTS"
# Soon to be used for direct updates from within the bot.
@run_async
def start(update, context):
sendMessage("Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!" \
"\nSend /help for checking all available commands.",
context.bot, update, 'Markdown')
# ;-;
@run_async
def helper(update, context):
sendMessage("Here are the available commands of the bot\n\n" \
"*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`" \
"\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone." \
"\n\nYou can also *ignore folders* from clone process by doing the following:\n" \
"`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message." \
"*Make sure to not put any space between commas (,).*\n" \
f"Source of this bot: [GitHub]({REPO_LINK})", context.bot, update, 'Markdown')
# TODO Cancel Clones with /cancel command.
@run_async
@is_authorised
def cloneNode(update, context):
args = update.message.text.split(" ")
if len(args) > 1:
link = args[1]
try:
ignoreList = args[-1].split(',')
except IndexError:
ignoreList = []
DESTINATION_ID = GDRIVE_FOLDER_ID
try:
DESTINATION_ID = args[2]
print(DESTINATION_ID)
except IndexError:
pass
# Usage: /clone <FolderToClone> <Destination> <IDtoIgnoreFromClone>,<IDtoIgnoreFromClone>
msg = sendMessage(f"<b>Cloning:</b> <code>{link}</code>", context.bot, update)
status_class = CloneStatus()
gd = GoogleDriveHelper(GFolder_ID=DESTINATION_ID)
sendCloneStatus(update, context, status_class, msg, link)
result = gd.clone(link, status_class, ignoreList=ignoreList)
deleteMessage(context.bot, msg)
status_class.set_status(True)
sendMessage(result, context.bot, update)
else:
sendMessage("Please Provide a Google Drive Shared Link to Clone.", bot, update)
@run_async
def sendCloneStatus(update, context, status, msg, link):
old_text = ''
while not status.done():
sleeper(3)
try:
text=f'🔗 *Cloning:* [{status.MainFolderName}]({status.MainFolderLink})\n━━━━━━━━━━━━━━\n🗃️ *Current File:* `{status.get_name()}`\n⬆️ *Transferred*: `{status.get_size()}`\n📁 *Destination:* [{status.DestinationFolderName}]({status.DestinationFolderLink})'
if status.checkFileStatus():
text += f"\n🕒 *Checking Existing Files:* `{str(status.checkFileStatus())}`"
if not text == old_text:
msg.edit_text(text=text, parse_mode="Markdown", timeout=200)
old_text = text
except Exception as e:
LOGGER.error(e)
if str(e) == "Message to edit not found":
break
sleeper(2)
continue
return
def sleeper(value, enabled=True):
time.sleep(int(value))
return
@run_async
@is_owner
def sendLogs(update, context):
with open('log.txt', 'rb') as f:
bot.send_document(document=f, filename=f.name,
reply_to_message_id=update.message.message_id,
chat_id=update.message.chat_id)
def main():
LOGGER.info("Bot Started!")
clone_handler = CommandHandler('clone', cloneNode)
start_handler = CommandHandler('start', start)
help_handler = CommandHandler('help', helper)
log_handler = CommandHandler('logs', sendLogs)
dispatcher.add_handler(log_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(clone_handler)
dispatcher.add_handler(help_handler)
updater.start_polling()
main()
| [((17, 4, 19, 36), 'bot.msg_utils.sendMessage', 'sendMessage', ({(17, 16, 18, 59): '"""Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!\nSend /help for checking all available commands."""', (19, 4, 19, 15): 'context.bot', (19, 17, 19, 23): 'update', (19, 25, 19, 35): '"""Markdown"""'}, {}), '(\n """Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!\nSend /help for checking all available commands."""\n , context.bot, update, \'Markdown\')', False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((24, 4, 30, 102), 'bot.msg_utils.sendMessage', 'sendMessage', ({(24, 16, 30, 68): 'f"""Here are the available commands of the bot\n\n*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone.\n\nYou can also *ignore folders* from clone process by doing the following:\n`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message.*Make sure to not put any space between commas (,).*\nSource of this bot: [GitHub]({REPO_LINK})"""', (30, 70, 30, 81): 'context.bot', (30, 83, 30, 89): 'update', (30, 91, 30, 101): '"""Markdown"""'}, {}), '(\n f"""Here are the available commands of the bot\n\n*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone.\n\nYou can also *ignore folders* from clone process by doing the following:\n`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message.*Make sure to not put any space between commas (,).*\nSource of this bot: [GitHub]({REPO_LINK})"""\n , context.bot, update, \'Markdown\')', False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((97, 4, 97, 31), 'bot.LOGGER.info', 'LOGGER.info', ({(97, 16, 97, 30): '"""Bot Started!"""'}, {}), "('Bot Started!')", False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((98, 20, 98, 54), 'telegram.ext.CommandHandler', 'CommandHandler', ({(98, 35, 98, 42): '"""clone"""', (98, 44, 98, 53): 'cloneNode'}, {}), "('clone', cloneNode)", False, 'from telegram.ext import CommandHandler, run_async\n'), ((99, 20, 99, 50), 'telegram.ext.CommandHandler', 'CommandHandler', ({(99, 35, 99, 42): '"""start"""', (99, 44, 99, 49): 'start'}, {}), "('start', start)", False, 'from telegram.ext import CommandHandler, run_async\n'), ((100, 19, 100, 49), 'telegram.ext.CommandHandler', 'CommandHandler', ({(100, 34, 100, 40): '"""help"""', (100, 42, 100, 48): 'helper'}, {}), "('help', helper)", False, 'from telegram.ext import CommandHandler, run_async\n'), ((101, 18, 101, 50), 'telegram.ext.CommandHandler', 'CommandHandler', ({(101, 33, 101, 39): '"""logs"""', (101, 41, 101, 49): 'sendLogs'}, {}), "('logs', sendLogs)", False, 'from telegram.ext import CommandHandler, run_async\n'), ((102, 4, 102, 39), 'bot.dispatcher.add_handler', 'dispatcher.add_handler', ({(102, 27, 102, 38): 'log_handler'}, {}), '(log_handler)', False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((103, 4, 103, 41), 'bot.dispatcher.add_handler', 'dispatcher.add_handler', ({(103, 27, 103, 40): 
'start_handler'}, {}), '(start_handler)', False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((104, 4, 104, 41), 'bot.dispatcher.add_handler', 'dispatcher.add_handler', ({(104, 27, 104, 40): 'clone_handler'}, {}), '(clone_handler)', False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((105, 4, 105, 40), 'bot.dispatcher.add_handler', 'dispatcher.add_handler', ({(105, 27, 105, 39): 'help_handler'}, {}), '(help_handler)', False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((106, 4, 106, 27), 'bot.updater.start_polling', 'updater.start_polling', ({}, {}), '()', False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((52, 14, 52, 86), 'bot.msg_utils.sendMessage', 'sendMessage', ({(52, 26, 52, 64): 'f"""<b>Cloning:</b> <code>{link}</code>"""', (52, 66, 52, 77): 'context.bot', (52, 79, 52, 85): 'update'}, {}), "(f'<b>Cloning:</b> <code>{link}</code>', context.bot, update)", False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((53, 23, 53, 36), 'bot.clone_status.CloneStatus', 'CloneStatus', ({}, {}), '()', False, 'from bot.clone_status import CloneStatus\n'), ((54, 13, 54, 57), 'bot.gDrive.GoogleDriveHelper', 'GoogleDriveHelper', (), '', False, 'from bot.gDrive import GoogleDriveHelper\n'), ((57, 8, 57, 39), 'bot.msg_utils.deleteMessage', 'deleteMessage', ({(57, 22, 57, 33): 'context.bot', (57, 35, 57, 38): 'msg'}, {}), '(context.bot, msg)', False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((59, 8, 59, 48), 'bot.msg_utils.sendMessage', 'sendMessage', ({(59, 20, 59, 26): 'result', (59, 28, 59, 39): 'context.bot', (59, 41, 59, 47): 'update'}, {}), '(result, context.bot, update)', False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((61, 8, 61, 87), 'bot.msg_utils.sendMessage', 'sendMessage', ({(61, 20, 61, 73): '"""Please Provide a Google Drive Shared Link to Clone."""', (61, 75, 61, 78): 'bot', (61, 80, 61, 86): 'update'}, {}), "('Please Provide a Google Drive Shared Link to Clone.', bot, update)", False, 'from bot.msg_utils import deleteMessage, sendMessage\n'), ((92, 8, 94, 55), 'bot.bot.send_document', 'bot.send_document', (), '', False, 'from bot import LOGGER, dispatcher, updater, bot\n'), ((77, 12, 77, 27), 'bot.LOGGER.error', 'LOGGER.error', ({(77, 25, 77, 26): 'e'}, {}), '(e)', False, 'from bot import LOGGER, dispatcher, updater, bot\n')] |
AnaSan27/pyfinlab | src/pyfinlab/risk_models.py | 509cc9544af5e1a5b2b642eca9ae02d383dd743c | import pandas as pd
import numpy as np
from portfoliolab.utils import RiskMetrics
from portfoliolab.estimators import RiskEstimators
from pypfopt import risk_models as risk_models_
"""
Available covariance risk models in PortfolioLab library.
https://hudson-and-thames-portfoliolab-pro.readthedocs-hosted.com/en/latest/estimators/risk_estimators.html
Available covariance risk models in PyPortfolioOpt library.
https://pyportfolioopt.readthedocs.io/en/latest/RiskModels.html#
These functions bring together all covariance matrix risk models from PortfolioLab and PyPortfolioOpt into one
function for ease of use.
"""
risk_met = RiskMetrics()
risk_estimators = RiskEstimators()
risk_models = [
# PyPortfolioOpt
'sample_cov',
'semicovariance',
'exp_cov',
'ledoit_wolf_constant_variance',
'ledoit_wolf_single_factor',
'ledoit_wolf_constant_correlation',
'oracle_approximating',
# PortfolioLab
'sample_covariance',
'minimum_covariance_determinant',
'empirical_covariance',
'shrinked_covariance_basic',
'shrinked_covariance_lw',
'shrinked_covariance_oas',
'semi_covariance',
'exponential_covariance',
'constant_residual_eigenvalues_denoised',
'constant_residual_spectral_denoised',
'targeted_shrinkage_denoised',
'targeted_shrinkage_detoned',
'constant_residual_detoned',
'hierarchical_filtered_complete',
'hierarchical_filtered_single',
'hierarchical_filtered_avg'
]
def risk_model(prices, model, kde_bwidth=0.01, basic_shrinkage=0.1):
"""
Calculates the covariance matrix for a dataframe of asset prices.
:param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset.
:param model: (str) Risk model to use. Should be one of:
PyPortfolioOpt
- 'sample_cov',
- 'semicovariance',
- 'exp_cov',
- 'ledoit_wolf_constant_variance',
- 'ledoit_wolf_single_factor'
- 'ledoit_wolf_constant_correlation',
- 'oracle_approximating'
PortfolioLab
- 'sample_covariance',
- 'minimum_covariance_determinant',
- 'empirical_covariance',
- 'shrinked_covariance_basic',
- 'shrinked_covariance_lw',
- 'shrinked_covariance_oas',
- 'semi_covariance',
- 'exponential_covariance',
- 'constant_residual_eigenvalues_denoised',
- 'constant_residual_spectral_denoised',
- 'targeted_shrinkage_denoised',
- 'targeted_shrinkage_detoned',
- 'constant_residual_detoned',
- 'hierarchical_filtered_complete',
- 'hierarchical_filtered_single',
- 'hierarchical_filtered_avg'
:param kde_bwidth: (float) Optional, bandwidth of the kernel to fit KDE. (0.01 by default)
:param basic_shrinkage: (float) Optional, between 0 and 1. Coefficient in the convex combination for basic shrinkage.
(0.1 by default)
:return: (pd.DataFrame) Estimated covariance matrix.
"""
tn_relation = prices.shape[0] / prices.shape[1]
sample_cov = prices.pct_change().dropna().cov()
empirical_cov = pd.DataFrame(risk_estimators.empirical_covariance(prices, price_data=True),
index=sample_cov.index, columns=sample_cov.columns)
empirical_corr = pd.DataFrame(risk_estimators.cov_to_corr(empirical_cov ** 2),
index=sample_cov.index, columns=sample_cov.columns)
std = np.diag(empirical_cov) ** (1 / 2)
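    # Per-asset standard deviations taken from the covariance diagonal; used to rebuild
    # a covariance matrix from the hierarchically filtered correlation matrix below.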
if model == 'sample_covariance':
return prices.pct_change().dropna().cov()
elif model == 'minimum_covariance_determinant':
covariance_matrix = risk_estimators.minimum_covariance_determinant(prices, price_data=True)
elif model == 'empirical_covariance':
covariance_matrix = risk_estimators.empirical_covariance(prices, price_data=True)
elif model == 'shrinked_covariance_basic':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='basic', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_lw':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='lw', basic_shrinkage=basic_shrinkage)
elif model == 'shrinked_covariance_oas':
covariance_matrix = risk_estimators.shrinked_covariance(
prices, price_data=True, shrinkage_type='oas', basic_shrinkage=basic_shrinkage)
elif model == 'semi_covariance':
covariance_matrix = risk_estimators.semi_covariance(prices, price_data=True, threshold_return=0)
elif model == 'exponential_covariance':
covariance_matrix = risk_estimators.exponential_covariance(prices, price_data=True, window_span=60)
elif model == 'constant_residual_eigenvalues_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=False, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_spectral_denoised':
covariance_matrix = risk_estimators.denoise_covariance(empirical_cov, tn_relation, denoise_method='spectral')
elif model == 'targeted_shrinkage_denoised':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=False, kde_bwidth=kde_bwidth)
elif model == 'targeted_shrinkage_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='target_shrink', detone=True, kde_bwidth=kde_bwidth)
elif model == 'constant_residual_detoned':
covariance_matrix = risk_estimators.denoise_covariance(
empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=True, market_component=1,
kde_bwidth=kde_bwidth)
elif model == 'hierarchical_filtered_complete':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='complete', draw_plot=False), std)
elif model == 'hierarchical_filtered_single':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='single', draw_plot=False), std)
elif model == 'hierarchical_filtered_avg':
covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical(
empirical_corr.to_numpy(), method='average', draw_plot=False), std)
elif model == 'sample_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.sample_cov(prices)) / 252
elif model == 'semicovariance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.semicovariance(prices)) / 252
elif model == 'exp_cov':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.exp_cov(prices, span=180)) / 252
elif model == 'ledoit_wolf_constant_variance':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_single_factor':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'ledoit_wolf_constant_correlation':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
elif model == 'oracle_approximating':
covariance_matrix = risk_models_.fix_nonpositive_semidefinite(
risk_models_.risk_matrix(prices, model)) / 252
else:
raise NameError('You must input a risk model. Check spelling. Case-Sensitive.')
if not isinstance(covariance_matrix, pd.DataFrame):
covariance_matrix = pd.DataFrame(covariance_matrix, index=sample_cov.index, columns=sample_cov.columns).round(6)
return covariance_matrix * 252
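# Example usage (sketch; assumes `prices` is a DataFrame of daily close prices,
# one column per asset):
#   cov = risk_model(prices, 'ledoit_wolf_constant_correlation')
#   # `cov` is an annualized covariance matrix indexed by the asset columns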
| [((17, 11, 17, 24), 'portfoliolab.utils.RiskMetrics', 'RiskMetrics', ({}, {}), '()', False, 'from portfoliolab.utils import RiskMetrics\n'), ((18, 18, 18, 34), 'portfoliolab.estimators.RiskEstimators', 'RiskEstimators', ({}, {}), '()', False, 'from portfoliolab.estimators import RiskEstimators\n'), ((95, 10, 95, 32), 'numpy.diag', 'np.diag', ({(95, 18, 95, 31): 'empirical_cov'}, {}), '(empirical_cov)', True, 'import numpy as np\n'), ((163, 28, 163, 111), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((141, 12, 141, 43), 'pypfopt.risk_models.sample_cov', 'risk_models_.sample_cov', ({(141, 36, 141, 42): 'prices'}, {}), '(prices)', True, 'from pypfopt import risk_models as risk_models_\n'), ((144, 12, 144, 47), 'pypfopt.risk_models.semicovariance', 'risk_models_.semicovariance', ({(144, 40, 144, 46): 'prices'}, {}), '(prices)', True, 'from pypfopt import risk_models as risk_models_\n'), ((147, 12, 147, 50), 'pypfopt.risk_models.exp_cov', 'risk_models_.exp_cov', (), '', True, 'from pypfopt import risk_models as risk_models_\n'), ((150, 12, 150, 51), 'pypfopt.risk_models.risk_matrix', 'risk_models_.risk_matrix', ({(150, 37, 150, 43): 'prices', (150, 45, 150, 50): 'model'}, {}), '(prices, model)', True, 'from pypfopt import risk_models as risk_models_\n'), ((153, 12, 153, 51), 'pypfopt.risk_models.risk_matrix', 'risk_models_.risk_matrix', ({(153, 37, 153, 43): 'prices', (153, 45, 153, 50): 'model'}, {}), '(prices, model)', True, 'from pypfopt import risk_models as risk_models_\n'), ((156, 12, 156, 51), 'pypfopt.risk_models.risk_matrix', 'risk_models_.risk_matrix', ({(156, 37, 156, 43): 'prices', (156, 45, 156, 50): 'model'}, {}), '(prices, model)', True, 'from pypfopt import risk_models as risk_models_\n'), ((159, 12, 159, 51), 'pypfopt.risk_models.risk_matrix', 'risk_models_.risk_matrix', ({(159, 37, 159, 43): 'prices', (159, 45, 159, 50): 'model'}, {}), '(prices, model)', True, 'from pypfopt import risk_models as risk_models_\n')] |
Soft-illusion/ComputerVision | gaussian_blur/gaussian_blur.py | 9afaa9eafef8ac47fdb1023c5332cff98626f1bd | import cv2 as cv
import sys
import numpy as np
import random as r
import os
from PIL import Image as im
def noisy(noise_typ,image):
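    # Returns a noisy copy of `image`. Supported noise_typ values: "gauss"
    # (additive Gaussian), "s&p" (salt & pepper), "poisson" (shot noise)
    # and "speckle" (multiplicative Gaussian).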
if noise_typ == "gauss":
# Generate Gaussian noise
gauss = np.random.normal(0,1,image.size)
print(gauss)
gauss = gauss.reshape(image.shape[0],image.shape[1],image.shape[2]).astype('uint8')
# Add the Gaussian noise to the image
img_gauss = cv.add(image,gauss)
cv.imwrite("Noise.png", gauss)
return img_gauss
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_typ =="speckle":
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
img = cv.imread(cv.samples.findFile("3.png"))
if img is None:
sys.exit("Could not read the image.")
else:
    height, width, depth = img.shape  # cv2 images are (rows, cols, channels)
img_noisy = noisy("gauss",img)
for kernal_size in range (1,71,2):
print(kernal_size)
dst = cv.GaussianBlur(img_noisy,(kernal_size,kernal_size),0)
# print( cv.getGaussianKernel(kernal_size,0))
file_name = "gaussian_blur" + str(kernal_size) + ".png"
cv.imwrite(file_name, dst)
# dst = img_noisy
# for kernal_no in range (0,200):
# print(kernal_no)
# dst = cv.GaussianBlur(dst,(3,3),1)
# # print( cv.getGaussianKernel(kernal_size,3))
# file_name = "gaussian_blur" + str(kernal_no) + ".png"
# cv.imwrite(file_name, dst)
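# cv.bilateralFilter(src, d, sigmaColor, sigmaSpace): d is the pixel neighbourhood
# diameter and both sigmas are set to 300 here for strong, edge-preserving smoothing.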
for kernal_size in range (1,71,2):
print(kernal_size)
dst = cv.bilateralFilter(img_noisy,kernal_size,300,300)
# print( cv.getGaussianKernel(kernal_size,0))
file_name = "bilateral_blur" + str(kernal_size) + ".png"
cv.imwrite(file_name, dst)
| [((48, 16, 48, 44), 'cv2.samples.findFile', 'cv.samples.findFile', ({(48, 36, 48, 43): '"""3.png"""'}, {}), "('3.png')", True, 'import cv2 as cv\n'), ((50, 4, 50, 41), 'sys.exit', 'sys.exit', ({(50, 13, 50, 40): '"""Could not read the image."""'}, {}), "('Could not read the image.')", False, 'import sys\n'), ((11, 16, 11, 48), 'numpy.random.normal', 'np.random.normal', ({(11, 33, 11, 34): '0', (11, 35, 11, 36): '1', (11, 37, 11, 47): 'image.size'}, {}), '(0, 1, image.size)', True, 'import numpy as np\n'), ((15, 20, 15, 39), 'cv2.add', 'cv.add', ({(15, 27, 15, 32): 'image', (15, 33, 15, 38): 'gauss'}, {}), '(image, gauss)', True, 'import cv2 as cv\n'), ((16, 8, 16, 38), 'cv2.imwrite', 'cv.imwrite', ({(16, 19, 16, 30): '"""Noise.png"""', (16, 32, 16, 37): 'gauss'}, {}), "('Noise.png', gauss)", True, 'import cv2 as cv\n'), ((58, 14, 58, 68), 'cv2.GaussianBlur', 'cv.GaussianBlur', ({(58, 30, 58, 39): 'img_noisy', (58, 40, 58, 65): '(kernal_size, kernal_size)', (58, 66, 58, 67): '0'}, {}), '(img_noisy, (kernal_size, kernal_size), 0)', True, 'import cv2 as cv\n'), ((61, 8, 61, 34), 'cv2.imwrite', 'cv.imwrite', ({(61, 19, 61, 28): 'file_name', (61, 30, 61, 33): 'dst'}, {}), '(file_name, dst)', True, 'import cv2 as cv\n'), ((73, 14, 73, 63), 'cv2.bilateralFilter', 'cv.bilateralFilter', ({(73, 33, 73, 42): 'img_noisy', (73, 43, 73, 54): 'kernal_size', (73, 55, 73, 58): '300', (73, 59, 73, 62): '300'}, {}), '(img_noisy, kernal_size, 300, 300)', True, 'import cv2 as cv\n'), ((76, 8, 76, 34), 'cv2.imwrite', 'cv.imwrite', ({(76, 19, 76, 28): 'file_name', (76, 30, 76, 33): 'dst'}, {}), '(file_name, dst)', True, 'import cv2 as cv\n'), ((23, 12, 23, 26), 'numpy.copy', 'np.copy', ({(23, 20, 23, 25): 'image'}, {}), '(image)', True, 'import numpy as np\n'), ((25, 17, 25, 54), 'numpy.ceil', 'np.ceil', ({(25, 25, 25, 53): 'amount * image.size * s_vs_p'}, {}), '(amount * image.size * s_vs_p)', True, 'import numpy as np\n'), ((31, 19, 31, 62), 'numpy.ceil', 'np.ceil', ({(31, 27, 31, 61): 'amount * image.size * (1.0 - s_vs_p)'}, {}), '(amount * image.size * (1.0 - s_vs_p))', True, 'import numpy as np\n'), ((37, 19, 37, 35), 'numpy.unique', 'np.unique', ({(37, 29, 37, 34): 'image'}, {}), '(image)', True, 'import numpy as np\n'), ((39, 16, 39, 47), 'numpy.random.poisson', 'np.random.poisson', ({(39, 34, 39, 46): '(image * vals)'}, {}), '(image * vals)', True, 'import numpy as np\n'), ((43, 16, 43, 43), 'numpy.random.randn', 'np.random.randn', ({(43, 32, 43, 35): 'row', (43, 36, 43, 39): 'col', (43, 40, 43, 42): 'ch'}, {}), '(row, col, ch)', True, 'import numpy as np\n'), ((38, 28, 38, 41), 'numpy.log2', 'np.log2', ({(38, 36, 38, 40): 'vals'}, {}), '(vals)', True, 'import numpy as np\n')] |
fossabot/CityWok-Manager | citywok_ms/employee/routes.py | ccd31eb684ddeec5c741c9520c779d98eb0e3cc6 | from citywok_ms.file.models import EmployeeFile, File
import citywok_ms.employee.messages as employee_msg
import citywok_ms.file.messages as file_msg
from citywok_ms.employee.forms import EmployeeForm
from citywok_ms.file.forms import FileForm
from flask import Blueprint, flash, redirect, render_template, url_for
from citywok_ms.employee.models import Employee
employee = Blueprint("employee", __name__, url_prefix="/employee")
@employee.route("/")
def index():
return render_template(
"employee/index.html",
title=employee_msg.INDEX_TITLE,
active_employees=Employee.get_active(),
suspended_employees=Employee.get_suspended(),
)
@employee.route("/new", methods=["GET", "POST"])
def new():
form = EmployeeForm()
if form.validate_on_submit():
employee = Employee.create_by_form(form)
flash(employee_msg.NEW_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.index"))
return render_template(
"employee/form.html", title=employee_msg.NEW_TITLE, form=form
)
@employee.route("/<int:employee_id>")
def detail(employee_id):
return render_template(
"employee/detail.html",
title=employee_msg.DETAIL_TITLE,
employee=Employee.get_or_404(employee_id),
file_form=FileForm(),
)
@employee.route("/<int:employee_id>/update", methods=["GET", "POST"])
def update(employee_id):
employee = Employee.get_or_404(employee_id)
form = EmployeeForm()
form.hide_id.data = employee_id
if form.validate_on_submit():
employee.update_by_form(form)
flash(employee_msg.UPDATE_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
form.process(obj=employee)
return render_template(
"employee/form.html",
employee=employee,
form=form,
title=employee_msg.UPDATE_TITLE,
)
@employee.route("/<int:employee_id>/suspend", methods=["POST"])
def suspend(employee_id):
employee = Employee.get_or_404(employee_id)
employee.suspend()
flash(employee_msg.SUSPEND_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
@employee.route("/<int:employee_id>/activate", methods=["POST"])
def activate(employee_id):
employee = Employee.get_or_404(employee_id)
employee.activate()
flash(employee_msg.ACTIVATE_SUCCESS.format(name=employee.full_name), "success")
return redirect(url_for("employee.detail", employee_id=employee_id))
@employee.route("/<int:employee_id>/upload", methods=["POST"])
def upload(employee_id):
form = FileForm()
file = form.file.data
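    # Save the uploaded file when validation passes; otherwise report either an
    # unsupported format (a file was sent) or a missing file.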
if form.validate_on_submit():
db_file = EmployeeFile.create_by_form(form, Employee.get_or_404(employee_id))
flash(file_msg.UPLOAD_SUCCESS.format(name=db_file.full_name), "success")
elif file is not None:
flash(
file_msg.INVALID_FORMAT.format(format=File.split_file_format(file)),
"danger",
)
else:
flash(file_msg.NO_FILE, "danger")
return redirect(url_for("employee.detail", employee_id=employee_id))
| [((9, 11, 9, 66), 'flask.Blueprint', 'Blueprint', (), '', False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((24, 11, 24, 25), 'citywok_ms.employee.forms.EmployeeForm', 'EmployeeForm', ({}, {}), '()', False, 'from citywok_ms.employee.forms import EmployeeForm\n'), ((29, 11, 31, 5), 'flask.render_template', 'render_template', (), '', False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((46, 15, 46, 47), 'citywok_ms.employee.models.Employee.get_or_404', 'Employee.get_or_404', ({(46, 35, 46, 46): 'employee_id'}, {}), '(employee_id)', False, 'from citywok_ms.employee.models import Employee\n'), ((47, 11, 47, 25), 'citywok_ms.employee.forms.EmployeeForm', 'EmployeeForm', ({}, {}), '()', False, 'from citywok_ms.employee.forms import EmployeeForm\n'), ((56, 11, 61, 5), 'flask.render_template', 'render_template', (), '', False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((66, 15, 66, 47), 'citywok_ms.employee.models.Employee.get_or_404', 'Employee.get_or_404', ({(66, 35, 66, 46): 'employee_id'}, {}), '(employee_id)', False, 'from citywok_ms.employee.models import Employee\n'), ((74, 15, 74, 47), 'citywok_ms.employee.models.Employee.get_or_404', 'Employee.get_or_404', ({(74, 35, 74, 46): 'employee_id'}, {}), '(employee_id)', False, 'from citywok_ms.employee.models import Employee\n'), ((82, 11, 82, 21), 'citywok_ms.file.forms.FileForm', 'FileForm', ({}, {}), '()', False, 'from citywok_ms.file.forms import FileForm\n'), ((26, 19, 26, 48), 'citywok_ms.employee.models.Employee.create_by_form', 'Employee.create_by_form', ({(26, 43, 26, 47): 'form'}, {}), '(form)', False, 'from citywok_ms.employee.models import Employee\n'), ((68, 10, 68, 70), 'citywok_ms.employee.messages.SUSPEND_SUCCESS.format', 'employee_msg.SUSPEND_SUCCESS.format', (), '', True, 'import citywok_ms.employee.messages as employee_msg\n'), ((69, 20, 69, 71), 'flask.url_for', 'url_for', (), '', False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((76, 10, 76, 71), 'citywok_ms.employee.messages.ACTIVATE_SUCCESS.format', 'employee_msg.ACTIVATE_SUCCESS.format', (), '', True, 'import citywok_ms.employee.messages as employee_msg\n'), ((77, 20, 77, 71), 'flask.url_for', 'url_for', (), '', False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((94, 20, 94, 71), 'flask.url_for', 'url_for', (), '', False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((17, 25, 17, 46), 'citywok_ms.employee.models.Employee.get_active', 'Employee.get_active', ({}, {}), '()', False, 'from citywok_ms.employee.models import Employee\n'), ((18, 28, 18, 52), 'citywok_ms.employee.models.Employee.get_suspended', 'Employee.get_suspended', ({}, {}), '()', False, 'from citywok_ms.employee.models import Employee\n'), ((27, 14, 27, 70), 'citywok_ms.employee.messages.NEW_SUCCESS.format', 'employee_msg.NEW_SUCCESS.format', (), '', True, 'import citywok_ms.employee.messages as employee_msg\n'), ((28, 24, 28, 49), 'flask.url_for', 'url_for', ({(28, 32, 28, 48): '"""employee.index"""'}, {}), "('employee.index')", False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((39, 17, 39, 49), 'citywok_ms.employee.models.Employee.get_or_404', 'Employee.get_or_404', ({(39, 37, 39, 48): 'employee_id'}, {}), '(employee_id)', False, 'from citywok_ms.employee.models import Employee\n'), ((40, 18, 40, 28), 'citywok_ms.file.forms.FileForm', 'FileForm', ({}, {}), '()', False, 
'from citywok_ms.file.forms import FileForm\n'), ((51, 14, 51, 73), 'citywok_ms.employee.messages.UPDATE_SUCCESS.format', 'employee_msg.UPDATE_SUCCESS.format', (), '', True, 'import citywok_ms.employee.messages as employee_msg\n'), ((52, 24, 52, 75), 'flask.url_for', 'url_for', (), '', False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((85, 52, 85, 84), 'citywok_ms.employee.models.Employee.get_or_404', 'Employee.get_or_404', ({(85, 72, 85, 83): 'employee_id'}, {}), '(employee_id)', False, 'from citywok_ms.employee.models import Employee\n'), ((86, 14, 86, 68), 'citywok_ms.file.messages.UPLOAD_SUCCESS.format', 'file_msg.UPLOAD_SUCCESS.format', (), '', True, 'import citywok_ms.file.messages as file_msg\n'), ((93, 8, 93, 41), 'flask.flash', 'flash', ({(93, 14, 93, 30): 'file_msg.NO_FILE', (93, 32, 93, 40): '"""danger"""'}, {}), "(file_msg.NO_FILE, 'danger')", False, 'from flask import Blueprint, flash, redirect, render_template, url_for\n'), ((89, 50, 89, 78), 'citywok_ms.file.models.File.split_file_format', 'File.split_file_format', ({(89, 73, 89, 77): 'file'}, {}), '(file)', False, 'from citywok_ms.file.models import EmployeeFile, File\n')] |
safwanrahman/Ford | kitsune/customercare/cron.py | 87e91dea1cc22b1759eea81cef069359ccb5cd0b | import calendar
from datetime import datetime, timedelta
import json
import logging
import re
import rfc822
from django.conf import settings
from django.db.utils import IntegrityError
import cronjobs
from multidb.pinning import pin_this_thread
from statsd import statsd
from twython import Twython
from kitsune.customercare.models import Tweet, TwitterAccount, Reply
from kitsune.sumo.redis_utils import redis_client, RedisError
from kitsune.sumo.utils import chunked
LINK_REGEX = re.compile('https?\:', re.IGNORECASE)
RT_REGEX = re.compile('^rt\W', re.IGNORECASE)
ALLOWED_USERS = [
{'id': 2142731, 'username': 'Firefox'},
{'id': 150793437, 'username': 'FirefoxBrasil'},
{'id': 107272435, 'username': 'firefox_es'},
]
log = logging.getLogger('k.twitter')
def get_word_blacklist_regex():
"""
Make a regex that looks kind of like r'\b(foo|bar|baz)\b'.
This is a function so that it isn't calculated at import time,
and so can be tested more easily.
This doesn't use raw strings (r'') because the "mismatched" parens
were confusing my syntax highlighter, which was confusing me.
"""
return re.compile(
'\\b(' +
'|'.join(map(re.escape, settings.CC_WORD_BLACKLIST)) +
')\\b')
@cronjobs.register
def collect_tweets():
    """Collect new tweets about Firefox."""
    # Don't (ab)use the twitter API from dev and stage.
    if settings.STAGE:
        return
with statsd.timer('customercare.tweets.time_elapsed'):
t = Twython(settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN,
settings.TWITTER_ACCESS_TOKEN_SECRET)
search_options = {
'q': ('firefox OR #fxinput OR @firefoxbrasil OR #firefoxos '
'OR @firefox_es'),
'count': settings.CC_TWEETS_PERPAGE, # Items per page.
'result_type': 'recent', # Retrieve tweets by date.
}
# If we already have some tweets, collect nothing older than what we
# have.
try:
latest_tweet = Tweet.latest()
except Tweet.DoesNotExist:
log.debug('No existing tweets. Retrieving %d tweets from search.' %
settings.CC_TWEETS_PERPAGE)
else:
search_options['since_id'] = latest_tweet.tweet_id
log.info('Retrieving tweets with id >= %s' % latest_tweet.tweet_id)
# Retrieve Tweets
results = t.search(**search_options)
if len(results['statuses']) == 0:
# Twitter returned 0 results.
return
# Drop tweets into DB
for item in results['statuses']:
# Apply filters to tweet before saving
# Allow links in #fxinput tweets
statsd.incr('customercare.tweet.collected')
item = _filter_tweet(item,
allow_links='#fxinput' in item['text'])
if not item:
continue
created_date = datetime.utcfromtimestamp(calendar.timegm(
rfc822.parsedate(item['created_at'])))
item_lang = item['metadata'].get('iso_language_code', 'en')
tweet = Tweet(tweet_id=item['id'], raw_json=json.dumps(item),
locale=item_lang, created=created_date)
try:
tweet.save()
statsd.incr('customercare.tweet.saved')
except IntegrityError:
pass
@cronjobs.register
def purge_tweets():
"""Periodically purge old tweets for each locale.
This does a lot of DELETEs on master, so it shouldn't run too frequently.
Probably once every hour or more.
"""
# Pin to master
pin_this_thread()
# Build list of tweets to delete, by id.
for locale in settings.SUMO_LANGUAGES:
locale = settings.LOCALES[locale].iso639_1
# Some locales don't have an iso639_1 code, too bad for them.
if not locale:
continue
oldest = _get_oldest_tweet(locale, settings.CC_MAX_TWEETS)
if oldest:
log.debug('Truncating tweet list: Removing tweets older than %s, '
'for [%s].' % (oldest.created, locale))
Tweet.objects.filter(locale=locale,
created__lte=oldest.created).delete()
def _get_oldest_tweet(locale, n=0):
"""Returns the nth oldest tweet per locale, defaults to newest."""
try:
return Tweet.objects.filter(locale=locale).order_by(
'-created')[n]
except IndexError:
return None
def _filter_tweet(item, allow_links=False):
"""
Apply some filters to an incoming tweet.
May modify tweet. If None is returned, tweet will be discarded.
Used to exclude replies and such from incoming tweets.
"""
text = item['text'].lower()
# No replies, except to ALLOWED_USERS
allowed_user_ids = [u['id'] for u in ALLOWED_USERS]
to_user_id = item.get('to_user_id')
if to_user_id and to_user_id not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No mentions, except of ALLOWED_USERS
for user in item['entities']['user_mentions']:
if user['id'] not in allowed_user_ids:
statsd.incr('customercare.tweet.rejected.reply_or_mention')
return None
# No retweets
if RT_REGEX.search(text) or text.find('(via ') > -1:
statsd.incr('customercare.tweet.rejected.retweet')
return None
# No links
if not allow_links and LINK_REGEX.search(text):
statsd.incr('customercare.tweet.rejected.link')
return None
screen_name = item['user']['screen_name']
# Django's caching system will save us here.
IGNORED_USERS = set(
TwitterAccount.objects
.filter(ignored=True)
.values_list('username', flat=True)
)
# Exclude filtered users
if screen_name in IGNORED_USERS:
statsd.incr('customercare.tweet.rejected.user')
return None
    # Exclude users with firefox in the handle
if 'firefox' in screen_name.lower():
statsd.incr('customercare.tweet.rejected.firefox_in_handle')
return None
# Exclude problem words
match = get_word_blacklist_regex().search(text)
if match:
bad_word = match.group(1)
statsd.incr('customercare.tweet.rejected.blacklist_word.' + bad_word)
return None
return item
@cronjobs.register
def get_customercare_stats():
"""
Generate customer care stats from the Replies table.
This gets cached in Redis as a sorted list of contributors, stored as JSON.
Example Top Contributor data:
[
{
'twitter_username': 'username1',
'avatar': 'http://twitter.com/path/to/the/avatar.png',
'avatar_https': 'https://twitter.com/path/to/the/avatar.png',
'all': 5211,
'1m': 230,
'1w': 33,
'1d': 3,
},
{ ... },
{ ... },
]
"""
if settings.STAGE:
return
contributor_stats = {}
now = datetime.now()
one_month_ago = now - timedelta(days=30)
one_week_ago = now - timedelta(days=7)
yesterday = now - timedelta(days=1)
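    # Walk the Reply table in chunks of 2500 rows to keep memory usage bounded.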
for chunk in chunked(Reply.objects.all(), 2500, Reply.objects.count()):
for reply in chunk:
user = reply.twitter_username
if user not in contributor_stats:
raw = json.loads(reply.raw_json)
if 'from_user' in raw: # For tweets collected using v1 API
user_data = raw
else:
user_data = raw['user']
contributor_stats[user] = {
'twitter_username': user,
'avatar': user_data['profile_image_url'],
'avatar_https': user_data['profile_image_url_https'],
'all': 0, '1m': 0, '1w': 0, '1d': 0,
}
contributor = contributor_stats[reply.twitter_username]
contributor['all'] += 1
if reply.created > one_month_ago:
contributor['1m'] += 1
if reply.created > one_week_ago:
contributor['1w'] += 1
if reply.created > yesterday:
contributor['1d'] += 1
sort_key = settings.CC_TOP_CONTRIB_SORT
limit = settings.CC_TOP_CONTRIB_LIMIT
# Sort by whatever is in settings, break ties with 'all'
contributor_stats = sorted(contributor_stats.values(),
key=lambda c: (c[sort_key], c['all']),
reverse=True)[:limit]
try:
redis = redis_client(name='default')
key = settings.CC_TOP_CONTRIB_CACHE_KEY
redis.set(key, json.dumps(contributor_stats))
except RedisError as e:
statsd.incr('redis.error')
log.error('Redis error: %s' % e)
return contributor_stats
| [((21, 13, 21, 50), 're.compile', 're.compile', ({(21, 24, 21, 34): '"""https?\\\\:"""', (21, 36, 21, 49): 're.IGNORECASE'}, {}), "('https?\\\\:', re.IGNORECASE)", False, 'import re\n'), ((22, 11, 22, 45), 're.compile', 're.compile', ({(22, 22, 22, 29): '"""^rt\\\\W"""', (22, 31, 22, 44): 're.IGNORECASE'}, {}), "('^rt\\\\W', re.IGNORECASE)", False, 'import re\n'), ((30, 6, 30, 36), 'logging.getLogger', 'logging.getLogger', ({(30, 24, 30, 35): '"""k.twitter"""'}, {}), "('k.twitter')", False, 'import logging\n'), ((120, 4, 120, 21), 'multidb.pinning.pin_this_thread', 'pin_this_thread', ({}, {}), '()', False, 'from multidb.pinning import pin_this_thread\n'), ((233, 10, 233, 24), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((56, 9, 56, 57), 'statsd.statsd.timer', 'statsd.timer', ({(56, 22, 56, 56): '"""customercare.tweets.time_elapsed"""'}, {}), "('customercare.tweets.time_elapsed')", False, 'from statsd import statsd\n'), ((57, 12, 60, 57), 'twython.Twython', 'Twython', ({(57, 20, 57, 49): 'settings.TWITTER_CONSUMER_KEY', (58, 20, 58, 52): 'settings.TWITTER_CONSUMER_SECRET', (59, 20, 59, 49): 'settings.TWITTER_ACCESS_TOKEN', (60, 20, 60, 56): 'settings.TWITTER_ACCESS_TOKEN_SECRET'}, {}), '(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET,\n settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_ACCESS_TOKEN_SECRET)', False, 'from twython import Twython\n'), ((157, 8, 157, 67), 'statsd.statsd.incr', 'statsd.incr', ({(157, 20, 157, 66): '"""customercare.tweet.rejected.reply_or_mention"""'}, {}), "('customercare.tweet.rejected.reply_or_mention')", False, 'from statsd import statsd\n'), ((168, 8, 168, 58), 'statsd.statsd.incr', 'statsd.incr', ({(168, 20, 168, 57): '"""customercare.tweet.rejected.retweet"""'}, {}), "('customercare.tweet.rejected.retweet')", False, 'from statsd import statsd\n'), ((173, 8, 173, 55), 'statsd.statsd.incr', 'statsd.incr', ({(173, 20, 173, 54): '"""customercare.tweet.rejected.link"""'}, {}), "('customercare.tweet.rejected.link')", False, 'from statsd import statsd\n'), ((187, 8, 187, 55), 'statsd.statsd.incr', 'statsd.incr', ({(187, 20, 187, 54): '"""customercare.tweet.rejected.user"""'}, {}), "('customercare.tweet.rejected.user')", False, 'from statsd import statsd\n'), ((192, 8, 192, 68), 'statsd.statsd.incr', 'statsd.incr', ({(192, 20, 192, 67): '"""customercare.tweet.rejected.firefox_in_handle"""'}, {}), "('customercare.tweet.rejected.firefox_in_handle')", False, 'from statsd import statsd\n'), ((199, 8, 199, 77), 'statsd.statsd.incr', 'statsd.incr', ({(199, 20, 199, 76): "('customercare.tweet.rejected.blacklist_word.' + bad_word)"}, {}), "('customercare.tweet.rejected.blacklist_word.' 
+ bad_word)", False, 'from statsd import statsd\n'), ((234, 26, 234, 44), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((235, 25, 235, 42), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((236, 22, 236, 39), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((238, 25, 238, 44), 'kitsune.customercare.models.Reply.objects.all', 'Reply.objects.all', ({}, {}), '()', False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((238, 52, 238, 73), 'kitsune.customercare.models.Reply.objects.count', 'Reply.objects.count', ({}, {}), '()', False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((272, 16, 272, 44), 'kitsune.sumo.redis_utils.redis_client', 'redis_client', (), '', False, 'from kitsune.sumo.redis_utils import redis_client, RedisError\n'), ((72, 27, 72, 41), 'kitsune.customercare.models.Tweet.latest', 'Tweet.latest', ({}, {}), '()', False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((91, 12, 91, 55), 'statsd.statsd.incr', 'statsd.incr', ({(91, 24, 91, 54): '"""customercare.tweet.collected"""'}, {}), "('customercare.tweet.collected')", False, 'from statsd import statsd\n'), ((163, 12, 163, 71), 'statsd.statsd.incr', 'statsd.incr', ({(163, 24, 163, 70): '"""customercare.tweet.rejected.reply_or_mention"""'}, {}), "('customercare.tweet.rejected.reply_or_mention')", False, 'from statsd import statsd\n'), ((274, 23, 274, 52), 'json.dumps', 'json.dumps', ({(274, 34, 274, 51): 'contributor_stats'}, {}), '(contributor_stats)', False, 'import json\n'), ((276, 8, 276, 34), 'statsd.statsd.incr', 'statsd.incr', ({(276, 20, 276, 33): '"""redis.error"""'}, {}), "('redis.error')", False, 'from statsd import statsd\n'), ((106, 16, 106, 55), 'statsd.statsd.incr', 'statsd.incr', ({(106, 28, 106, 54): '"""customercare.tweet.saved"""'}, {}), "('customercare.tweet.saved')", False, 'from statsd import statsd\n'), ((180, 8, 181, 29), 'kitsune.customercare.models.TwitterAccount.objects.filter', 'TwitterAccount.objects.filter', (), '', False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((242, 22, 242, 48), 'json.loads', 'json.loads', ({(242, 33, 242, 47): 'reply.raw_json'}, {}), '(reply.raw_json)', False, 'import json\n'), ((98, 16, 98, 52), 'rfc822.parsedate', 'rfc822.parsedate', ({(98, 33, 98, 51): "item['created_at']"}, {}), "(item['created_at'])", False, 'import rfc822\n'), ((102, 56, 102, 72), 'json.dumps', 'json.dumps', ({(102, 67, 102, 71): 'item'}, {}), '(item)', False, 'import json\n'), ((132, 12, 133, 61), 'kitsune.customercare.models.Tweet.objects.filter', 'Tweet.objects.filter', (), '', False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n'), ((139, 15, 139, 50), 'kitsune.customercare.models.Tweet.objects.filter', 'Tweet.objects.filter', (), '', False, 'from kitsune.customercare.models import Tweet, TwitterAccount, Reply\n')] |
nrcmedia/pdfrw | setup.py | 2a3c9caded906b7ca71f1a338673a24f90eb0e5c | #!/usr/bin/env python
from distutils.core import setup
try:
import setuptools
except:
pass
setup(
name='pdfrw',
version='0.1',
description='PDF file reader/writer library',
long_description='''
pdfrw lets you read and write PDF files, including
compositing multiple pages together (e.g. to do watermarking,
or to copy an image or diagram from one PDF to another),
and can output by itself, or in conjunction with reportlab.
pdfrw will faithfully reproduce vector formats without
rasterization, so the rst2pdf package has used pdfrw
by default for PDF and SVG images since
March 2010. Several small examples are provided.
''',
author='Patrick Maupin',
author_email='[email protected]',
platforms='Independent',
url='http://code.google.com/p/pdfrw/',
packages=['pdfrw', 'pdfrw.objects'],
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'
],
keywords='pdf vector graphics',
)
| [((9, 0, 42, 1), 'distutils.core.setup', 'setup', (), '', False, 'from distutils.core import setup\n')] |
ZZIDZZ/pytorch-ssd | checkAnnotation.py | 8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9 | import argparse
import sys
import cv2
import os
import os.path as osp
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--root', help='Dataset root directory path')
args = parser.parse_args()
CLASSES = ( # always index 0
'helmet', 'vest', 'no_helmet')
annopath = osp.join('%s', 'Annotations', '%s.{}'.format("xml"))
imgpath = osp.join('%s', 'JPEGImages', '%s.{}'.format("jpg"))
def vocChecker(image_id, width, height, keep_difficult = False):
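    # Parses the Pascal VOC-style XML annotation for `image_id` and returns one
    # [xmin, ymin, xmax, ymax, label_idx] entry per object, with coordinates
    # normalized by the image width/height.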
target = ET.parse(annopath % image_id).getroot()
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = float(cur_pt) / width if i % 2 == 0 else float(cur_pt) / height
bndbox.append(cur_pt)
print(name)
label_idx = dict(zip(CLASSES, range(len(CLASSES))))[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
print(res)
try :
print(np.array(res)[:,4])
print(np.array(res)[:,:4])
except IndexError:
print("\nINDEX ERROR HERE !\n")
exit(0)
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
if __name__ == '__main__' :
i = 0
for name in sorted(os.listdir(osp.join(args.root,'Annotations'))):
# as we have only one annotations file per image
i += 1
img = cv2.imread(imgpath % (args.root,name.split('.')[0]))
height, width, channels = img.shape
        print("path : {}".format(annopath % (args.root,name.split('.')[0])))
        # vocChecker expects (image_id, width, height), so pass width before height
        res = vocChecker((args.root, name.split('.')[0]), width, height)
print("Total of annotations : {}".format(i)) | [((15, 12, 16, 78), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((30, 15, 30, 44), 'xml.etree.ElementTree.parse', 'ET.parse', ({(30, 24, 30, 43): 'annopath % image_id'}, {}), '(annopath % image_id)', True, 'import xml.etree.ElementTree as ET\n'), ((72, 34, 72, 67), 'os.path.join', 'osp.join', ({(72, 43, 72, 52): 'args.root', (72, 53, 72, 66): '"""Annotations"""'}, {}), "(args.root, 'Annotations')", True, 'import os.path as osp\n'), ((61, 14, 61, 27), 'numpy.array', 'np.array', ({(61, 23, 61, 26): 'res'}, {}), '(res)', True, 'import numpy as np\n'), ((62, 14, 62, 27), 'numpy.array', 'np.array', ({(62, 23, 62, 26): 'res'}, {}), '(res)', True, 'import numpy as np\n')] |
LaudateCorpus1/oci-python-sdk | src/oci/identity_data_plane/models/password_reset_authentication_request.py | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class PasswordResetAuthenticationRequest(object):
"""
PasswordResetAuthenticationRequest model.
"""
def __init__(self, **kwargs):
"""
Initializes a new PasswordResetAuthenticationRequest object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param user_id:
The value to assign to the user_id property of this PasswordResetAuthenticationRequest.
:type user_id: str
:param password_reset_token:
The value to assign to the password_reset_token property of this PasswordResetAuthenticationRequest.
:type password_reset_token: str
"""
self.swagger_types = {
'user_id': 'str',
'password_reset_token': 'str'
}
self.attribute_map = {
'user_id': 'userId',
'password_reset_token': 'passwordResetToken'
}
self._user_id = None
self._password_reset_token = None
@property
def user_id(self):
"""
**[Required]** Gets the user_id of this PasswordResetAuthenticationRequest.
The id of the user
:return: The user_id of this PasswordResetAuthenticationRequest.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this PasswordResetAuthenticationRequest.
The id of the user
:param user_id: The user_id of this PasswordResetAuthenticationRequest.
:type: str
"""
self._user_id = user_id
@property
def password_reset_token(self):
"""
**[Required]** Gets the password_reset_token of this PasswordResetAuthenticationRequest.
The password reset token
:return: The password_reset_token of this PasswordResetAuthenticationRequest.
:rtype: str
"""
return self._password_reset_token
@password_reset_token.setter
def password_reset_token(self, password_reset_token):
"""
Sets the password_reset_token of this PasswordResetAuthenticationRequest.
The password reset token
:param password_reset_token: The password_reset_token of this PasswordResetAuthenticationRequest.
:type: str
"""
self._password_reset_token = password_reset_token
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| [((92, 15, 92, 40), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', ({(92, 35, 92, 39): 'self'}, {}), '(self)', False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
vchiapaikeo/prophet | venv/lib/python3.7/site-packages/convertdate/dublin.py | e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7 | # -*- coding: utf-8 -*-
# This file is part of convertdate.
# http://github.com/fitnr/convertdate
# Licensed under the MIT license:
# http://opensource.org/licenses/MIT
# Copyright (c) 2016, fitnr <fitnr@fakeisthenewreal>
'''Convert to and from the Dublin day count'''
from . import daycount
EPOCH = 2415020 # Julian Day Count for Dublin Count 0
_dublin = daycount.DayCount(EPOCH)
to_gregorian = _dublin.to_gregorian
from_gregorian = _dublin.from_gregorian
to_jd = _dublin.to_jd
from_jd = _dublin.from_jd
from_julian = _dublin.from_julian
to_julian = _dublin.to_julian
to_datetime = _dublin.to_datetime
from_datetime = _dublin.from_datetime
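# Example (sketch): from_gregorian(year, month, day) returns the Dublin day number,
# i.e. days elapsed since JD 2415020 (Greenwich noon, 1899-12-31); to_gregorian inverts it.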
| [] |
roscisz/TensorHive | tests/functional/controllers/test_group_controller_superuser.py | 4a680f47a0ee1ce366dc82ad9964e229d9749c4e | from tensorhive.models.Group import Group
from fixtures.controllers import API_URI as BASE_URI, HEADERS
from http import HTTPStatus
from importlib import reload
import json
import auth_patcher
ENDPOINT = BASE_URI + '/groups'
def setup_module(_):
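    # Start the superuser auth patches, reload the controller modules so the
    # patched auth decorators are picked up, then stop the patches again.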
auth_patches = auth_patcher.get_patches(superuser=True)
for auth_patch in auth_patches:
auth_patch.start()
for module in auth_patcher.CONTROLLER_MODULES:
reload(module)
for auth_patch in auth_patches:
auth_patch.stop()
# POST /groups
def test_create_group(tables, client):
group_name = 'TestGroup'
data = {'name': group_name}
resp = client.post(ENDPOINT, headers=HEADERS, data=json.dumps(data))
resp_json = json.loads(resp.data.decode('utf-8'))
assert resp.status_code == HTTPStatus.CREATED
assert resp_json['group']['id'] is not None
assert resp_json['group']['name'] == group_name
assert Group.get(int(resp_json['group']['id'])) is not None
# PUT /groups/{id}
def test_update_group(tables, client, new_group):
new_group.save()
new_group_name = new_group.name + '111'
resp = client.put(ENDPOINT + '/' + str(new_group.id), headers=HEADERS, data=json.dumps({'name': new_group_name}))
resp_json = json.loads(resp.data.decode('utf-8'))
assert resp.status_code == HTTPStatus.OK
assert resp_json['group']['name'] == new_group_name
assert Group.get(new_group.id).name == new_group_name
# PUT /groups/{id} - nonexistent id
def test_update_group_that_doesnt_exist(tables, client):
non_existent_id = '777'
resp = client.put(ENDPOINT + '/' + non_existent_id, headers=HEADERS, data=json.dumps({'name': 'test'}))
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}
def test_delete_group(tables, client, new_group):
new_group.save()
resp = client.delete(ENDPOINT + '/' + str(new_group.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
# Let's get all groups to verify
resp = client.get(ENDPOINT, headers=HEADERS)
resp_json = json.loads(resp.data.decode('utf-8'))
assert len(resp_json) == 0
# DELETE /groups/{id} - nonexistent id
def test_delete_group_that_doesnt_exist(tables, client):
non_existent_id = '777'
resp = client.delete(ENDPOINT + '/' + non_existent_id, headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}/users/{id}
def test_add_user_to_a_group(tables, client, new_group, new_user):
new_group.save()
new_user.save()
resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert new_group in new_user.groups
assert new_user in new_group.users
# DELETE /groups/{id}/users/{id}
def test_remove_user_from_a_group(tables, client, new_group_with_member):
new_group_with_member.save()
user = new_group_with_member.users[0]
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group_with_member.id, user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert new_group_with_member not in user.groups
assert user not in new_group_with_member.users
# PUT /groups/{id}/users/{id} - nonexistent user id
def test_add_nonexistent_user_to_a_group(tables, client, new_group):
new_group.save()
nonexistent_user_id = '777'
resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}/users/{id} - nonexistent group id
def test_add_user_to_nonexistent_group(tables, client, new_user):
new_user.save()
nonexistent_group_id = '777'
resp = client.put(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}/users/{id} - nonexistent user id
def test_remove_nonexistent_user_from_a_group(tables, client, new_group):
new_group.save()
nonexistent_user_id = '777'
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# DELETE /groups/{id}/users/{id} - nonexistent group id
def test_remove_user_from_a_nonexistent_group(tables, client, new_user):
new_user.save()
nonexistent_group_id = '777'
resp = client.delete(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS)
assert resp.status_code == HTTPStatus.NOT_FOUND
# PUT /groups/{id}
def test_set_group_as_a_default(tables, client, new_group):
new_group.save()
resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': True}), headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert Group.get(new_group.id).is_default
# PUT /groups/{id}
def test_mark_default_group_as_non_default(tables, client, new_group):
new_group.is_default = True
new_group.save()
resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': False}),
headers=HEADERS)
assert resp.status_code == HTTPStatus.OK
assert Group.get(new_group.id).is_default is False
| [((13, 19, 13, 59), 'auth_patcher.get_patches', 'auth_patcher.get_patches', (), '', False, 'import auth_patcher\n'), ((17, 8, 17, 22), 'importlib.reload', 'reload', ({(17, 15, 17, 21): 'module'}, {}), '(module)', False, 'from importlib import reload\n'), ((151, 11, 151, 34), 'tensorhive.models.Group.Group.get', 'Group.get', ({(151, 21, 151, 33): 'new_group.id'}, {}), '(new_group.id)', False, 'from tensorhive.models.Group import Group\n'), ((27, 55, 27, 71), 'json.dumps', 'json.dumps', ({(27, 66, 27, 70): 'data'}, {}), '(data)', False, 'import json\n'), ((41, 80, 41, 116), 'json.dumps', 'json.dumps', ({(41, 91, 41, 115): "{'name': new_group_name}"}, {}), "({'name': new_group_name})", False, 'import json\n'), ((46, 11, 46, 34), 'tensorhive.models.Group.Group.get', 'Group.get', ({(46, 21, 46, 33): 'new_group.id'}, {}), '(new_group.id)', False, 'from tensorhive.models.Group import Group\n'), ((52, 78, 52, 106), 'json.dumps', 'json.dumps', ({(52, 89, 52, 105): "{'name': 'test'}"}, {}), "({'name': 'test'})", False, 'import json\n'), ((148, 66, 148, 97), 'json.dumps', 'json.dumps', ({(148, 77, 148, 96): "{'isDefault': True}"}, {}), "({'isDefault': True})", False, 'import json\n'), ((159, 66, 159, 98), 'json.dumps', 'json.dumps', ({(159, 77, 159, 97): "{'isDefault': False}"}, {}), "({'isDefault': False})", False, 'import json\n'), ((163, 11, 163, 34), 'tensorhive.models.Group.Group.get', 'Group.get', ({(163, 21, 163, 33): 'new_group.id'}, {}), '(new_group.id)', False, 'from tensorhive.models.Group import Group\n')] |
midas-research/text2facegan | code/generate_thought_vectors.py | 3770333f16234fc9328d8254d1c1112fad15a16c | import os
from os.path import join, isfile
import re
import numpy as np
import pickle
import argparse
import skipthoughts
import h5py
def main():
parser = argparse.ArgumentParser()
#parser.add_argument('--caption_file', type=str, default='Data/sample_captions.txt',
# help='caption file')
parser.add_argument('--caption_file', type=str, default='/media/ssd_working_space/osaid/Data/sample_captions.txt',
help='caption file')
#parser.add_argument('--data_dir', type=str, default='Data',
# help='Data Directory')
parser.add_argument('--data_dir', type=str, default='/media/ssd_working_space/osaid/Data',
help='Data Directory')
args = parser.parse_args()
with open( args.caption_file ) as f:
captions = f.read().split('\n')
captions = [cap for cap in captions if len(cap) > 0]
print(captions)
model = skipthoughts.load_model()
caption_vectors = skipthoughts.encode(model, captions)
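	# Each caption becomes one skip-thought embedding row (the combine-skip
	# model typically yields 4800-dimensional vectors).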
if os.path.isfile(join(args.data_dir, 'sample_caption_vectors.hdf5')):
os.remove(join(args.data_dir, 'sample_caption_vectors.hdf5'))
h = h5py.File(join(args.data_dir, 'sample_caption_vectors.hdf5'))
h.create_dataset('vectors', data=caption_vectors)
h.close()
if __name__ == '__main__':
main() | [((11, 10, 11, 35), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((30, 9, 30, 34), 'skipthoughts.load_model', 'skipthoughts.load_model', ({}, {}), '()', False, 'import skipthoughts\n'), ((31, 19, 31, 55), 'skipthoughts.encode', 'skipthoughts.encode', ({(31, 39, 31, 44): 'model', (31, 46, 31, 54): 'captions'}, {}), '(model, captions)', False, 'import skipthoughts\n'), ((33, 19, 33, 69), 'os.path.join', 'join', ({(33, 24, 33, 37): 'args.data_dir', (33, 39, 33, 68): '"""sample_caption_vectors.hdf5"""'}, {}), "(args.data_dir, 'sample_caption_vectors.hdf5')", False, 'from os.path import join, isfile\n'), ((35, 15, 35, 65), 'os.path.join', 'join', ({(35, 20, 35, 33): 'args.data_dir', (35, 35, 35, 64): '"""sample_caption_vectors.hdf5"""'}, {}), "(args.data_dir, 'sample_caption_vectors.hdf5')", False, 'from os.path import join, isfile\n'), ((34, 12, 34, 62), 'os.path.join', 'join', ({(34, 17, 34, 30): 'args.data_dir', (34, 32, 34, 61): '"""sample_caption_vectors.hdf5"""'}, {}), "(args.data_dir, 'sample_caption_vectors.hdf5')", False, 'from os.path import join, isfile\n')] |
Svesnav2/Discord-Bot-Minecraft-server-status | venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py | ee34948e741930567a3adb557197523f9d32ace1 | """Parsing responses from the difficulty command."""
from mcipc.rcon.functions import boolmap
__all__ = ['parse']
SET = 'The difficulty has been set to (\\w+)'
UNCHANGED = 'The difficulty did not change; it is already set to (\\w+)'
def parse(text: str) -> bool:
"""Parses a boolean value from the text
returned by the difficulty command.
"""
return boolmap(text, true=SET, false=UNCHANGED)
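# Example (sketch): parse('The difficulty has been set to hard') -> True,
# while a "did not change" response maps to False.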
| [((18, 11, 18, 51), 'mcipc.rcon.functions.boolmap', 'boolmap', (), '', False, 'from mcipc.rcon.functions import boolmap\n')] |
Bhargavasomu/py-evm | eth/beacon/aggregation.py | ee8f72d5a70805575a967cde0a43942e1526264e | from typing import (
Iterable,
Tuple,
)
from cytoolz import (
pipe
)
from eth._utils import bls
from eth._utils.bitfield import (
set_voted,
)
from eth.beacon.enums import SignatureDomain
from eth.beacon.typing import (
BLSPubkey,
BLSSignature,
Bitfield,
CommitteeIndex,
)
def verify_votes(
message: bytes,
votes: Iterable[Tuple[CommitteeIndex, BLSSignature, BLSPubkey]],
domain: SignatureDomain
) -> Tuple[Tuple[BLSSignature, ...], Tuple[CommitteeIndex, ...]]:
"""
Verify the given votes.
vote: (committee_index, sig, public_key)
"""
    sigs_with_committee_info = tuple(
        (sig, committee_index)
        for (committee_index, sig, public_key)
        in votes
        if bls.verify(message, public_key, sig, domain)
    )
    try:
        sigs, committee_indices = zip(*sigs_with_committee_info)
except ValueError:
sigs = tuple()
committee_indices = tuple()
return sigs, committee_indices
def aggregate_votes(
bitfield: Bitfield,
sigs: Iterable[BLSSignature],
voting_sigs: Iterable[BLSSignature],
voting_committee_indices: Iterable[CommitteeIndex]
) -> Tuple[Bitfield, BLSSignature]:
"""
Aggregate the votes.
"""
# Update the bitfield and append the signatures
sigs = tuple(sigs) + tuple(voting_sigs)
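    # set_voted is curried, so each set_voted(index=...) is a one-argument partial;
    # pipe threads the bitfield through them, flipping one bit per voting committee index.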
bitfield = pipe(
bitfield,
*(
set_voted(index=committee_index)
for committee_index in voting_committee_indices
)
)
return bitfield, bls.aggregate_signatures(sigs)
| [((66, 21, 66, 51), 'eth._utils.bls.aggregate_signatures', 'bls.aggregate_signatures', ({(66, 46, 66, 50): 'sigs'}, {}), '(sigs)', False, 'from eth._utils import bls\n'), ((36, 11, 36, 55), 'eth._utils.bls.verify', 'bls.verify', ({(36, 22, 36, 29): 'message', (36, 31, 36, 41): 'public_key', (36, 43, 36, 46): 'sig', (36, 48, 36, 54): 'domain'}, {}), '(message, public_key, sig, domain)', False, 'from eth._utils import bls\n'), ((61, 12, 61, 44), 'eth._utils.bitfield.set_voted', 'set_voted', (), '', False, 'from eth._utils.bitfield import set_voted\n')] |
Cray-HPE/bos | src/server/bos/controllers/v1/components.py | a4a7fc58c884d951b6051093e1a4e2aeaba6740f | # Copyright 2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
import connexion
from datetime import datetime
import logging
from bos import redis_db_utils as dbutils
LOGGER = logging.getLogger('bos.controllers.v1.components')
DB = dbutils.get_wrapper(db='components')
@dbutils.redis_error_handler
def get_components(ids="", enabled=None):
"""Used by the GET /components API operation
    Allows filtering using a comma separated list of ids.
"""
LOGGER.debug("GET /components invoked get_components")
id_list = []
if ids:
try:
id_list = ids.split(',')
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the ids provided.",
detail=str(err))
response = get_components_data(id_list=id_list, enabled=enabled)
return response, 200
def get_components_data(id_list=None, enabled=None):
"""Used by the GET /components API operation
Allows filtering using a comma separated list of ids.
"""
response = []
if id_list:
for component_id in id_list:
data = DB.get(component_id)
if data:
response.append(data)
else:
# TODO: On large scale systems, this response may be too large
# and require paging to be implemented
response = DB.get_all()
if enabled is not None:
response = [r for r in response if _matches_filter(r, enabled)]
return response
def _matches_filter(data, enabled):
if enabled is not None and data.get('enabled', None) != enabled:
return False
return True
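# Illustrative behaviour of the filter above (added comment; not part of the
# original module): a component is kept when its stored "enabled" flag matches
# the requested one, and everything passes when no filter is requested, e.g.
#   _matches_filter({"id": "x0", "enabled": True}, True)   -> True
#   _matches_filter({"id": "x0", "enabled": False}, True)  -> False
#   _matches_filter({"id": "x0"}, None)                    -> True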
@dbutils.redis_error_handler
def put_components():
"""Used by the PUT /components API operation"""
LOGGER.debug("PUT /components invoked put_components")
try:
data = connexion.request.get_json()
components = []
for component_data in data:
component_id = component_data['id']
components.append((component_id, component_data))
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
response = []
for component_id, component_data in components:
component_data = _set_auto_fields(component_data)
response.append(DB.put(component_id, component_data))
return response, 200
@dbutils.redis_error_handler
def patch_components():
"""Used by the PATCH /components API operation"""
LOGGER.debug("PATCH /components invoked patch_components")
try:
data = connexion.request.get_json()
components = []
for component_data in data:
component_id = component_data['id']
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
components.append((component_id, component_data))
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
response = []
for component_id, component_data in components:
component_data = _set_auto_fields(component_data)
response.append(DB.patch(component_id, component_data, _update_handler))
return response, 200
@dbutils.redis_error_handler
def get_component(component_id, config_details=False, v2=False):
"""Used by the GET /components/{component_id} API operation"""
LOGGER.debug("GET /components/id invoked get_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
component = DB.get(component_id)
return component, 200
@dbutils.redis_error_handler
def put_component(component_id):
"""Used by the PUT /components/{component_id} API operation"""
LOGGER.debug("PUT /components/id invoked put_component")
try:
data = connexion.request.get_json()
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
data['id'] = component_id
data = _set_auto_fields(data)
return DB.put(component_id, data), 200
@dbutils.redis_error_handler
def patch_component(component_id):
"""Used by the PATCH /components/{component_id} API operation"""
LOGGER.debug("PATCH /components/id invoked patch_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
try:
data = connexion.request.get_json()
except Exception as err:
return connexion.problem(
status=400, title="Error parsing the data provided.",
detail=str(err))
data = _set_auto_fields(data)
return DB.patch(component_id, data, _update_handler), 200
@dbutils.redis_error_handler
def delete_component(component_id):
"""Used by the DELETE /components/{component_id} API operation"""
LOGGER.debug("DELETE /components/id invoked delete_component")
if component_id not in DB:
return connexion.problem(
status=404, title="Component could not found.",
detail="Component {} could not be found".format(component_id))
return DB.delete(component_id), 204
def _set_auto_fields(data):
data = _set_last_updated(data)
return data
def _set_last_updated(data):
timestamp = datetime.utcnow().isoformat()
for section in ['actualState', 'desiredState', 'lastAction']:
if section in data and type(data[section]) == dict:
data[section]['lastUpdated'] = timestamp
return data
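# Illustrative behaviour of _set_last_updated above (added comment; not part of
# the original module): only the actualState / desiredState / lastAction
# sections are stamped, and only when they are present as dicts. For example,
#   {"id": "x0", "desiredState": {}}
# becomes
#   {"id": "x0", "desiredState": {"lastUpdated": "<UTC ISO-8601 timestamp>"}}
# while other top-level keys are left untouched.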
def _update_handler(data):
# Allows processing of data during common patch operation
return data
| [((29, 9, 29, 59), 'logging.getLogger', 'logging.getLogger', ({(29, 27, 29, 58): '"""bos.controllers.v1.components"""'}, {}), "('bos.controllers.v1.components')", False, 'import logging\n'), ((30, 5, 30, 41), 'bos.redis_db_utils.get_wrapper', 'dbutils.get_wrapper', (), '', True, 'from bos import redis_db_utils as dbutils\n'), ((83, 15, 83, 43), 'connexion.request.get_json', 'connexion.request.get_json', ({}, {}), '()', False, 'import connexion\n'), ((104, 15, 104, 43), 'connexion.request.get_json', 'connexion.request.get_json', ({}, {}), '()', False, 'import connexion\n'), ((141, 15, 141, 43), 'connexion.request.get_json', 'connexion.request.get_json', ({}, {}), '()', False, 'import connexion\n'), ((160, 15, 160, 43), 'connexion.request.get_json', 'connexion.request.get_json', ({}, {}), '()', False, 'import connexion\n'), ((186, 16, 186, 33), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n')] |
angelusualle/algorithms | cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py | 86286a49db2a755bc57330cb455bcbd8241ea6be | import unittest
from find_x_in_listy import find_x_in_listy, Listy
class Test_Case_Find_X_In_Listy(unittest.TestCase):
def test_case_find_x_in_listy(self):
listy = Listy(list(range(0, 1*10**8)))
self.assertEqual(find_x_in_listy(listy, 5678), 5678) | [((7, 25, 7, 53), 'find_x_in_listy.find_x_in_listy', 'find_x_in_listy', ({(7, 41, 7, 46): 'listy', (7, 48, 7, 52): '(5678)'}, {}), '(listy, 5678)', False, 'from find_x_in_listy import find_x_in_listy, Listy\n')] |
arminbahl/drosophila_phototaxis_paper | my_general_helpers.py | e01dc95675f835926c9104b34bf6cfd7244dee2b | from scipy.signal import butter,filtfilt
from numba import jit
import bisect
def is_number_in_sorted_vector(sorted_vector, num):
index = bisect.bisect_left(sorted_vector, num)
return index != len(sorted_vector) and sorted_vector[index] == num
# def butter_lowpass(cutoff, fs, order=5):
# nyq = 0.5 * fs
# normal_cutoff = cutoff / nyq
# b, a = butter(order, normal_cutoff, btype='low', analog=False)
# return b, a
def butter_lowpass_filter(data, cutoff, fs, order):
nyq = 0.5 * fs # Nyquist Frequency
normal_cutoff = cutoff / nyq
# Get the filter coefficients
b, a = butter(order, normal_cutoff, btype='low', analog=False)
y = filtfilt(b, a, data)
return y
@jit
def first_order_lowpass_filter(signal_in, signal_out, tau, dt):
alpha_lowpass = dt / (tau + dt)
signal_out[0] = signal_in[0]
for i in range(1, len(signal_in)):
signal_out[i] = alpha_lowpass*signal_in[i] + (1-alpha_lowpass)*signal_out[i-1]
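# Illustrative usage sketch (added; not from the original module -- the cutoff,
# order, tau and sampling rate below are arbitrary example values):
# import numpy as np
# t = np.arange(0, 5, 0.01)                       # 5 s of data sampled at 100 Hz
# noisy = np.sin(2 * np.pi * 1.0 * t) + 0.3 * np.random.randn(len(t))
# smoothed_butter = butter_lowpass_filter(noisy, cutoff=3, fs=100, order=4)
# smoothed_first_order = np.empty_like(noisy)
# first_order_lowpass_filter(noisy, smoothed_first_order, 0.2, 0.01)  # tau=0.2 s, dt=0.01 s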
| [((7, 12, 7, 50), 'bisect.bisect_left', 'bisect.bisect_left', ({(7, 31, 7, 44): 'sorted_vector', (7, 46, 7, 49): 'num'}, {}), '(sorted_vector, num)', False, 'import bisect\n'), ((22, 11, 22, 66), 'scipy.signal.butter', 'butter', (), '', False, 'from scipy.signal import butter, filtfilt\n'), ((23, 8, 23, 28), 'scipy.signal.filtfilt', 'filtfilt', ({(23, 17, 23, 18): 'b', (23, 20, 23, 21): 'a', (23, 23, 23, 27): 'data'}, {}), '(b, a, data)', False, 'from scipy.signal import butter, filtfilt\n')] |
KarlParkinson/mitmproxy | test/mitmproxy/addons/test_proxyserver.py | fd5caf40c75ca73c4b767170497abf6a5bf016a0 | import asyncio
from contextlib import asynccontextmanager
import pytest
from mitmproxy import exceptions
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Address
from mitmproxy.proxy import layers, server_hooks
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.test import taddons, tflow
from mitmproxy.test.tflow import tclient_conn, tserver_conn
class HelperAddon:
def __init__(self):
self.flows = []
self.layers = [
lambda ctx: layers.modes.HttpProxy(ctx),
lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
lambda ctx: layers.TCPLayer(ctx),
]
def request(self, f):
self.flows.append(f)
def tcp_start(self, f):
self.flows.append(f)
def next_layer(self, nl):
nl.layer = self.layers.pop(0)(nl.context)
@asynccontextmanager
async def tcp_server(handle_conn) -> Address:
server = await asyncio.start_server(handle_conn, '127.0.0.1', 0)
await server.start_serving()
try:
yield server.sockets[0].getsockname()
finally:
server.close()
@pytest.mark.asyncio
async def test_start_stop():
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n"
writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
await writer.drain()
writer.close()
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
assert not ps.server
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
assert ps.server
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n"
assert repr(ps) == "ProxyServer(running, 1 active conns)"
tctx.configure(ps, server=False)
await tctx.master.await_log("Stopping server", level="info")
assert not ps.server
assert state.flows
assert state.flows[0].request.path == "/hello"
assert state.flows[0].response.status_code == 204
# Waiting here until everything is really torn down... takes some effort.
conn_handler = list(ps._connections.values())[0]
client_handler = conn_handler.transports[conn_handler.client].handler
writer.close()
await writer.wait_closed()
try:
await client_handler
except asyncio.CancelledError:
pass
for _ in range(5):
# Get all other scheduled coroutines to run.
await asyncio.sleep(0)
assert repr(ps) == "ProxyServer(stopped, 0 active conns)"
@pytest.mark.asyncio
async def test_inject() -> None:
async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
while s := await reader.read(1):
writer.write(s.upper())
ps = Proxyserver()
with taddons.context(ps) as tctx:
state = HelperAddon()
tctx.master.addons.add(state)
async with tcp_server(server_handler) as addr:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening", level="info")
proxy_addr = ps.server.sockets[0].getsockname()[:2]
reader, writer = await asyncio.open_connection(*proxy_addr)
req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n"
writer.write(req.encode())
assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 200 Connection established\r\n\r\n"
writer.write(b"a")
assert await reader.read(1) == b"A"
ps.inject_tcp(state.flows[0], False, b"b")
assert await reader.read(1) == b"B"
ps.inject_tcp(state.flows[0], True, b"c")
assert await reader.read(1) == b"c"
@pytest.mark.asyncio
async def test_inject_fail() -> None:
ps = Proxyserver()
with taddons.context(ps) as tctx:
ps.inject_websocket(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject WebSocket messages into non-WebSocket flows.", level="warn")
ps.inject_tcp(
tflow.tflow(),
True,
b"test"
)
await tctx.master.await_log("Cannot inject TCP messages into non-TCP flows.", level="warn")
ps.inject_websocket(
tflow.twebsocketflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
ps.inject_websocket(
tflow.ttcpflow(),
True,
b"test"
)
await tctx.master.await_log("Flow is not from a live connection.", level="warn")
@pytest.mark.asyncio
async def test_warn_no_nextlayer():
"""
Test that we log an error if the proxy server is started without NextLayer addon.
That is a mean trap to fall into when writing end-to-end tests.
"""
ps = Proxyserver()
with taddons.context(ps) as tctx:
tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
ps.running()
await tctx.master.await_log("Proxy server listening at", level="info")
assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn")
await ps.shutdown_server()
def test_self_connect():
server = tserver_conn()
client = tclient_conn()
server.address = ("localhost", 8080)
ps = Proxyserver()
with taddons.context(ps) as tctx:
# not calling .running() here to avoid unnecessary socket
ps.options = tctx.options
ps.server_connect(
server_hooks.ServerConnectionHookData(server, client)
)
assert server.error == "Stopped mitmproxy from recursively connecting to itself."
def test_options():
ps = Proxyserver()
with taddons.context(ps) as tctx:
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, body_size_limit="invalid")
tctx.configure(ps, body_size_limit="1m")
with pytest.raises(exceptions.OptionsError):
tctx.configure(ps, stream_large_bodies="invalid")
tctx.configure(ps, stream_large_bodies="1m")
| [((52, 9, 52, 22), 'mitmproxy.addons.proxyserver.Proxyserver', 'Proxyserver', ({}, {}), '()', False, 'from mitmproxy.addons.proxyserver import Proxyserver\n'), ((98, 9, 98, 22), 'mitmproxy.addons.proxyserver.Proxyserver', 'Proxyserver', ({}, {}), '()', False, 'from mitmproxy.addons.proxyserver import Proxyserver\n'), ((123, 9, 123, 22), 'mitmproxy.addons.proxyserver.Proxyserver', 'Proxyserver', ({}, {}), '()', False, 'from mitmproxy.addons.proxyserver import Proxyserver\n'), ((158, 9, 158, 22), 'mitmproxy.addons.proxyserver.Proxyserver', 'Proxyserver', ({}, {}), '()', False, 'from mitmproxy.addons.proxyserver import Proxyserver\n'), ((168, 13, 168, 27), 'mitmproxy.test.tflow.tserver_conn', 'tserver_conn', ({}, {}), '()', False, 'from mitmproxy.test.tflow import tclient_conn, tserver_conn\n'), ((169, 13, 169, 27), 'mitmproxy.test.tflow.tclient_conn', 'tclient_conn', ({}, {}), '()', False, 'from mitmproxy.test.tflow import tclient_conn, tserver_conn\n'), ((171, 9, 171, 22), 'mitmproxy.addons.proxyserver.Proxyserver', 'Proxyserver', ({}, {}), '()', False, 'from mitmproxy.addons.proxyserver import Proxyserver\n'), ((182, 9, 182, 22), 'mitmproxy.addons.proxyserver.Proxyserver', 'Proxyserver', ({}, {}), '()', False, 'from mitmproxy.addons.proxyserver import Proxyserver\n'), ((36, 19, 36, 68), 'asyncio.start_server', 'asyncio.start_server', ({(36, 40, 36, 51): 'handle_conn', (36, 53, 36, 64): '"""127.0.0.1"""', (36, 66, 36, 67): '(0)'}, {}), "(handle_conn, '127.0.0.1', 0)", False, 'import asyncio\n'), ((53, 9, 53, 28), 'mitmproxy.test.taddons.context', 'taddons.context', ({(53, 25, 53, 27): 'ps'}, {}), '(ps)', False, 'from mitmproxy.test import taddons, tflow\n'), ((99, 9, 99, 28), 'mitmproxy.test.taddons.context', 'taddons.context', ({(99, 25, 99, 27): 'ps'}, {}), '(ps)', False, 'from mitmproxy.test import taddons, tflow\n'), ((124, 9, 124, 28), 'mitmproxy.test.taddons.context', 'taddons.context', ({(124, 25, 124, 27): 'ps'}, {}), '(ps)', False, 'from mitmproxy.test import taddons, tflow\n'), ((159, 9, 159, 28), 'mitmproxy.test.taddons.context', 'taddons.context', ({(159, 25, 159, 27): 'ps'}, {}), '(ps)', False, 'from mitmproxy.test import taddons, tflow\n'), ((172, 9, 172, 28), 'mitmproxy.test.taddons.context', 'taddons.context', ({(172, 25, 172, 27): 'ps'}, {}), '(ps)', False, 'from mitmproxy.test import taddons, tflow\n'), ((183, 9, 183, 28), 'mitmproxy.test.taddons.context', 'taddons.context', ({(183, 25, 183, 27): 'ps'}, {}), '(ps)', False, 'from mitmproxy.test import taddons, tflow\n'), ((126, 12, 126, 25), 'mitmproxy.test.tflow.tflow', 'tflow.tflow', ({}, {}), '()', False, 'from mitmproxy.test import taddons, tflow\n'), ((132, 12, 132, 25), 'mitmproxy.test.tflow.tflow', 'tflow.tflow', ({}, {}), '()', False, 'from mitmproxy.test import taddons, tflow\n'), ((139, 12, 139, 34), 'mitmproxy.test.tflow.twebsocketflow', 'tflow.twebsocketflow', ({}, {}), '()', False, 'from mitmproxy.test import taddons, tflow\n'), ((145, 12, 145, 28), 'mitmproxy.test.tflow.ttcpflow', 'tflow.ttcpflow', ({}, {}), '()', False, 'from mitmproxy.test import taddons, tflow\n'), ((176, 12, 176, 65), 'mitmproxy.proxy.server_hooks.ServerConnectionHookData', 'server_hooks.ServerConnectionHookData', ({(176, 50, 176, 56): 'server', (176, 58, 176, 64): 'client'}, {}), '(server, client)', False, 'from mitmproxy.proxy import layers, server_hooks\n'), ((184, 13, 184, 51), 'pytest.raises', 'pytest.raises', ({(184, 27, 184, 50): 'exceptions.OptionsError'}, {}), '(exceptions.OptionsError)', False, 'import pytest\n'), ((188, 13, 
188, 51), 'pytest.raises', 'pytest.raises', ({(188, 27, 188, 50): 'exceptions.OptionsError'}, {}), '(exceptions.OptionsError)', False, 'import pytest\n'), ((19, 24, 19, 51), 'mitmproxy.proxy.layers.modes.HttpProxy', 'layers.modes.HttpProxy', ({(19, 47, 19, 50): 'ctx'}, {}), '(ctx)', False, 'from mitmproxy.proxy import layers, server_hooks\n'), ((20, 24, 20, 63), 'mitmproxy.proxy.layers.HttpLayer', 'layers.HttpLayer', ({(20, 41, 20, 44): 'ctx', (20, 46, 20, 62): 'HTTPMode.regular'}, {}), '(ctx, HTTPMode.regular)', False, 'from mitmproxy.proxy import layers, server_hooks\n'), ((21, 24, 21, 44), 'mitmproxy.proxy.layers.TCPLayer', 'layers.TCPLayer', ({(21, 40, 21, 43): 'ctx'}, {}), '(ctx)', False, 'from mitmproxy.proxy import layers, server_hooks\n'), ((64, 35, 64, 71), 'asyncio.open_connection', 'asyncio.open_connection', ({(64, 59, 64, 70): '*proxy_addr'}, {}), '(*proxy_addr)', False, 'import asyncio\n'), ((107, 35, 107, 71), 'asyncio.open_connection', 'asyncio.open_connection', ({(107, 59, 107, 70): '*proxy_addr'}, {}), '(*proxy_addr)', False, 'import asyncio\n'), ((88, 22, 88, 38), 'asyncio.sleep', 'asyncio.sleep', ({(88, 36, 88, 37): '(0)'}, {}), '(0)', False, 'import asyncio\n')] |
mederrata/probability | tensorflow_probability/python/distributions/masked.py | bc6c411b0fbd83141f303f91a27343fe3c43a797 | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The MaskedIndependent distribution class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.distributions import batch_broadcast
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
def _add_event_dims_to_mask(validity_mask, *, dist=None, event_ndims=None):
validity_mask = tf.convert_to_tensor(validity_mask)
if event_ndims is None:
event_ndims = ps.rank_from_shape(dist.event_shape_tensor())
return tf.reshape(
validity_mask,
ps.concat([
ps.shape(validity_mask),
ps.ones(event_ndims, dtype=tf.int32)
], axis=0))
def _make_masked_fn(fn_name, n_event_shapes, safe_value,
make_arg0_safe=False):
"""Implements functions like mean, variance, etc.
Args:
fn_name: Name of the method called on the underlying distribution.
n_event_shapes: Number of event shape repeats in the shape of the underlying
function's output.
safe_value: The value to be placed in invalid locations. May be
`'safe_sample'` to specify we should use the "safe sample" value.
make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the
argument passed into the underlying routine is a "safe" sample.
Returns:
fn: Callable implementing the given function.
"""
def fn(self, *args, **kwargs):
if safe_value == 'safe_sample' or make_arg0_safe: # Only if needed.
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
validity_mask = tf.convert_to_tensor(self.validity_mask)
if make_arg0_safe:
x = args[0]
safe_x = tf.where(
_add_event_dims_to_mask(validity_mask, dist=self), x, safe_val)
args = (safe_x,) + args[1:]
val = getattr(self.distribution, fn_name)(*args, **kwargs)
if n_event_shapes:
validity_mask = tf.reshape(
validity_mask,
ps.concat(
[ps.shape(validity_mask)] +
[ps.ones_like(self.event_shape_tensor())] * n_event_shapes,
axis=0))
if safe_value == 'safe_sample':
sentinel = tf.cast(safe_val, val.dtype)
else:
sentinel = tf.cast(safe_value, val.dtype)
return tf.where(validity_mask, val, sentinel)
fn.__name__ = f'_{fn_name}'
return fn
def _fixed_sample(d):
return d.sample(seed=samplers.zeros_seed())
class _Masked(distribution_lib.Distribution):
"""A distribution that masks invalid underlying distributions.
Sometimes we may want a way of masking out a subset of distributions. Perhaps
we have labels for only a subset of batch members and want to evaluate a
log_prob. Or we may want to encode a sparse random variable as a dense
random variable with a mask applied. In single-program/multiple-data regimes,
it can be necessary to pad Distributions and the samples thereof to a given
size in order to achieve the "single-program" desideratum.
When computing a probability density in this regime, we would like to mask out
the contributions of invalid batch members. We may also want to ensure that
the values being sampled are valid parameters for descendant distributions in
a hierarchical model, even if they are ultimately masked out. This
distribution answers those requirements. Specifically, for invalid batch
elements:
- `log_prob(x) == 0.` for all `x`, with no gradients back to `x`, nor any
gradients to the parameters of `distribution`.
- `sample() == tf.stop_gradient(safe_value_fn(distribution))`, with no
gradients back to the parameters of `distribution`.
The distribution accepts a mask specified by `validity_mask`, a boolean tensor
broadcastable with the underlying distribution's batch shape which specifies
for each batch element whether or not it is valid.
Entries in `validity_mask` which are `False` denote missing distributions,
which means that the corresponding entries in the measures (e.g. `prob`)
and statistics (e.g. `mean`) must not be treated as coming from some real
  distribution. Whenever doing a reduction across those quantities, make sure to
either mask out the invalid entries or make sure the returned value
corresponds to the identity element of the reduction. For a couple examples:
- OK: `reduce_sum(masked_dist.log_prob(x))`
- OK: `tfd.Independent(masked_dist, ...)`
- Not OK: `reduce_var(masked_dist.mean())` will underestimate the variance
because it uses too large an `N`.
- Not OK: `tf.linalg.cholesky(masked_dist.covariance())` will fail for invalid
batch elements.
The default `safe_value_fn` is to draw a fixed-seeded sample from the
underlying `distribution`. Since this may be expensive, it is suggested to
specify a computationally cheaper method. Some options might include:
- `tfd.Distribution.mode`
- `tfd.Distribution.mean`
- `lambda d: d.quantile(.5)` (median)
- `lambda _: 0.` (if zero is always in the support of d)
- `lambda d: d.experimental_default_event_space_bijector()(0.)`
Besides the output of `sample`, results from `safe_value_fn` may also appear
in (invalid batch members of) `masked.default_event_space_bijector().forward`.
#### Examples
```
# Use tf.sequence_mask for `range(n) < num_valid`.
num_valid = 3
num_entries = 4
d = tfd.Masked(
tfd.MultivariateNormalDiag(tf.zeros([2, num_entries, 5]), tf.ones([5])),
tf.sequence_mask(num_valid, num_entries))
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[nonzero, nonzero, nonzero, 0.],
# [nonzero, nonzero, nonzero, 0.]]
# Explicitly denote which elements are valid, adding a new batch dim of 2.
d = tfd.Masked(tfd.MultivariateNormalDiag(tf.zeros([4, 5]), tf.ones([5])),
[[False], [True]])
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[0., 0., 0., 0.],
# [nonzero, nonzero, nonzero, nonzero]]
# Use `BatchBroadcast` and `Independent` to achieve the equivalent of adding
# positional mask functionality to `tfd.Sample`.
# Suppose we wanted to achieve this:
# `tfd.Sample(tfd.Normal(tf.zeros(2), 1), [3, 4], validity_mask=mask)`
# We can write:
d = tfd.Independent(
tfd.Masked(tfd.BatchBroadcast(tfd.Normal(0, 1), [2, 3, 4]), mask),
reinterpreted_batch_ndims=2)
d.batch_shape # [2]
d.event_shape # [3, 4]
d.log_prob(tf.ones([3, 4])) # shape [2]
```
"""
def __init__(self,
distribution,
validity_mask,
safe_sample_fn=_fixed_sample,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Constructs a Masked distribution.
Args:
distribution: The underlying distribution, which will be masked.
validity_mask: Boolean mask where `True` indicates an element is valid.
`validity_mask` must broadcast with the batch shape of the underlying
distribution. Invalid batch elements are masked so that sampling returns
`safe_sample_fn(dist)` in invalid positions and `log_prob(x)` returns
`0.` for invalid positions.
safe_sample_fn: A callable which takes a distribution (namely,
        the `distribution` argument) and returns a deterministic, safe sample
        value. This helps to avoid `nan` gradients and allows downstream usage
        of samples from a `Masked` distribution to assume a "safe" (even if
        invalid) value. (Be careful to ensure that such downstream usages are
themselves masked!) Note that the result of this function will be
wrapped in a `tf.stop_gradient` call.
validate_args: Boolean indicating whether argument assertions should be
run. May impose performance penalties.
allow_nan_stats: Boolean indicating whether statistical functions may
return `nan`, or should instead use asserts where possible.
name: Optional name for operation scoping.
"""
parameters = dict(locals())
with tf.name_scope(name or f'Masked{distribution.name}') as name:
self._distribution = distribution
self._validity_mask = tensor_util.convert_nonref_to_tensor(
validity_mask, dtype_hint=tf.bool)
self._safe_sample_fn = safe_sample_fn
super(_Masked, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
distribution=parameter_properties.BatchedComponentProperties(),
validity_mask=parameter_properties.ParameterProperties(
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED))
@property
def distribution(self):
return self._distribution
@property
def validity_mask(self):
return self._validity_mask
@property
def safe_sample_fn(self):
return self._safe_sample_fn
@property
def experimental_is_sharded(self):
return self.distribution.experimental_is_sharded
def _event_shape(self):
return self.distribution.event_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _sample_n(self, n, seed=None, **kwargs):
validity_mask = tf.convert_to_tensor(self.validity_mask)
# To avoid the shape gymnastics of drawing extra samples, we delegate
# sampling to the BatchBroadcast distribution.
bb = batch_broadcast.BatchBroadcast(self.distribution,
ps.shape(validity_mask))
samples = bb.sample(n, seed=seed, **kwargs)
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
return tf.where(_add_event_dims_to_mask(validity_mask, dist=self),
samples, safe_val)
_log_prob = _make_masked_fn(
'log_prob', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_prob = _make_masked_fn(
'prob', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_cdf = _make_masked_fn(
'log_cdf', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_cdf = _make_masked_fn(
'cdf', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_survival_function = _make_masked_fn(
'log_survival_function', n_event_shapes=0, safe_value=-float('inf'),
make_arg0_safe=True)
_survival_function = _make_masked_fn(
'survival_function', n_event_shapes=0, safe_value=0.,
make_arg0_safe=True)
_entropy = _make_masked_fn(
'entropy', n_event_shapes=0, safe_value=0.)
_mode = _make_masked_fn(
'mode', n_event_shapes=1, safe_value='safe_sample')
_mean = _make_masked_fn(
'mean', n_event_shapes=1, safe_value='safe_sample')
_variance = _make_masked_fn(
'variance', n_event_shapes=1, safe_value=0.)
_stddev = _make_masked_fn(
'stddev', n_event_shapes=1, safe_value=0.)
_covariance = _make_masked_fn(
'covariance', n_event_shapes=2, safe_value=0.)
_quantile = _make_masked_fn(
'quantile', n_event_shapes=1, safe_value='safe_sample')
def _default_event_space_bijector(self, *args, **kwargs):
underlying_bijector = (
self.distribution.experimental_default_event_space_bijector())
if underlying_bijector is None:
return None
return _MaskedBijector(self, underlying_bijector)
class Masked(_Masked, distribution_lib.AutoCompositeTensorDistribution):
def __new__(cls, *args, **kwargs):
"""Maybe return a non-`CompositeTensor` `_Masked`."""
if cls is Masked:
if args:
distribution = args[0]
else:
distribution = kwargs.get('distribution')
if not isinstance(distribution, tf.__internal__.CompositeTensor):
return _Masked(*args, **kwargs)
return super(Masked, cls).__new__(cls)
Masked.__doc__ = _Masked.__doc__ + '\n' + (
'If `distribution` is a `CompositeTensor`, then the resulting `Masked` '
'instance is a `CompositeTensor` as well. Otherwise, a '
'non-`CompositeTensor` `_Masked` instance is created instead. Distribution '
'subclasses that inherit from `Masked` will also inherit from '
'`CompositeTensor`.')
@kullback_leibler.RegisterKL(_Masked, _Masked)
def _kl_masked_masked(a, b, name=None):
"""KL divergence between Masked distributions."""
with tf.name_scope(name or 'kl_masked_masked'):
a_valid = tf.convert_to_tensor(a.validity_mask)
b_valid = tf.convert_to_tensor(b.validity_mask)
underlying_kl = kullback_leibler.kl_divergence(
a.distribution, b.distribution)
# The treatment for KL is as follows:
# When both random variables are valid, the underlying KL applies.
# When neither random variable is valid, the KL is 0., i.e.
# `a log a - a log b = 0` because log a and log b are everywhere 0.
# When exactly one is valid, we (a) raise an assertion error, if either
# distribution's allow_nan_stats is set to False, or (b) return nan in
# such positions.
asserts = []
if not (a.allow_nan_stats and b.allow_nan_stats):
asserts.append(assert_util.assert_equal(
a_valid, b_valid,
message='KL is only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (a_valid & b_valid)
neither_valid = (~a_valid) & (~b_valid)
dtype = underlying_kl.dtype
return tf.where(both_valid, underlying_kl,
tf.where(neither_valid,
tf.zeros([], dtype), float('nan')))
@log_prob_ratio.RegisterLogProbRatio(_Masked)
def _masked_log_prob_ratio(p, x, q, y, name=None):
"""Computes log p(x) - log q(y) for Masked p, q."""
with tf.name_scope(name or 'masked_log_prob_ratio'):
p_valid = tf.convert_to_tensor(p.validity_mask)
safe_x = tf.where(_add_event_dims_to_mask(p_valid, dist=p),
x, tf.stop_gradient(p.safe_sample_fn(p.distribution)))
q_valid = tf.convert_to_tensor(q.validity_mask)
safe_y = tf.where(_add_event_dims_to_mask(q_valid, dist=q),
y, tf.stop_gradient(q.safe_sample_fn(q.distribution)))
underlying = log_prob_ratio.log_prob_ratio(
p.distribution, safe_x, q.distribution, safe_y)
asserts = []
# As with KL, we return the underlying log_prob_ratio where both are valid,
# `0.` where neither is valid, and `nan` otherwise (or an assertion if
# either distribution does not `allow_nan_stats`).
    if not (p.allow_nan_stats and q.allow_nan_stats):
asserts.append(assert_util.assert_equal(
p_valid, q_valid,
message='Masked log_prob_ratio only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (p_valid & q_valid)
neither_valid = (~p_valid) & (~q_valid)
return tf.where(both_valid, underlying,
tf.where(neither_valid,
tf.zeros([], dtype=underlying.dtype),
float('nan')))
class _NonCompositeTensorMaskedBijector(bijector_lib.Bijector):
"""Event space bijector for Masked distributions."""
def __init__(self, masked, underlying_bijector):
self._masked = masked
self._bijector = underlying_bijector
super(_NonCompositeTensorMaskedBijector, self).__init__(
validate_args=underlying_bijector.validate_args,
dtype=underlying_bijector.dtype,
forward_min_event_ndims=underlying_bijector.forward_min_event_ndims,
inverse_min_event_ndims=underlying_bijector.inverse_min_event_ndims)
def _forward_event_shape(self, x):
return self._bijector.forward_event_shape(x)
def _forward_event_shape_tensor(self, x):
return self._bijector.forward_event_shape_tensor(x)
def _inverse_event_shape(self, y):
return self._bijector.inverse_event_shape(y)
def _inverse_event_shape_tensor(self, y):
return self._bijector.inverse_event_shape_tensor(y)
def _make_safe_x(self, x, validity_mask):
bij = self._bijector
masked = self._masked
pullback_event_ndims = ps.rank_from_shape(
lambda: bij.inverse_event_shape_tensor(masked.event_shape_tensor()),
self._bijector.inverse_event_shape(masked.event_shape))
pullback_event_mask = _add_event_dims_to_mask(
validity_mask, event_ndims=pullback_event_ndims)
# We presume that 0 in unconstrained space is safe.
return tf.where(pullback_event_mask, x, 0.)
def _forward(self, x):
mask = self._masked.validity_mask
safe_x = self._make_safe_x(x, mask)
return self._make_safe_y(self._bijector.forward(safe_x), mask)
def _forward_log_det_jacobian(self, x):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_x = self._make_safe_x(x, validity_mask)
return tf.where(validity_mask,
self._bijector.forward_log_det_jacobian(safe_x),
0.)
def _make_safe_y(self, y, validity_mask):
safe_val = tf.stop_gradient(
self._masked.safe_sample_fn(self._masked.distribution))
event_mask = _add_event_dims_to_mask(validity_mask, dist=self._masked)
return tf.where(event_mask, y, safe_val)
def _inverse(self, y):
safe_y = self._make_safe_y(y, self._masked.validity_mask)
return self._bijector.inverse(safe_y)
def _inverse_log_det_jacobian(self, y):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_y = self._make_safe_y(y, validity_mask)
return tf.where(validity_mask,
self._bijector.inverse_log_det_jacobian(safe_y),
0.)
class _MaskedBijector(_NonCompositeTensorMaskedBijector,
bijector_lib.AutoCompositeTensorBijector):
"""Event space bijector for Masked distributions."""
def __new__(cls, *args, **kwargs):
"""Maybe return a `_NonCompositeTensorMaskedBijector`."""
if cls is _MaskedBijector:
if args:
masked = args[0]
else:
masked = kwargs.get('masked')
if len(args) > 1:
bijector = args[1]
else:
bijector = kwargs.get('underlying_bijector')
if not (isinstance(masked, tf.__internal__.CompositeTensor)
and isinstance(bijector, tf.__internal__.CompositeTensor)):
return _NonCompositeTensorMaskedBijector(*args, **kwargs)
return super(_MaskedBijector, cls).__new__(cls)
| [((325, 1, 325, 46), 'tensorflow_probability.python.distributions.kullback_leibler.RegisterKL', 'kullback_leibler.RegisterKL', ({(325, 29, 325, 36): '_Masked', (325, 38, 325, 45): '_Masked'}, {}), '(_Masked, _Masked)', False, 'from tensorflow_probability.python.distributions import kullback_leibler\n'), ((355, 1, 355, 45), 'tensorflow_probability.python.distributions.log_prob_ratio.RegisterLogProbRatio', 'log_prob_ratio.RegisterLogProbRatio', ({(355, 37, 355, 44): '_Masked'}, {}), '(_Masked)', False, 'from tensorflow_probability.python.distributions import log_prob_ratio\n'), ((32, 18, 32, 53), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(32, 39, 32, 52): 'validity_mask'}, {}), '(validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((63, 20, 63, 60), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(63, 41, 63, 59): 'self.validity_mask'}, {}), '(self.validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((82, 11, 82, 49), 'tensorflow.compat.v2.where', 'tf.where', ({(82, 20, 82, 33): 'validity_mask', (82, 35, 82, 38): 'val', (82, 40, 82, 48): 'sentinel'}, {}), '(validity_mask, val, sentinel)', True, 'import tensorflow.compat.v2 as tf\n'), ((253, 20, 253, 60), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(253, 41, 253, 59): 'self.validity_mask'}, {}), '(self.validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((328, 7, 328, 48), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', ({(328, 21, 328, 47): "(name or 'kl_masked_masked')"}, {}), "(name or 'kl_masked_masked')", True, 'import tensorflow.compat.v2 as tf\n'), ((329, 14, 329, 51), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(329, 35, 329, 50): 'a.validity_mask'}, {}), '(a.validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((330, 14, 330, 51), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(330, 35, 330, 50): 'b.validity_mask'}, {}), '(b.validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((331, 20, 332, 39), 'tensorflow_probability.python.distributions.kullback_leibler.kl_divergence', 'kullback_leibler.kl_divergence', ({(332, 8, 332, 22): 'a.distribution', (332, 24, 332, 38): 'b.distribution'}, {}), '(a.distribution, b.distribution)', False, 'from tensorflow_probability.python.distributions import kullback_leibler\n'), ((358, 7, 358, 53), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', ({(358, 21, 358, 52): "(name or 'masked_log_prob_ratio')"}, {}), "(name or 'masked_log_prob_ratio')", True, 'import tensorflow.compat.v2 as tf\n'), ((359, 14, 359, 51), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(359, 35, 359, 50): 'p.validity_mask'}, {}), '(p.validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((362, 14, 362, 51), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(362, 35, 362, 50): 'q.validity_mask'}, {}), '(q.validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((365, 17, 366, 55), 'tensorflow_probability.python.distributions.log_prob_ratio.log_prob_ratio', 'log_prob_ratio.log_prob_ratio', ({(366, 8, 366, 22): 'p.distribution', (366, 24, 366, 30): 'safe_x', (366, 32, 366, 46): 'q.distribution', (366, 48, 366, 54): 'safe_y'}, {}), '(p.distribution, safe_x, q.distribution, safe_y)', False, 'from tensorflow_probability.python.distributions import log_prob_ratio\n'), ((417, 11, 417, 47), 'tensorflow.compat.v2.where', 'tf.where', ({(417, 20, 417, 39): 'pullback_event_mask', (417, 41, 417, 42): 'x', (417, 44, 
417, 46): '(0.0)'}, {}), '(pullback_event_mask, x, 0.0)', True, 'import tensorflow.compat.v2 as tf\n'), ((425, 20, 425, 68), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(425, 41, 425, 67): 'self._masked.validity_mask'}, {}), '(self._masked.validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((435, 11, 435, 44), 'tensorflow.compat.v2.where', 'tf.where', ({(435, 20, 435, 30): 'event_mask', (435, 32, 435, 33): 'y', (435, 35, 435, 43): 'safe_val'}, {}), '(event_mask, y, safe_val)', True, 'import tensorflow.compat.v2 as tf\n'), ((442, 20, 442, 68), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', ({(442, 41, 442, 67): 'self._masked.validity_mask'}, {}), '(self._masked.validity_mask)', True, 'import tensorflow.compat.v2 as tf\n'), ((79, 17, 79, 45), 'tensorflow.compat.v2.cast', 'tf.cast', ({(79, 25, 79, 33): 'safe_val', (79, 35, 79, 44): 'val.dtype'}, {}), '(safe_val, val.dtype)', True, 'import tensorflow.compat.v2 as tf\n'), ((81, 17, 81, 47), 'tensorflow.compat.v2.cast', 'tf.cast', ({(81, 25, 81, 35): 'safe_value', (81, 37, 81, 46): 'val.dtype'}, {}), '(safe_value, val.dtype)', True, 'import tensorflow.compat.v2 as tf\n'), ((89, 23, 89, 44), 'tensorflow_probability.python.internal.samplers.zeros_seed', 'samplers.zeros_seed', ({}, {}), '()', False, 'from tensorflow_probability.python.internal import samplers\n'), ((210, 9, 210, 60), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', ({(210, 23, 210, 59): "(name or f'Masked{distribution.name}')"}, {}), "(name or f'Masked{distribution.name}')", True, 'import tensorflow.compat.v2 as tf\n'), ((212, 28, 213, 44), 'tensorflow_probability.python.internal.tensor_util.convert_nonref_to_tensor', 'tensor_util.convert_nonref_to_tensor', (), '', False, 'from tensorflow_probability.python.internal import tensor_util\n'), ((257, 40, 257, 63), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(257, 49, 257, 62): 'validity_mask'}, {}), '(validity_mask)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((346, 9, 346, 41), 'tensorflow.compat.v2.control_dependencies', 'tf.control_dependencies', ({(346, 33, 346, 40): 'asserts'}, {}), '(asserts)', True, 'import tensorflow.compat.v2 as tf\n'), ((375, 9, 375, 41), 'tensorflow.compat.v2.control_dependencies', 'tf.control_dependencies', ({(375, 33, 375, 40): 'asserts'}, {}), '(asserts)', True, 'import tensorflow.compat.v2 as tf\n'), ((38, 10, 38, 33), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(38, 19, 38, 32): 'validity_mask'}, {}), '(validity_mask)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((39, 10, 39, 46), 'tensorflow_probability.python.internal.prefer_static.ones', 'ps.ones', (), '', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((226, 21, 226, 70), 'tensorflow_probability.python.internal.parameter_properties.BatchedComponentProperties', 'parameter_properties.BatchedComponentProperties', ({}, {}), '()', False, 'from tensorflow_probability.python.internal import parameter_properties\n'), ((227, 22, 228, 67), 'tensorflow_probability.python.internal.parameter_properties.ParameterProperties', 'parameter_properties.ParameterProperties', (), '', False, 'from tensorflow_probability.python.internal import parameter_properties\n'), ((343, 21, 345, 62), 'tensorflow_probability.python.internal.assert_util.assert_equal', 'assert_util.assert_equal', (), '', False, 'from 
tensorflow_probability.python.internal import assert_util\n'), ((372, 21, 374, 78), 'tensorflow_probability.python.internal.assert_util.assert_equal', 'assert_util.assert_equal', (), '', False, 'from tensorflow_probability.python.internal import assert_util\n'), ((352, 31, 352, 50), 'tensorflow.compat.v2.zeros', 'tf.zeros', ({(352, 40, 352, 42): '[]', (352, 44, 352, 49): 'dtype'}, {}), '([], dtype)', True, 'import tensorflow.compat.v2 as tf\n'), ((380, 31, 380, 67), 'tensorflow.compat.v2.zeros', 'tf.zeros', (), '', True, 'import tensorflow.compat.v2 as tf\n'), ((75, 15, 75, 38), 'tensorflow_probability.python.internal.prefer_static.shape', 'ps.shape', ({(75, 24, 75, 37): 'validity_mask'}, {}), '(validity_mask)', True, 'from tensorflow_probability.python.internal import prefer_static as ps\n')] |
kaija/taiwan_stockloader | download.py | 637244c3b0bc96093cc5a7b3df093a829f9e3c2d | import datetime
import httplib
import urllib
from datetime import timedelta
#now = datetime.datetime.now();
#today = now.strftime('%Y-%m-%d')
#print today
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def convfloat(value):
try:
return float(value)
except ValueError:
return -1
today = datetime.date.today()
one_day = timedelta(days=1);
#start_day = datetime.date(2004, 2, 11);
start_day = datetime.date(2010, 8, 21);
print "Download from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
dl_date = start_day
while dl_date < today:
httpreq = httplib.HTTPConnection('www.twse.com.tw')
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
date_str = str(dl_date.year - 1911 ) + dl_date.strftime("/%m/%d")
form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
httpres = httpreq.getresponse()
stock_csv = httpres.read()
file_name = "data/" + dl_date.strftime("%Y%m%d") + ".csv"
print "downloading " + file_name
f = open(file_name, "w")
    f.write(stock_csv)
    f.close()
dl_date += one_day
print "Download Finish!"
| [] |
annalunde/master | heuristic/improvement/reopt/disruption_updater.py | 2552d43713e8ebca0b0e57bc5bebd1eaeeac1875 | import copy
import pandas as pd
from decouple import config
from heuristic.construction.construction import ConstructionHeuristic
from config.construction_config import *
from simulation.simulator import Simulator
from heuristic.improvement.reopt.new_request_updater import NewRequestUpdater
class DisruptionUpdater:
def __init__(self, new_request_updater):
self.new_request_updater = new_request_updater
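    # Note added for clarity (inferred from the methods below, not stated in
    # the original source): a route plan is assumed to be a list of vehicle
    # routes, where each node is a 6-tuple
    #   (node, time, deviation, passengers, wheelchairs, request)
    # so e.g. node[1] is the scheduled time of service at that node.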
def update_route_plan(self, current_route_plan, disruption_type, disruption_info, sim_clock):
# adding current position for each vehicle
vehicle_clocks, artificial_depot = self.update_vehicle_clocks(
current_route_plan, sim_clock, disruption_type, disruption_info)
updated_route_plan = copy.deepcopy(current_route_plan)
if disruption_type == 'request':
self.new_request_updater.set_parameters(disruption_info)
elif disruption_type == 'delay':
updated_route_plan = self.update_with_delay(
current_route_plan, disruption_info)
elif disruption_type == 'cancel':
# update capacities
updated_vehicle_route = self.update_capacities(
updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
updated_route_plan[disruption_info[0]][disruption_info[1]][5])
updated_route_plan[disruption_info[0]] = updated_vehicle_route
if artificial_depot:
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
else:
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
# remove pickup node
del updated_route_plan[disruption_info[0]][disruption_info[1]]
else:
# no show
# update capacities
updated_vehicle_route = self.update_capacities(
updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
updated_route_plan[disruption_info[0]][disruption_info[1]][5])
updated_route_plan[disruption_info[0]] = updated_vehicle_route
# remove dropoff node
del updated_route_plan[disruption_info[0]][disruption_info[2]]
return updated_route_plan, vehicle_clocks
def update_with_delay(self, current_route_plan, disruption_info):
delay_duration = disruption_info[2]
route_plan = copy.deepcopy(current_route_plan)
start_idx = disruption_info[1]
for node in route_plan[disruption_info[0]][disruption_info[1]:]:
t = node[1] + delay_duration
d = node[2] + delay_duration
node = (node[0], t, d, node[3], node[4], node[5])
route_plan[disruption_info[0]][start_idx] = node
start_idx += 1
return route_plan
@staticmethod
def recalibrate_solution(current_route_plan, disruption_info, still_delayed_nodes):
delay_duration = disruption_info[2]
route_plan = copy.deepcopy(current_route_plan)
for node in still_delayed_nodes:
idx = next(i for i, (node_test, *_)
in enumerate(route_plan[disruption_info[0]]) if node_test == node)
node_route = route_plan[disruption_info[0]][idx]
d = node_route[2] - delay_duration
node_route = (node_route[0], node_route[1], d,
node_route[3], node_route[4], node_route[5])
route_plan[disruption_info[0]][idx] = node_route
return route_plan
def update_vehicle_clocks(self, current_route_plan, sim_clock, disruption_type, disruption_info):
artificial_depot = False
# find index for next node after sim_clock and corresponding time of service
vehicle_clocks = []
for vehicle_route in current_route_plan:
if len(vehicle_route) > 1:
if vehicle_route[0][1] < sim_clock:
prev_idx = 0
for idx, (node, time, deviation, passenger, wheelchair, _) in enumerate(vehicle_route):
if time <= sim_clock:
prev_idx = idx
if prev_idx == len(vehicle_route) - 1:
vehicle_clocks.append(sim_clock)
else:
next_idx = prev_idx + 1
vehicle_clocks.append(vehicle_route[next_idx][1])
if disruption_type == 'cancel':
# check whether next node after sim_clock is the request that is cancelled
if current_route_plan[disruption_info[0]][disruption_info[1]] == vehicle_route[next_idx]:
artificial_depot = True
else:
vehicle_clocks.append(sim_clock)
else:
vehicle_clocks.append(sim_clock)
return vehicle_clocks, artificial_depot
def update_capacities(self, vehicle_route, start_id, dropoff_id, request):
idx = start_id
for n, t, d, p, w, _ in vehicle_route[start_id:dropoff_id]:
p -= request["Number of Passengers"]
w -= request["Wheelchair"]
vehicle_route[idx] = (n, t, d, p, w, _)
idx += 1
return vehicle_route
| [((20, 29, 20, 62), 'copy.deepcopy', 'copy.deepcopy', ({(20, 43, 20, 61): 'current_route_plan'}, {}), '(current_route_plan)', False, 'import copy\n'), ((62, 21, 62, 54), 'copy.deepcopy', 'copy.deepcopy', ({(62, 35, 62, 53): 'current_route_plan'}, {}), '(current_route_plan)', False, 'import copy\n'), ((77, 21, 77, 54), 'copy.deepcopy', 'copy.deepcopy', ({(77, 35, 77, 53): 'current_route_plan'}, {}), '(current_route_plan)', False, 'import copy\n')] |
Jaykingamez/evennia | evennia/scripts/migrations/0013_auto_20191025_0831.py | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | # Generated by Django 2.2.6 on 2019-10-25 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("scripts", "0012_auto_20190128_1820")]
operations = [
migrations.AlterField(
model_name="scriptdb",
name="db_typeclass_path",
field=models.CharField(
db_index=True,
help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.",
max_length=255,
null=True,
verbose_name="typeclass",
),
)
]
| [((14, 18, 20, 13), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
dbajar/segno | tests/test_pyqrcodeng_issue13.py | f7d5669537b12d3ebb914ae6d0a0a1e14f8d25f5 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.
The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.
Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno
def test_autodetect():
data = 'Émetteur'
qr = segno.make(data)
assert qr.mode == 'byte'
def test_encoding():
encoding = 'iso-8859-15'
data = 'Émetteur'
qr = segno.make(data.encode(encoding))
assert qr.mode == 'byte'
qr2 = segno.make(data, encoding=encoding)
assert qr2 == qr
if __name__ == '__main__':
import pytest
pytest.main([__file__])
| [((22, 9, 22, 25), 'segno.make', 'segno.make', ({(22, 20, 22, 24): 'data'}, {}), '(data)', False, 'import segno\n'), ((31, 10, 31, 45), 'segno.make', 'segno.make', (), '', False, 'import segno\n'), ((37, 4, 37, 27), 'pytest.main', 'pytest.main', ({(37, 16, 37, 26): '[__file__]'}, {}), '([__file__])', False, 'import pytest\n')] |
jagunnels/qiskit-sdk-py | qiskit/quantum_info/operators/__init__.py | 153cdde972e65c0f23675bbe17c93e18be27bd51 | # -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Quantum Operators."""
from .operator import Operator
from .unitary import Unitary
from .pauli import Pauli, pauli_group
from .channel import Choi, SuperOp, Kraus, Stinespring, Chi, PTM
| [] |
Gaurav-Zaiswal/iw-acad-iocms-be | iocms/iocms/urls.py | a133f120eed93433925608f08c5145d2d0d1db39 | from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('class/', include('classroom.urls')),
path('assignment-api/', include('assignment.urls', namespace='assignment')),
path('feed/', include('feed.urls', namespace='feed')),
path('users/', include('users.urls'), name="user-register")
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [((7, 4, 7, 35), 'django.urls.path', 'path', ({(7, 9, 7, 17): '"""admin/"""', (7, 19, 7, 34): 'admin.site.urls'}, {}), "('admin/', admin.site.urls)", False, 'from django.urls import include, path\n'), ((14, 19, 14, 80), 'django.conf.urls.static.static', 'static', (), '', False, 'from django.conf.urls.static import static\n'), ((8, 19, 8, 44), 'django.urls.include', 'include', ({(8, 27, 8, 43): '"""classroom.urls"""'}, {}), "('classroom.urls')", False, 'from django.urls import include, path\n'), ((9, 28, 9, 78), 'django.urls.include', 'include', (), '', False, 'from django.urls import include, path\n'), ((10, 18, 10, 56), 'django.urls.include', 'include', (), '', False, 'from django.urls import include, path\n'), ((11, 19, 11, 40), 'django.urls.include', 'include', ({(11, 27, 11, 39): '"""users.urls"""'}, {}), "('users.urls')", False, 'from django.urls import include, path\n')] |
slippers/blogging_security_flatpage | src/security/__init__.py | 53644978b798c66369416b1e5625cc04d89c0a87 | from src import app, db
from .models import User, Role, RoleUsers
from .security_admin import UserAdmin, RoleAdmin
from flask_security import Security, SQLAlchemyUserDatastore, \
login_required, roles_accepted
from flask_security.utils import encrypt_password
def config_security_admin(admin):
admin.add_view(UserAdmin(db.session))
admin.add_view(RoleAdmin(db.session))
def configure_security():
# Create the Roles "admin" and "end-user" -- unless they already exist
user_datastore.find_or_create_role(name='admin', description='Administrator')
user_datastore.find_or_create_role(name='end-user', description='End user')
user_datastore.find_or_create_role(name='blogger', description='Blogger')
    # Create two Users for testing purposes -- unless they already exist.
    # In each case, use the Flask-Security utility function to encrypt the password.
pw = encrypt_password('password')
# pw = 'password'
if not user_datastore.get_user('[email protected]'):
user_datastore.create_user(email='[email protected]', password=pw)
if not user_datastore.get_user('[email protected]'):
user_datastore.create_user(email='[email protected]', password=pw)
    # Give one User the "end-user" role and the other the "admin" role.
    # (This will have no effect if the Users already have these Roles.)
    # Again, commit any database changes.
user_datastore.add_role_to_user('[email protected]', 'end-user')
user_datastore.add_role_to_user('[email protected]', 'blogger')
user_datastore.add_role_to_user('[email protected]', 'admin')
user_datastore.add_role_to_user('[email protected]', 'blogger')
db.session.commit()
# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
# Create any database tables that don't exist yet.
db.create_all()
| [((41, 17, 41, 56), 'flask_security.SQLAlchemyUserDatastore', 'SQLAlchemyUserDatastore', ({(41, 41, 41, 43): 'db', (41, 45, 41, 49): 'User', (41, 51, 41, 55): 'Role'}, {}), '(db, User, Role)', False, 'from flask_security import Security, SQLAlchemyUserDatastore, login_required, roles_accepted\n'), ((42, 11, 42, 40), 'flask_security.Security', 'Security', ({(42, 20, 42, 23): 'app', (42, 25, 42, 39): 'user_datastore'}, {}), '(app, user_datastore)', False, 'from flask_security import Security, SQLAlchemyUserDatastore, login_required, roles_accepted\n'), ((45, 0, 45, 15), 'src.db.create_all', 'db.create_all', ({}, {}), '()', False, 'from src import app, db\n'), ((22, 9, 22, 37), 'flask_security.utils.encrypt_password', 'encrypt_password', ({(22, 26, 22, 36): '"""password"""'}, {}), "('password')", False, 'from flask_security.utils import encrypt_password\n'), ((38, 4, 38, 23), 'src.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from src import app, db\n')] |
lenjonemcse/usaspending-api | usaspending_api/download/lookups.py | cbffc4e0a0c2b1339c7a8bfe6b0d687b3731b6ce | """
This file defines a series of constants that represent the values used in
the API's "helper" tables.
Rather than define the values in the db setup scripts and then make db calls to
lookup the surrogate keys, we'll define everything here, in a file that can be
used by the db setup scripts *and* the application code.
"""
from collections import namedtuple, OrderedDict
from usaspending_api.accounts.models import AppropriationAccountBalances
from usaspending_api.accounts.v2.filters.account_download import account_download_filter
from usaspending_api.awards.models import Award, TransactionNormalized
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.download.helpers.elasticsearch_download_functions import (
AwardsElasticsearchDownload,
TransactionsElasticsearchDownload,
)
from usaspending_api.download.helpers.disaster_filter_functions import disaster_filter_function
from usaspending_api.search.models import AwardSearchView, TransactionSearch, SubawardView
from usaspending_api.awards.v2.filters.idv_filters import (
idv_order_filter,
idv_transaction_filter,
idv_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.award_filters import (
awards_transaction_filter,
awards_subaward_filter,
awards_treasury_account_funding_filter,
)
from usaspending_api.awards.v2.filters.search import (
universal_award_matview_filter,
transaction_search_filter,
)
from usaspending_api.awards.v2.filters.sub_award import subaward_download
from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass
from usaspending_api.download.helpers.download_annotation_functions import (
transaction_search_annotations,
universal_award_matview_annotations,
subaward_annotations,
idv_order_annotations,
idv_transaction_annotations,
)
LookupType = namedtuple("LookupType", ["id", "name", "desc"])
JOB_STATUS = [
LookupType(1, "ready", "job is ready to be run"),
LookupType(2, "running", "job is currently in progress"),
LookupType(3, "finished", "job is complete"),
LookupType(4, "failed", "job failed to complete"),
LookupType(5, "queued", "job sent to queue for async processing"),
LookupType(6, "resumed", "job is being reprocessed after a failure"),
LookupType(7, "created", "job product has been created and stored locally"),
LookupType(8, "uploading", "job is being uploaded to public storage"),
]
JOB_STATUS_DICT = {item.name: item.id for item in JOB_STATUS}
VALUE_MAPPINGS = {
# Award Level
"awards": {
"source_type": "award",
"table": AwardSearchView,
"table_name": "award",
"type_name": "PrimeAwardSummaries",
"download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": universal_award_matview_filter,
"annotations_function": universal_award_matview_annotations,
},
# Elasticsearch Award Level
"elasticsearch_awards": {
"source_type": "award",
"table": AwardSearchView,
"table_name": "award",
"type_name": "PrimeAwardSummaries",
"download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": AwardsElasticsearchDownload.query,
"annotations_function": universal_award_matview_annotations,
},
# Transaction Level
"transactions": {
"source_type": "award",
"table": TransactionSearch,
"table_name": "transaction",
"type_name": "PrimeTransactions",
"download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
"contract_data": "transaction__contract_data",
"assistance_data": "transaction__assistance_data",
"filter_function": transaction_search_filter,
"annotations_function": transaction_search_annotations,
},
# Elasticsearch Transaction Level
"elasticsearch_transactions": {
"source_type": "award",
"table": TransactionSearch,
"table_name": "transaction",
"type_name": "PrimeTransactions",
"download_name": "{agency}{type}_PrimeTransactions_{timestamp}",
"contract_data": "transaction__contract_data",
"assistance_data": "transaction__assistance_data",
"filter_function": TransactionsElasticsearchDownload.query,
"annotations_function": transaction_search_annotations,
},
# SubAward Level
"sub_awards": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"type_name": "Subawards",
"download_name": "{agency}{type}_Subawards_{timestamp}",
"contract_data": "award__latest_transaction__contract_data",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": subaward_download,
"annotations_function": subaward_annotations,
},
# Appropriations Account Data
"account_balances": {
"source_type": "account",
"table": AppropriationAccountBalances,
"table_name": "account_balances",
"download_name": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}",
"filter_function": account_download_filter,
},
# Object Class Program Activity Account Data
"object_class_program_activity": {
"source_type": "account",
"table": FinancialAccountsByProgramActivityObjectClass,
"table_name": "object_class_program_activity",
"download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}",
"filter_function": account_download_filter,
},
"award_financial": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
"zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}",
"filter_function": account_download_filter,
},
"idv_orders": {
"source_type": "award",
"table": Award,
"table_name": "idv_orders",
"download_name": "IDV_{piid}_Orders",
"contract_data": "latest_transaction__contract_data",
"filter_function": idv_order_filter,
"is_for_idv": True,
"annotations_function": idv_order_annotations,
},
"idv_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "IDV_{piid}_FederalAccountFunding",
"filter_function": idv_treasury_account_funding_filter,
"is_for_idv": True,
},
"idv_transaction_history": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "idv_transaction_history",
"download_name": "IDV_{piid}_TransactionHistory",
"contract_data": "contract_data",
"filter_function": idv_transaction_filter,
"is_for_idv": True,
"annotations_function": idv_transaction_annotations,
},
"contract_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "Contract_{piid}_FederalAccountFunding",
"filter_function": awards_treasury_account_funding_filter,
"is_for_contract": True,
},
"assistance_federal_account_funding": {
"source_type": "account",
"table": FinancialAccountsByAwards,
"table_name": "award_financial",
"download_name": "Assistance_{assistance_id}_FederalAccountFunding",
"filter_function": awards_treasury_account_funding_filter,
"is_for_assistance": True,
},
"sub_contracts": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"download_name": "Contract_{piid}_Sub-Awards",
"contract_data": "award__latest_transaction__contract_data",
"filter_function": awards_subaward_filter,
"is_for_contract": True,
"annotations_function": subaward_annotations,
},
"sub_grants": {
"source_type": "award",
"table": SubawardView,
"table_name": "subaward",
"download_name": "Assistance_{assistance_id}_Sub-Awards",
"assistance_data": "award__latest_transaction__assistance_data",
"filter_function": awards_subaward_filter,
"is_for_assistance": True,
"annotations_function": subaward_annotations,
},
"contract_transactions": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "idv_transaction_history",
"download_name": "Contract_{piid}_TransactionHistory",
"contract_data": "contract_data",
"filter_function": awards_transaction_filter,
"is_for_contract": True,
"annotations_function": idv_transaction_annotations,
},
"assistance_transactions": {
"source_type": "award",
"table": TransactionNormalized,
"table_name": "assistance_transaction_history",
"download_name": "Assistance_{assistance_id}_TransactionHistory",
"assistance_data": "assistance_data",
"filter_function": awards_transaction_filter,
"is_for_assistance": True,
"annotations_function": idv_transaction_annotations,
},
"disaster_recipient": {
"source_type": "disaster",
"table": AwardSearchView,
"table_name": "recipient",
"download_name": "COVID-19_Recipients_{award_category}_{timestamp}",
"filter_function": disaster_filter_function,
"base_fields": ["recipient_name", "recipient_unique_id"],
},
}
# Bulk Download still uses "prime awards" instead of "transactions"
VALUE_MAPPINGS["prime_awards"] = VALUE_MAPPINGS["transactions"]
# List of CFO CGACS for list agencies viewset in the correct order, names included for reference
# TODO: Find a solution that marks the CFO agencies in the database AND have the correct order
CFO_CGACS_MAPPING = OrderedDict(
[
("012", "Department of Agriculture"),
("013", "Department of Commerce"),
("097", "Department of Defense"),
("091", "Department of Education"),
("089", "Department of Energy"),
("075", "Department of Health and Human Services"),
("070", "Department of Homeland Security"),
("086", "Department of Housing and Urban Development"),
("015", "Department of Justice"),
("1601", "Department of Labor"),
("019", "Department of State"),
("014", "Department of the Interior"),
("020", "Department of the Treasury"),
("069", "Department of Transportation"),
("036", "Department of Veterans Affairs"),
("068", "Environmental Protection Agency"),
("047", "General Services Administration"),
("080", "National Aeronautics and Space Administration"),
("049", "National Science Foundation"),
("031", "Nuclear Regulatory Commission"),
("024", "Office of Personnel Management"),
("073", "Small Business Administration"),
("028", "Social Security Administration"),
("072", "Agency for International Development"),
]
)
CFO_CGACS = list(CFO_CGACS_MAPPING.keys())
FILE_FORMATS = {
"csv": {"delimiter": ",", "extension": "csv", "options": "WITH CSV HEADER"},
"tsv": {"delimiter": "\t", "extension": "tsv", "options": r"WITH CSV DELIMITER E'\t' HEADER"},
"pstxt": {"delimiter": "|", "extension": "txt", "options": "WITH CSV DELIMITER '|' HEADER"},
}
VALID_ACCOUNT_SUBMISSION_TYPES = ("account_balances", "object_class_program_activity", "award_financial")
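# Illustrative note (not part of the upstream module): application code is
# expected to read a download source's configuration out of VALUE_MAPPINGS,
# roughly along these lines, where "filters" stands for a hypothetical
# request payload:
#
#     source = VALUE_MAPPINGS["awards"]
#     download_model = source["table"]
#     queryset = source["filter_function"](filters)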
| [((48, 13, 48, 61), 'collections.namedtuple', 'namedtuple', ({(48, 24, 48, 36): '"""LookupType"""', (48, 38, 48, 60): "['id', 'name', 'desc']"}, {}), "('LookupType', ['id', 'name', 'desc'])", False, 'from collections import namedtuple, OrderedDict\n'), ((249, 20, 276, 1), 'collections.OrderedDict', 'OrderedDict', ({(250, 4, 275, 5): "[('012', 'Department of Agriculture'), ('013', 'Department of Commerce'), (\n '097', 'Department of Defense'), ('091', 'Department of Education'), (\n '089', 'Department of Energy'), ('075',\n 'Department of Health and Human Services'), ('070',\n 'Department of Homeland Security'), ('086',\n 'Department of Housing and Urban Development'), ('015',\n 'Department of Justice'), ('1601', 'Department of Labor'), ('019',\n 'Department of State'), ('014', 'Department of the Interior'), ('020',\n 'Department of the Treasury'), ('069', 'Department of Transportation'),\n ('036', 'Department of Veterans Affairs'), ('068',\n 'Environmental Protection Agency'), ('047',\n 'General Services Administration'), ('080',\n 'National Aeronautics and Space Administration'), ('049',\n 'National Science Foundation'), ('031', 'Nuclear Regulatory Commission'\n ), ('024', 'Office of Personnel Management'), ('073',\n 'Small Business Administration'), ('028',\n 'Social Security Administration'), ('072',\n 'Agency for International Development')]"}, {}), "([('012', 'Department of Agriculture'), ('013',\n 'Department of Commerce'), ('097', 'Department of Defense'), ('091',\n 'Department of Education'), ('089', 'Department of Energy'), ('075',\n 'Department of Health and Human Services'), ('070',\n 'Department of Homeland Security'), ('086',\n 'Department of Housing and Urban Development'), ('015',\n 'Department of Justice'), ('1601', 'Department of Labor'), ('019',\n 'Department of State'), ('014', 'Department of the Interior'), ('020',\n 'Department of the Treasury'), ('069', 'Department of Transportation'),\n ('036', 'Department of Veterans Affairs'), ('068',\n 'Environmental Protection Agency'), ('047',\n 'General Services Administration'), ('080',\n 'National Aeronautics and Space Administration'), ('049',\n 'National Science Foundation'), ('031', 'Nuclear Regulatory Commission'\n ), ('024', 'Office of Personnel Management'), ('073',\n 'Small Business Administration'), ('028',\n 'Social Security Administration'), ('072',\n 'Agency for International Development')])", False, 'from collections import namedtuple, OrderedDict\n')] |
91-jinrong/-91_monitor | python/modules/mysql_server.py | e0325229bffbb0df20d9337925b591eee8ac0289 | #!/bin/env python
#-*-coding:utf-8-*-
import os
import sys
import string
import time
import datetime
import MySQLdb
class MySQL:
    def __init__(self,host,port,user,passwd,dbname,timeout,charset):
self.host = host
self.port = port
self.user = user
self.passwd = passwd
        self.dbname = dbname
self.timeout = timeout
self.charset = charset
def db_connect(self):
connect=MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,port=int(self.port),connect_timeout=int(self.timeout),charset=self.charset)
return connect
def execute(self,sql,param):
conn=MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,port=int(self.port),connect_timeout=int(self.timeout),charset=self.charset)
conn.select_db(self.dbname)
cursor = conn.cursor()
        if param != '':
cursor.execute(sql,param)
else:
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
def query(self,sql):
conn=MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,port=int(self.port),connect_timeout=int(self.timeout),charset=self.charset)
conn.select_db(self.dbname)
cursor = conn.cursor()
count=cursor.execute(sql)
if count == 0 :
result=0
else:
result=cursor.fetchall()
        cursor.close()
        conn.close()
        return result
def get_option(self,key):
conn=MySQLdb.connect(host=self.host,user=self.user,passwd=self.passwd,port=int(self.port),connect_timeout=int(self.timeout),charset=self.charset)
conn.select_db(self.dbname)
cursor = conn.cursor()
sql="select value from options where name=+'"+key+"'"
count=cursor.execute(sql)
if count == 0 :
result=0
else:
result=cursor.fetchone()
return result[0]
cursor.close()
conn.close()
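# Illustrative usage sketch; the connection parameters below are assumptions
# for demonstration only, not values taken from this project.
if __name__ == '__main__':
    db = MySQL('127.0.0.1', 3306, 'monitor', 'secret', 'monitor_db', 10, 'utf8')
    print(db.query('select 1'))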
| [] |
hklhfong/Car-Rental-System | Ethan File/Carrentsystem/Carrentsystem/test.py | 3a4844eea8e9dbf85f4ce62b5115772f48277240 | import sqlite3
conn = sqlite3.connect("db")
cur = conn.cursor()
cur.execute("select * from CAR_ID limit 5;")
results = cur.fetchall()
print(results)
| [((2, 7, 2, 28), 'sqlite3.connect', 'sqlite3.connect', ({(2, 23, 2, 27): '"""db"""'}, {}), "('db')", False, 'import sqlite3\n')] |
abreu4/jina | tests/integration/hub_usage/dummyhub_slow/__init__.py | d1d045e9e0933dffb3bd668cb9cfebab6cd52202 | import time
from jina.executors.crafters import BaseCrafter
from .helper import foo
class DummyHubExecutorSlow(BaseCrafter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
time.sleep(15)
foo()
| [((11, 8, 11, 22), 'time.sleep', 'time.sleep', ({(11, 19, 11, 21): '(15)'}, {}), '(15)', False, 'import time\n')] |
philipp-hess/deep-learning-for-heavy-rainfall | src/evaluation_utils.py | dbec03245dd8db0c5f2f53af014b8dd8d80f245c | import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from sklearn.metrics import f1_score, precision_score, recall_score
from IPython.display import display, clear_output
from sklearn.metrics import confusion_matrix
import scipy.stats as st
def continuous_to_categorical_with_quantiles(data: np.ndarray, quantiles:list ) -> np.ndarray:
""" Converts continuous data into binar classes using quantiles
Args:
data: shape [n_time, n_lat, n_lon]
quantiles:
list containing quantiles
Returns:
tmp: shape [n_quantiles, n_time*n_lat*n_lon]
binary data
"""
shape = data.shape
tmp = np.zeros((len(quantiles), shape[0], shape[1], shape[2]))
for i, quantile in enumerate(quantiles):
threshold = np.quantile(data, quantile)
binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1))
tmp[i] = binary.squeeze()
return tmp
def global_thresholds_from_quantiles(data: np.ndarray, quantiles:list) -> list:
thresholds = [np.quantile(data, quantile) for quantile in quantiles]
return thresholds
def local_thresholds_from_percentiles(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray:
n_lat = data.shape[1]
n_lon = data.shape[2]
threshold_map = np.zeros((n_lat, n_lon))
for lat in range(n_lat):
for lon in range(n_lon):
tmp = data[:, lat, lon]
threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile)
if not np.isnan(threshold):
threshold_map[lat, lon] = threshold
return threshold_map
def get_threshold_mask(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray:
n_lat = data.shape[1]
n_lon = data.shape[2]
mask = np.zeros((n_lat, n_lon))
for lat in range(n_lat):
for lon in range(n_lon):
tmp = data[:, lat, lon]
threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile)
if np.isnan(threshold):
mask[lat, lon] = 1
return mask
def continuous_to_categorical_with_thresholds(data: np.ndarray, thresholds: list) -> np.ndarray:
""" Converts continuous data into binar classes using thresholds
Args:
data: shape [n_time, n_lat, n_lon]
        thresholds:
list containing thresholds
Returns:
tmp: shape [n_quantiles, n_time*n_lat*n_lon]
binary data
"""
shape = data.shape
tmp = np.zeros((len(thresholds), shape[0], shape[1], shape[2]))
for i, threshold in enumerate(thresholds):
binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1))
tmp[i] = binary.squeeze()
return tmp
def categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str, mask=None) -> pd.DataFrame:
"""
    Evaluates a regression prediction with the skill metric named by
    metric_name on quantile-based categories
Args:
prediction: shape [n_classes, X]
target: shape [n_classes, X]
X can be any other number of dimensions > 0
Returns:
scores (list):
List with an element per class
"""
n_classes = prediction.shape[0]
prediction = prediction.reshape(n_classes, -1)
target = target.reshape(n_classes, -1)
scores = []
for c in range(n_classes):
forecast_skill = ForecastSkill(prediction[c], target[c])
forecast_skill.compute_categories(mask=mask)
scores.append(getattr(forecast_skill, f'get_{metric_name}')())
return scores
def geographic_categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str) -> np.ndarray:
"""
    Evaluates a regression prediction with the skill metric named by
    metric_name on quantile-based categories
Args:
prediction: shape [n_classes, n_time, n_lat, n_lon]
target: shape [n_classes, n_time, n_lat, n_lon]
Returns:
scores: shape [n_classes, n_lat, n_lon]
"""
n_classes = prediction.shape[0]
n_lat = prediction.shape[2]
n_lon = prediction.shape[3]
scores = np.zeros((n_classes, n_lat, n_lon))
for c in range(n_classes):
for lat in range(n_lat):
for lon in range(n_lon):
grid_cell_prediction = prediction[c, :, lat, lon]
grid_cell_target = target[c, :, lat, lon]
if sum(grid_cell_prediction) == 0 and sum(grid_cell_target) == 0:
scores[c, lat, lon] = -999
else:
forecast_skill = ForecastSkill(prediction[c, :, lat, lon], target[c, :, lat, lon])
forecast_skill.compute_categories()
scores[c, lat, lon] = getattr(forecast_skill, f'get_{metric_name}')()
                print(f'Progress {int((lat * n_lon + lon)/(n_lat*n_lon)*100):2d}%')
clear_output(wait=True)
return scores
class ForecastSkill:
""" A collection of categorical forecast skill metrics """
def __init__(self, prediction, target):
self.prediction = prediction
self.target = target
self.true_positive = 0
self.false_positive = 0
self.false_negative = 0
self.true_negative = 0
def compute_categories(self, mask=None):
self.target = self.target.flatten().astype('int')
self.prediction = self.prediction.flatten().astype('int')
if mask is not None:
mask = mask.flatten()
indices_to_remove = np.where(mask==1)
self.target = np.delete(self.target, indices_to_remove)
self.prediction = np.delete(self.prediction, indices_to_remove)
categories = confusion_matrix(self.target, self.prediction)
self.true_negative, self.false_positive, self.false_negative, self.true_positive = categories.ravel()
def print_category_sums(self):
total = self.target.size
print(f'tp: {self.true_positive/total*100:2.3f}')
print(f'fp: {self.false_positive/total*100:2.3f}')
print(f'fn: {self.false_negative/total*100:2.3f}')
print(f'tn: {self.true_negative/total*100:2.3f}')
def get_category_sums(self):
return self.true_positive, self.false_positive, self.false_negative, self.true_negative
def get_heidke_skill_score(self) -> float:
tp = self.true_positive
fp = self.false_positive
fn = self.false_negative
tn = self.true_negative
nominator = 2*(tp*tn - fp*fn)
denominator = ((tp + fn)*(fn + tn) + (tp + fp)*(fp + tn))
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_critical_success_index(self) -> float:
hits = self.true_positive
false_alarms = self.false_positive
misses = self.false_negative
nominator = hits
denominator = hits + misses + false_alarms
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_false_alarm_ratio(self) -> float:
hits = self.true_positive
false_alarms = self.false_positive
nominator = false_alarms
denominator = hits + false_alarms
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_probability_of_detection(self) -> float:
hits = self.true_positive
misses = self.false_negative
nominator = hits
denominator = hits + misses
if denominator > 0:
return nominator/denominator
else:
            raise ValueError('division by zero')
def get_f1(self) -> float:
return f1_score(self.target, self.prediction, average='binary')
def get_recall(self) -> float:
return recall_score(self.target, self.prediction, average='binary')
def get_precision(self) -> float:
return precision_score(self.target, self.prediction, average='binary')
def rmse(output, target):
return np.sqrt(((output-target)**2).mean(axis=0))
def me(output, target):
return (output-target).mean(axis=0)
def corr(output, target):
result = np.zeros((output.shape[1], output.shape[2]))
for i in range(output.shape[1]):
for j in range(output.shape[2]):
result[i,j] = spearmanr(output[:,i,j], target[:,i,j])[0]
return result
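# Minimal worked example (illustrative only, not part of the original module).
# For the toy forecast below the confusion matrix is tp=3, fp=1, fn=2, tn=4, so
# POD = 3/5 = 0.6, FAR = 1/4 = 0.25, CSI = 3/6 = 0.5 and
# HSS = 2*(3*4 - 1*2) / ((3+2)*(2+4) + (3+1)*(1+4)) = 20/50 = 0.4.
if __name__ == '__main__':
    toy_prediction = np.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0])
    toy_target = np.array([1, 1, 1, 0, 1, 1, 0, 0, 0, 0])
    skill = ForecastSkill(toy_prediction, toy_target)
    skill.compute_categories()
    print(skill.get_probability_of_detection(),
          skill.get_false_alarm_ratio(),
          skill.get_critical_success_index(),
          skill.get_heidke_skill_score())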
| [((38, 20, 38, 44), 'numpy.zeros', 'np.zeros', ({(38, 29, 38, 43): '(n_lat, n_lon)'}, {}), '((n_lat, n_lon))', True, 'import numpy as np\n'), ((51, 11, 51, 35), 'numpy.zeros', 'np.zeros', ({(51, 20, 51, 34): '(n_lat, n_lon)'}, {}), '((n_lat, n_lon))', True, 'import numpy as np\n'), ((127, 13, 127, 48), 'numpy.zeros', 'np.zeros', ({(127, 22, 127, 47): '(n_classes, n_lat, n_lon)'}, {}), '((n_classes, n_lat, n_lon))', True, 'import numpy as np\n'), ((263, 14, 263, 58), 'numpy.zeros', 'np.zeros', ({(263, 23, 263, 57): '(output.shape[1], output.shape[2])'}, {}), '((output.shape[1], output.shape[2]))', True, 'import numpy as np\n'), ((24, 20, 24, 47), 'numpy.quantile', 'np.quantile', ({(24, 32, 24, 36): 'data', (24, 38, 24, 46): 'quantile'}, {}), '(data, quantile)', True, 'import numpy as np\n'), ((31, 18, 31, 45), 'numpy.quantile', 'np.quantile', ({(31, 30, 31, 34): 'data', (31, 36, 31, 44): 'quantile'}, {}), '(data, quantile)', True, 'import numpy as np\n'), ((168, 21, 168, 67), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ({(168, 38, 168, 49): 'self.target', (168, 51, 168, 66): 'self.prediction'}, {}), '(self.target, self.prediction)', False, 'from sklearn.metrics import confusion_matrix\n'), ((243, 15, 243, 71), 'sklearn.metrics.f1_score', 'f1_score', (), '', False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((247, 15, 247, 75), 'sklearn.metrics.recall_score', 'recall_score', (), '', False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((251, 15, 251, 78), 'sklearn.metrics.precision_score', 'precision_score', (), '', False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((42, 24, 42, 75), 'scipy.stats.scoreatpercentile', 'st.scoreatpercentile', ({(42, 45, 42, 62): 'tmp[tmp > data_min]', (42, 64, 42, 74): 'percentile'}, {}), '(tmp[tmp > data_min], percentile)', True, 'import scipy.stats as st\n'), ((55, 24, 55, 75), 'scipy.stats.scoreatpercentile', 'st.scoreatpercentile', ({(55, 45, 55, 62): 'tmp[tmp > data_min]', (55, 64, 55, 74): 'percentile'}, {}), '(tmp[tmp > data_min], percentile)', True, 'import scipy.stats as st\n'), ((56, 15, 56, 34), 'numpy.isnan', 'np.isnan', ({(56, 24, 56, 33): 'threshold'}, {}), '(threshold)', True, 'import numpy as np\n'), ((164, 32, 164, 49), 'numpy.where', 'np.where', ({(164, 41, 164, 48): 'mask == 1'}, {}), '(mask == 1)', True, 'import numpy as np\n'), ((165, 26, 165, 67), 'numpy.delete', 'np.delete', ({(165, 36, 165, 47): 'self.target', (165, 49, 165, 66): 'indices_to_remove'}, {}), '(self.target, indices_to_remove)', True, 'import numpy as np\n'), ((166, 30, 166, 75), 'numpy.delete', 'np.delete', ({(166, 40, 166, 55): 'self.prediction', (166, 57, 166, 74): 'indices_to_remove'}, {}), '(self.prediction, indices_to_remove)', True, 'import numpy as np\n'), ((25, 17, 25, 49), 'numpy.where', 'np.where', ({(25, 26, 25, 42): 'data > threshold', (25, 44, 25, 45): '1', (25, 47, 25, 48): '0'}, {}), '(data > threshold, 1, 0)', True, 'import numpy as np\n'), ((43, 19, 43, 38), 'numpy.isnan', 'np.isnan', ({(43, 28, 43, 37): 'threshold'}, {}), '(threshold)', True, 'import numpy as np\n'), ((76, 17, 76, 49), 'numpy.where', 'np.where', ({(76, 26, 76, 42): 'data > threshold', (76, 44, 76, 45): '1', (76, 47, 76, 48): '0'}, {}), '(data > threshold, 1, 0)', True, 'import numpy as np\n'), ((140, 16, 140, 39), 'IPython.display.clear_output', 'clear_output', (), '', False, 'from IPython.display import display, clear_output\n'), ((266, 26, 266, 65), 'scipy.stats.spearmanr', 
'spearmanr', ({(266, 36, 266, 49): 'output[:, (i), (j)]', (266, 51, 266, 64): 'target[:, (i), (j)]'}, {}), '(output[:, (i), (j)], target[:, (i), (j)])', False, 'from scipy.stats import spearmanr\n')] |
xJuggl3r/anapolo | poloniex_apis/api_models/deposit_withdrawal_history.py | 5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b | from collections import defaultdict
from poloniex_apis.api_models.ticker_price import TickerData
class DWHistory:
def __init__(self, history):
self.withdrawals = defaultdict(float)
self.deposits = defaultdict(float)
self.history = history
def get_dw_history(self):
for deposit in self.history['deposits']:
if deposit['currency'] in self.deposits:
self.deposits[deposit['currency']] += float(deposit['amount'])
else:
self.deposits[deposit['currency']] = float(deposit['amount'])
for withdrawal in self.history['withdrawals']:
if withdrawal['currency'] in self.withdrawals:
self.withdrawals[withdrawal['currency']] += float(withdrawal['amount'])
else:
self.withdrawals[withdrawal['currency']] = float(withdrawal['amount'])
return self.deposits, self.withdrawals
def get_btc_balance(self, ticker):
balance = 0
for deposit_symbol, amount in self.deposits.items():
if deposit_symbol == u"USDT":
balance += amount * ticker.get_price("USDT_BTC")
            elif deposit_symbol != u'BTC':
balance += amount * ticker.get_price("BTC_" + deposit_symbol)
else:
balance += amount
for withdrawal_symbol, amount in self.withdrawals.items():
if withdrawal_symbol == u"USDT":
balance -= amount * ticker.get_price("USDT_BTC")
            elif withdrawal_symbol != u'BTC':
balance -= amount * ticker.get_price("BTC_" + withdrawal_symbol)
else:
balance -= amount
return balance
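# Illustrative usage sketch; the payload below is a made-up example of the
# Poloniex deposit/withdrawal structure this class expects, not real data.
if __name__ == '__main__':
    sample_history = {
        'deposits': [{'currency': 'ETH', 'amount': '2.0'}],
        'withdrawals': [{'currency': 'BTC', 'amount': '0.1'}],
    }
    dw = DWHistory(sample_history)
    deposits, withdrawals = dw.get_dw_history()
    print(dict(deposits), dict(withdrawals))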
| [((8, 27, 8, 45), 'collections.defaultdict', 'defaultdict', ({(8, 39, 8, 44): 'float'}, {}), '(float)', False, 'from collections import defaultdict\n'), ((9, 24, 9, 42), 'collections.defaultdict', 'defaultdict', ({(9, 36, 9, 41): 'float'}, {}), '(float)', False, 'from collections import defaultdict\n')] |
vnrag/aws-pipeline-dashboard | app/handler.py | 679af73f8e777990840bc829a014e205f0c94ac0 | from datetime import datetime,timezone
import sys
import boto3
import json
def pipeline_event(event, context):
state = get_final_state(event)
if state is None:
return
event_time = datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
metric_data = []
if event['detail-type'] == "CodePipeline Pipeline Execution State Change":
# Write green/red time based on last execution state
prior_execution = get_prior_execution(event['detail']['pipeline'], event['detail']['execution-id'])
if prior_execution is not None:
last_execution_state = prior_execution['status']
seconds_since_last_execution = (event_time - prior_execution['lastUpdateTime']).total_seconds()
if last_execution_state == "Succeeded":
append_metric(metric_data, "GreenTime", event, seconds=seconds_since_last_execution)
elif last_execution_state == "Failed":
append_metric(metric_data, "RedTime", event, seconds=seconds_since_last_execution)
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
current_execution = get_execution(event['detail']['pipeline'], event['detail']['execution-id'])
if current_execution is not None:
duration = (event_time - current_execution['startTime']).total_seconds()
append_metric(metric_data, "LeadTime", event, seconds=duration)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
elif event['detail-type'] == "CodePipeline Stage Execution State Change":
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
#append_metric(metric_data, "LeadTime", event, seconds=duration)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
elif event['detail-type'] == "CodePipeline Action Execution State Change":
if state == "SUCCEEDED":
append_metric(metric_data, "SuccessCount", event, count=1)
elif state == "FAILED":
append_metric(metric_data, "FailureCount", event, count=1)
if len(metric_data) > 0:
client = boto3.client('cloudwatch')
client.put_metric_data(
Namespace='Pipeline',
MetricData=metric_data
)
# Return the state from the event iff it's one of SUCCEEDED or FAILED
def get_final_state(event):
if 'detail' in event and 'state' in event['detail']:
if any(event['detail']['state'] in s for s in ['SUCCEEDED', 'FAILED']):
return event['detail']['state']
return None
# Return the execution summary for a given execution id
def get_execution(pipeline_name, execution_id):
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(pipelineName=pipeline_name)
for e in response['pipelineExecutionSummaries']:
if e['pipelineExecutionId'] == execution_id:
return e
return None
# Return the execution summary for the most prior final execution before a given execution id
def get_prior_execution(pipeline_name, execution_id):
client = boto3.client('codepipeline')
response = client.list_pipeline_executions(pipelineName=pipeline_name)
found_current = False
for e in response['pipelineExecutionSummaries']:
if found_current and any(e['status'] in s for s in ['Succeeded', 'Failed']):
return e
elif e['pipelineExecutionId'] == execution_id:
found_current = True
return None
def append_metric(metric_list, metric_name, event, seconds=0, count=0):
data = {
'MetricName': metric_name,
'Dimensions': [],
'Timestamp': datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ'),
}
resource_parts = []
if 'pipeline' in event['detail']:
data['Dimensions'].append({
'Name': 'PipelineName',
'Value': event['detail']['pipeline']
})
resource_parts.append(event['detail']['pipeline'])
if 'stage' in event['detail']:
data['Dimensions'].append({
'Name': 'StageName',
'Value': event['detail']['stage']
})
resource_parts.append(event['detail']['stage'])
if 'action' in event['detail']:
data['Dimensions'].append({
'Name': 'ActionName',
'Value': event['detail']['action']
})
resource_parts.append(event['detail']['action'])
if seconds > 0:
data['Value'] = seconds
data['Unit'] = 'Seconds'
elif count > 0:
data['Value'] = count
data['Unit'] = 'Count'
else:
# no metric to add
return
print("resource=%s metric=%s value=%s" % ('.'.join(resource_parts), metric_name, data['Value']))
metric_list.append(data)
def generate_dashboard(client):
paginator = client.get_paginator('list_metrics')
response_iterator = paginator.paginate(
Namespace='Pipeline'
)
pipeline_names = set()
for response in response_iterator:
for metric in response['Metrics']:
for dim in metric['Dimensions']:
if dim['Name'] == 'PipelineName':
pipeline_names.add(dim['Value'])
widgets = []
dashboard = {
"widgets": widgets
}
y = 0
for pipeline_name in sorted(pipeline_names):
widgets.append({
"type": "metric",
"x": 0,
"y": y,
"width": 18,
"height": 3,
"properties": {
"view": "singleValue",
"metrics": [
[ "Pipeline", "SuccessCount", "PipelineName", pipeline_name, { "stat": "Sum", "period": 2592000 } ],
[ ".", "FailureCount", ".", ".", { "stat": "Sum", "period": 2592000 } ],
[ ".", "LeadTime", ".", ".", { "period": 2592000, "color": "#9467bd" } ],
[ ".", "RedTime", ".", ".", { "stat": "Sum", "period": 2592000, "yAxis": "left", "color": "#d62728" } ],
[ ".", "GreenTime", ".", ".", { "period": 2592000, "stat": "Sum", "color": "#2ca02c" } ]
],
"region": "eu-central-1",
"title": pipeline_name,
"period": 300
}
})
y += 3
widgets.append({
"type": "text",
"x": 18,
"y": 0,
"width": 6,
"height": 6,
"properties": {
"markdown": "\nAll metrics are calculated over the past 30 days\n\n* **SuccessCount** - count of all successful pipeline executions\n* **FailureCount** - count of all failed pipeline executions\n* **LeadTime** - average pipeline time for successful executions\n* **RedTime** - sum of all time spent with a red pipeline\n* **GreenTime** - sum of all time spent with a green pipeline\n"
}
})
return dashboard
def dashboard_event(event, context):
client = boto3.client('cloudwatch')
dashboard = generate_dashboard(client)
client.put_dashboard(
DashboardName='Pipeline',
DashboardBody=json.dumps(dashboard)
)
if __name__ == '__main__':
dashboard_event(None, None)
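# For reference (placeholder values, not a captured event): pipeline_event()
# only reads the following fields from the incoming CloudWatch event:
# {
#   "time": "2020-01-01T00:00:00Z",
#   "detail-type": "CodePipeline Pipeline Execution State Change",
#   "detail": {
#     "pipeline": "my-pipeline",
#     "execution-id": "...",
#     "state": "SUCCEEDED",
#     "stage": "...",    # present on stage/action events only
#     "action": "..."    # present on action events only
#   }
# }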
| [((69, 13, 69, 41), 'boto3.client', 'boto3.client', ({(69, 26, 69, 40): '"""codepipeline"""'}, {}), "('codepipeline')", False, 'import boto3\n'), ((80, 13, 80, 41), 'boto3.client', 'boto3.client', ({(80, 26, 80, 40): '"""codepipeline"""'}, {}), "('codepipeline')", False, 'import boto3\n'), ((194, 13, 194, 39), 'boto3.client', 'boto3.client', ({(194, 26, 194, 38): '"""cloudwatch"""'}, {}), "('cloudwatch')", False, 'import boto3\n'), ((52, 17, 52, 43), 'boto3.client', 'boto3.client', ({(52, 30, 52, 42): '"""cloudwatch"""'}, {}), "('cloudwatch')", False, 'import boto3\n'), ((96, 21, 96, 75), 'datetime.datetime.strptime', 'datetime.strptime', ({(96, 39, 96, 52): "event['time']", (96, 54, 96, 74): '"""%Y-%m-%dT%H:%M:%SZ"""'}, {}), "(event['time'], '%Y-%m-%dT%H:%M:%SZ')", False, 'from datetime import datetime, timezone\n'), ((13, 17, 13, 71), 'datetime.datetime.strptime', 'datetime.strptime', ({(13, 35, 13, 48): "event['time']", (13, 50, 13, 70): '"""%Y-%m-%dT%H:%M:%SZ"""'}, {}), "(event['time'], '%Y-%m-%dT%H:%M:%SZ')", False, 'from datetime import datetime, timezone\n'), ((198, 22, 198, 43), 'json.dumps', 'json.dumps', ({(198, 33, 198, 42): 'dashboard'}, {}), '(dashboard)', False, 'import json\n')] |
sudo-do/discord-chatbot | cogs/commands.py | 970af7d8b9275a518396648ebe5c33c291370d6a | import discord
import sqlite3
from discord.ext import commands
conn= sqlite3.connect("dbs/main.db")
class Commands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.cooldown(1, 30, commands.BucketType.guild)
@commands.has_permissions(manage_channels=True)
async def setchannel(self, ctx, *, cbchannel: discord.TextChannel = None):
if cbchannel == None:
await ctx.send(":warning: You have to mention the channel that you want as the channel in which users will talk to me. Example: `!!setchannel #channel-name`")
return
elif cbchannel != None:
try:
cur= conn.cursor()
guildID= str(ctx.guild.id)
r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'")
row= None
for row in r:
...
if row != None:
await ctx.send(f":warning: The channel is already setup to <#{row[0]}>. Use `!!settings channel` to change it.")
elif row == None:
guildID= str(ctx.guild.id)
channelID= str(cbchannel.id)
cur.execute("INSERT INTO main(guild_id, channel_id, toggle) VALUES('"+guildID+"', '"+channelID+"', '1')")
conn.commit()
await ctx.send(f":tada: Start talking to me in {cbchannel.mention}!")
except discord.NotFound:
await ctx.send(":warning: I can't find that channel. Make sure I can access it or channel is valid.")
return
            except discord.Forbidden:
await ctx.send(":warning: I can't send messages in that channel.")
return
@commands.group(invoke_without_command=True)
async def settings(self, ctx):
em= discord.Embed(title="Discord Chat Bot Settings", description="Welcome to Discord Chat Bot Settings! Here are the list of commands you can use to setup the bot. If this is your first time with this bot, Use the `!!setchannel` command first. **Arguments enclosed in `<>` are required!**")
em.add_field(name="`!!settings channel <channel_mention>`", value="Updates the chatting channel.")
em.add_field(name="`!!settings toggle <toggle>`", value="Toggles the bot chat on or off. This doesn't disable commands.")
await ctx.send(embed=em)
@settings.command()
@commands.has_permissions(manage_channels=True)
@commands.cooldown(1, 30, commands.BucketType.guild)
async def channel(self, ctx, *, cbchannel: discord.TextChannel = None):
cur= conn.cursor()
if cbchannel == None:
guildID= str(ctx.guild.id)
r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'")
row= None
for row in r:
...
if row != None:
await ctx.send(f"I'm currently waiting for messages in <#{row[0]}>. Run `!!settings channel #channel-mention` to change this.")
elif row == None:
await ctx.send("Channel is not even setup yet! Use `!!setchannel` to set a channel.")
elif cbchannel != None:
guildID= str(ctx.guild.id)
channelID= str(cbchannel.id)
r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'")
row= None
for row in r:
...
if row == None:
await ctx.send("Channel is not even setup yet! Use `!!setchannel` to set a channel.")
elif row != None:
cur.execute("UPDATE main SET channel_id = '"+channelID+"' where guild_id = '"+guildID+"'")
conn.commit()
await ctx.send(f":tada: Channel has been updated to {cbchannel.mention}!")
@settings.command()
@commands.has_permissions(manage_channels=True)
@commands.cooldown(1, 30, commands.BucketType.guild)
async def toggle(self, ctx, *, toggle = None):
if toggle == None:
await ctx.send(":warning: Use the command again but mention the toggle i.e `on` or `off` For example: `!!settings toggle on` to toggle on, `!!settings toggle off` to toggle off.")
elif toggle != None:
if toggle.lower() == "on":
toggle = '1'
elif toggle.lower() == 'off':
toggle = '0'
else:
await ctx.send(":warning: Use the command again but mention the toggle correctly. i.e `on` or `off` For example: `!!settings toggle on` to toggle on, `!!settings toggle off` to toggle off.")
return
guildID= str(ctx.guild.id)
cur= conn.cursor()
r= cur.execute("SELECT toggle FROM main WHERE guild_id = '"+guildID+"'")
row= None
for row in r:
...
if row == None:
await ctx.send("Channel is not setup yet! Use `!!setchannel` to set a channel.")
elif row != None:
cur.execute("UPDATE main SET toggle = '"+toggle+"' where guild_id = '"+guildID+"'")
conn.commit()
await ctx.send(f":tada: Toggle updated!")
def setup(bot):
bot.add_cog(Commands(bot))
| [((5, 6, 5, 36), 'sqlite3.connect', 'sqlite3.connect', ({(5, 22, 5, 35): '"""dbs/main.db"""'}, {}), "('dbs/main.db')", False, 'import sqlite3\n'), ((11, 2, 11, 20), 'discord.ext.commands.command', 'commands.command', ({}, {}), '()', False, 'from discord.ext import commands\n'), ((12, 2, 12, 53), 'discord.ext.commands.cooldown', 'commands.cooldown', ({(12, 20, 12, 21): '(1)', (12, 23, 12, 25): '(30)', (12, 27, 12, 52): 'commands.BucketType.guild'}, {}), '(1, 30, commands.BucketType.guild)', False, 'from discord.ext import commands\n'), ((13, 2, 13, 48), 'discord.ext.commands.has_permissions', 'commands.has_permissions', (), '', False, 'from discord.ext import commands\n'), ((46, 2, 46, 45), 'discord.ext.commands.group', 'commands.group', (), '', False, 'from discord.ext import commands\n'), ((55, 2, 55, 48), 'discord.ext.commands.has_permissions', 'commands.has_permissions', (), '', False, 'from discord.ext import commands\n'), ((56, 2, 56, 53), 'discord.ext.commands.cooldown', 'commands.cooldown', ({(56, 20, 56, 21): '(1)', (56, 23, 56, 25): '(30)', (56, 27, 56, 52): 'commands.BucketType.guild'}, {}), '(1, 30, commands.BucketType.guild)', False, 'from discord.ext import commands\n'), ((87, 2, 87, 48), 'discord.ext.commands.has_permissions', 'commands.has_permissions', (), '', False, 'from discord.ext import commands\n'), ((88, 2, 88, 53), 'discord.ext.commands.cooldown', 'commands.cooldown', ({(88, 20, 88, 21): '(1)', (88, 23, 88, 25): '(30)', (88, 27, 88, 52): 'commands.BucketType.guild'}, {}), '(1, 30, commands.BucketType.guild)', False, 'from discord.ext import commands\n'), ((48, 6, 48, 292), 'discord.Embed', 'discord.Embed', (), '', False, 'import discord\n')] |
mgasner/poetry | poetry/console/commands/self/update.py | 44221689e05feb0cc93c231096334f8eefbf86fc | import hashlib
import os
import shutil
import subprocess
import sys
import tarfile
from functools import cmp_to_key
from gzip import GzipFile
try:
from urllib.error import HTTPError
from urllib.request import urlopen
except ImportError:
from urllib2 import HTTPError
from urllib2 import urlopen
from cleo import argument
from cleo import option
from ..command import Command
class SelfUpdateCommand(Command):
name = "update"
description = "Updates poetry to the latest version."
arguments = [argument("version", "The version to update to.", optional=True)]
options = [option("preview", None, "Install prereleases.")]
BASE_URL = "https://github.com/sdispater/poetry/releases/download"
@property
def home(self):
from poetry.utils._compat import Path
from poetry.utils.appdirs import expanduser
home = Path(expanduser("~"))
return home / ".poetry"
@property
def lib(self):
return self.home / "lib"
@property
def lib_backup(self):
return self.home / "lib-backup"
def handle(self):
from poetry.__version__ import __version__
from poetry.repositories.pypi_repository import PyPiRepository
from poetry.semver import Version
from poetry.utils._compat import Path
current = Path(__file__)
try:
current.relative_to(self.home)
except ValueError:
raise RuntimeError(
"Poetry was not installed with the recommended installer. "
"Cannot update automatically."
)
version = self.argument("version")
if not version:
version = ">=" + __version__
repo = PyPiRepository(fallback=False)
packages = repo.find_packages(
"poetry", version, allow_prereleases=self.option("preview")
)
if not packages:
self.line("No release found for the specified version")
return
packages.sort(
key=cmp_to_key(
lambda x, y: 0
if x.version == y.version
else int(x.version < y.version or -1)
)
)
release = None
for package in packages:
if package.is_prerelease():
if self.option("preview"):
release = package
break
continue
release = package
break
if release is None:
self.line("No new release found")
return
if release.version == Version.parse(__version__):
self.line("You are using the latest version")
return
self.update(release)
def update(self, release):
version = release.version
self.line("Updating to <info>{}</info>".format(version))
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
# Backup the current installation
if self.lib.exists():
shutil.copytree(str(self.lib), str(self.lib_backup))
shutil.rmtree(str(self.lib))
try:
self._update(version)
except Exception:
if not self.lib_backup.exists():
raise
shutil.copytree(str(self.lib_backup), str(self.lib))
shutil.rmtree(str(self.lib_backup))
raise
finally:
if self.lib_backup.exists():
shutil.rmtree(str(self.lib_backup))
self.line("")
self.line("")
self.line(
"<info>Poetry</info> (<comment>{}</comment>) is installed now. Great!".format(
version
)
)
def _update(self, version):
from poetry.utils.helpers import temporary_directory
platform = sys.platform
if platform == "linux2":
platform = "linux"
checksum = "poetry-{}-{}.sha256sum".format(version, platform)
try:
r = urlopen(self.BASE_URL + "/{}/{}".format(version, checksum))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(checksum))
raise
checksum = r.read().decode()
# We get the payload from the remote host
name = "poetry-{}-{}.tar.gz".format(version, platform)
try:
r = urlopen(self.BASE_URL + "/{}/{}".format(version, name))
except HTTPError as e:
if e.code == 404:
raise RuntimeError("Could not find {} file".format(name))
raise
meta = r.info()
size = int(meta["Content-Length"])
current = 0
block_size = 8192
bar = self.progress_bar(max=size)
bar.set_format(" - Downloading <info>{}</> <comment>%percent%%</>".format(name))
bar.start()
sha = hashlib.sha256()
with temporary_directory(prefix="poetry-updater-") as dir_:
tar = os.path.join(dir_, name)
with open(tar, "wb") as f:
while True:
buffer = r.read(block_size)
if not buffer:
break
current += len(buffer)
f.write(buffer)
sha.update(buffer)
bar.set_progress(current)
bar.finish()
# Checking hashes
if checksum != sha.hexdigest():
raise RuntimeError(
"Hashes for {} do not match: {} != {}".format(
name, checksum, sha.hexdigest()
)
)
gz = GzipFile(tar, mode="rb")
try:
with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f:
f.extractall(str(self.lib))
finally:
gz.close()
def process(self, *args):
return subprocess.check_output(list(args), stderr=subprocess.STDOUT)
def _bin_path(self, base_path, bin):
if sys.platform == "win32":
return (base_path / "Scripts" / bin).with_suffix(".exe")
return base_path / "bin" / bin
| [((29, 17, 29, 80), 'cleo.argument', 'argument', (), '', False, 'from cleo import argument\n'), ((30, 15, 30, 62), 'cleo.option', 'option', ({(30, 22, 30, 31): '"""preview"""', (30, 33, 30, 37): 'None', (30, 39, 30, 61): '"""Install prereleases."""'}, {}), "('preview', None, 'Install prereleases.')", False, 'from cleo import option\n'), ((57, 18, 57, 32), 'poetry.utils._compat.Path', 'Path', ({(57, 23, 57, 31): '__file__'}, {}), '(__file__)', False, 'from poetry.utils._compat import Path\n'), ((70, 15, 70, 45), 'poetry.repositories.pypi_repository.PyPiRepository', 'PyPiRepository', (), '', False, 'from poetry.repositories.pypi_repository import PyPiRepository\n'), ((182, 14, 182, 30), 'hashlib.sha256', 'hashlib.sha256', ({}, {}), '()', False, 'import hashlib\n'), ((39, 20, 39, 35), 'poetry.utils.appdirs.expanduser', 'expanduser', ({(39, 31, 39, 34): '"""~"""'}, {}), "('~')", False, 'from poetry.utils.appdirs import expanduser\n'), ((104, 30, 104, 56), 'poetry.semver.Version.parse', 'Version.parse', ({(104, 44, 104, 55): '__version__'}, {}), '(__version__)', False, 'from poetry.semver import Version\n'), ((183, 13, 183, 58), 'poetry.utils.helpers.temporary_directory', 'temporary_directory', (), '', False, 'from poetry.utils.helpers import temporary_directory\n'), ((184, 18, 184, 42), 'os.path.join', 'os.path.join', ({(184, 31, 184, 35): 'dir_', (184, 37, 184, 41): 'name'}, {}), '(dir_, name)', False, 'import os\n'), ((207, 17, 207, 41), 'gzip.GzipFile', 'GzipFile', (), '', False, 'from gzip import GzipFile\n'), ((209, 21, 209, 80), 'tarfile.TarFile', 'tarfile.TarFile', (), '', False, 'import tarfile\n')] |
davidmcclure/open-syllabus-project | osp/test/corpus/syllabus/test_text.py | 078cfd4c5a257fbfb0901d43bfbc6350824eed4e |
from osp.corpus.syllabus import Syllabus
from osp.test.utils import requires_tika
def test_empty(mock_osp):
"""
Should return None if the file is empty.
"""
path = mock_osp.add_file(content='', ftype='plain')
syllabus = Syllabus(path)
assert syllabus.text == None
def test_plaintext(mock_osp):
"""
Should extract text from vanilla text files.
"""
path = mock_osp.add_file(content='text', ftype='plain')
syllabus = Syllabus(path)
assert syllabus.text == 'text'
def test_html(mock_osp):
"""
Should extract text from HTML files.
"""
path = mock_osp.add_file(content='<p>text</p>', ftype='html')
syllabus = Syllabus(path)
assert syllabus.text == 'text'
def test_pdf(mock_osp):
"""
Should extract text from PDF files.
"""
path = mock_osp.add_file(content='text', ftype='pdf')
syllabus = Syllabus(path)
assert syllabus.text.strip() == 'text'
@requires_tika
def test_office(mock_osp):
"""
Should extract text from office files.
"""
path = mock_osp.add_file(content='text', ftype='docx')
syllabus = Syllabus(path)
assert syllabus.text.strip() == 'text'
| [((14, 15, 14, 29), 'osp.corpus.syllabus.Syllabus', 'Syllabus', ({(14, 24, 14, 28): 'path'}, {}), '(path)', False, 'from osp.corpus.syllabus import Syllabus\n'), ((26, 15, 26, 29), 'osp.corpus.syllabus.Syllabus', 'Syllabus', ({(26, 24, 26, 28): 'path'}, {}), '(path)', False, 'from osp.corpus.syllabus import Syllabus\n'), ((38, 15, 38, 29), 'osp.corpus.syllabus.Syllabus', 'Syllabus', ({(38, 24, 38, 28): 'path'}, {}), '(path)', False, 'from osp.corpus.syllabus import Syllabus\n'), ((50, 15, 50, 29), 'osp.corpus.syllabus.Syllabus', 'Syllabus', ({(50, 24, 50, 28): 'path'}, {}), '(path)', False, 'from osp.corpus.syllabus import Syllabus\n'), ((63, 15, 63, 29), 'osp.corpus.syllabus.Syllabus', 'Syllabus', ({(63, 24, 63, 28): 'path'}, {}), '(path)', False, 'from osp.corpus.syllabus import Syllabus\n')] |
mixbee/neo-boa | boa_test/tests/test_ico_template.py | da7366c26c7b8e60afb9ac27439a1da37b0be355 | from boa_test.tests.boa_test import BoaFixtureTest
from boa.compiler import Compiler
from neo.Core.TX.Transaction import Transaction
from neo.Prompt.Commands.BuildNRun import TestBuild
from neo.EventHub import events
from neo.SmartContract.SmartContractEvent import SmartContractEvent, NotifyEvent
from neo.Settings import settings
from neo.Prompt.Utils import parse_param
from neo.Core.FunctionCode import FunctionCode
from neocore.Fixed8 import Fixed8
from boa_test.example.demo.nex.token import *
import shutil
import os
from logzero import logger
settings.USE_DEBUG_STORAGE = True
settings.DEBUG_STORAGE_PATH = './fixtures/debugstorage'
class TestContract(BoaFixtureTest):
dispatched_events = []
dispatched_logs = []
@classmethod
def tearDownClass(cls):
super(BoaFixtureTest, cls).tearDownClass()
try:
if os.path.exists(settings.debug_storage_leveldb_path):
shutil.rmtree(settings.debug_storage_leveldb_path)
else:
logger.error("debug storage path doesn't exist")
except Exception as e:
logger.error("couldn't remove debug storage %s " % e)
@classmethod
def setUpClass(cls):
super(TestContract, cls).setUpClass()
def on_notif(evt):
print(evt)
cls.dispatched_events.append(evt)
print("dispatched events %s " % cls.dispatched_events)
def on_log(evt):
print(evt)
cls.dispatched_logs.append(evt)
events.on(SmartContractEvent.RUNTIME_NOTIFY, on_notif)
events.on(SmartContractEvent.RUNTIME_LOG, on_log)
def test_ICOTemplate_1(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# print(output.to_s())
tx, results, total_ops, engine = TestBuild(out, ['name', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), TOKEN_NAME)
tx, results, total_ops, engine = TestBuild(out, ['symbol', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), TOKEN_SYMBOL)
tx, results, total_ops, engine = TestBuild(out, ['decimals', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_DECIMALS)
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['nonexistentmethod', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetString(), 'unknown operation')
# deploy with wallet 2 should fail CheckWitness
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# second time, it should already be deployed and return false
tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now total supply should be equal to the initial owner amount
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT)
# now the owner should have a balance of the TOKEN_INITIAL_AMOUNT
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([bytearray(TOKEN_OWNER)])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT)
def test_ICOTemplate_2(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# now transfer tokens to wallet 2
TestContract.dispatched_events = []
test_transfer_amount = 2400000001
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, test_transfer_amount])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.addr_from.Data, bytearray(TOKEN_OWNER))
self.assertEqual(evt.addr_to, self.wallet_2_script_hash)
self.assertEqual(evt.amount, test_transfer_amount)
# now get balance of wallet 2
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), test_transfer_amount)
# now the owner should have less
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([bytearray(TOKEN_OWNER)])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT - test_transfer_amount)
# now this transfer should fail
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, TOKEN_INITIAL_AMOUNT])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# this transfer should fail because it is not signed by the 'from' address
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, 10000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now this transfer should fail, this is from address with no tokens
tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# get balance of bad data
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param(['abc'])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# get balance no params
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
def test_ICOTemplate_3_KYC(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
print(output.to_s())
# now transfer tokens to wallet 2
TestContract.dispatched_events = []
# test mint tokens without being kyc verified
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# Try to register as a non owner
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# Get status of non registered address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_status', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
TestContract.dispatched_events = []
# register an address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertEqual(evt.event_payload.Value[0].Value, b'kyc_registration')
# register 2 addresses at once
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 2)
# now check reg status
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_status', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
def test_ICOTemplate_4_attachments(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
        # check get_attachments when NEO is attached
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
fn = FunctionCode(out, '0705', '05')
self.assertEqual(attachments[0].GetByteArray(), fn.ScriptHash().Data)
self.assertEqual(attachments[1].GetByteArray(), self.wallet_3_script_hash.Data)
self.assertEqual(attachments[2].GetBigInteger(), Fixed8.FromDecimal(10).value)
self.assertEqual(attachments[3].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
self.assertEqual(attachments[1].GetByteArray(), bytearray())
self.assertEqual(attachments[2].GetBigInteger(), 0)
self.assertEqual(attachments[3].GetBigInteger(), 0)
tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]', '--attach-neo=3', '--attach-gas=3.12'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
attachments = results[0].GetArray()
self.assertEqual(len(attachments), 4)
self.assertEqual(attachments[1].GetByteArray(), self.wallet_1_script_hash.Data)
self.assertEqual(attachments[2].GetBigInteger(), Fixed8.FromDecimal(3).value)
self.assertEqual(attachments[3].GetBigInteger(), Fixed8.FromDecimal(3.12).value)
def test_ICOTemplate_5_mint(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
# register an address
tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1)
TestContract.dispatched_events = []
# test mint tokens, this should return true
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.amount, 10 * TOKENS_PER_NEO)
self.assertEqual(evt.addr_to, self.wallet_3_script_hash)
# test mint tokens again, this should be false since you can't do it twice
tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# now the minter should have a balance
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 10 * TOKENS_PER_NEO)
# now the total circulation should be bigger
tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), (10 * TOKENS_PER_NEO) + TOKEN_INITIAL_AMOUNT)
def test_ICOTemplate_6_approval(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
        # transfer_from, approve, allowance
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# try to transfer from
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
# try to approve from someone not yourself
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
# try to approve more than you have
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, TOKEN_INITIAL_AMOUNT])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 0)
TestContract.dispatched_events = []
# approve should work
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1234])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
# it should dispatch an event
self.assertEqual(len(TestContract.dispatched_events), 1)
evt = TestContract.dispatched_events[0]
self.assertIsInstance(evt, NotifyEvent)
self.assertEqual(evt.notify_type, b'approve')
self.assertEqual(evt.amount, 1234)
# check allowance
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1234)
# approve should not be additive, it should overwrite previous approvals
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 133234])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 133234)
# now you can transfer from
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
        # now the receiver should have a balance
# it is equal to 10000 plus test_transfer_amount = 2400000001
tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 10000 + 2400000001)
# now the allowance should be less
tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 133234 - 10000)
# try to transfer too much, even with approval
tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 14440000])], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
        # can't approve negative amounts
tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, -1000])], self.GetWallet3(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), False)
def test_many_ops(self):
output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default
out = output.write()
        # call an auxiliary op from the template
tx, results, total_ops, engine = TestBuild(out, ['another_op_5', bytearray()], self.GetWallet2(), '0705', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 6)
| [((50, 8, 50, 62), 'neo.EventHub.events.on', 'events.on', ({(50, 18, 50, 51): 'SmartContractEvent.RUNTIME_NOTIFY', (50, 53, 50, 61): 'on_notif'}, {}), '(SmartContractEvent.RUNTIME_NOTIFY, on_notif)', False, 'from neo.EventHub import events\n'), ((51, 8, 51, 57), 'neo.EventHub.events.on', 'events.on', ({(51, 18, 51, 48): 'SmartContractEvent.RUNTIME_LOG', (51, 50, 51, 56): 'on_log'}, {}), '(SmartContractEvent.RUNTIME_LOG, on_log)', False, 'from neo.EventHub import events\n'), ((216, 13, 216, 44), 'neo.Core.FunctionCode.FunctionCode', 'FunctionCode', ({(216, 26, 216, 29): 'out', (216, 31, 216, 37): '"""0705"""', (216, 39, 216, 43): '"""05"""'}, {}), "(out, '0705', '05')", False, 'from neo.Core.FunctionCode import FunctionCode\n'), ((30, 15, 30, 66), 'os.path.exists', 'os.path.exists', ({(30, 30, 30, 65): 'settings.debug_storage_leveldb_path'}, {}), '(settings.debug_storage_leveldb_path)', False, 'import os\n'), ((32, 16, 32, 66), 'shutil.rmtree', 'shutil.rmtree', ({(32, 30, 32, 65): 'settings.debug_storage_leveldb_path'}, {}), '(settings.debug_storage_leveldb_path)', False, 'import shutil\n'), ((34, 16, 34, 64), 'logzero.logger.error', 'logger.error', ({(34, 29, 34, 63): '"""debug storage path doesn\'t exist"""'}, {}), '("debug storage path doesn\'t exist")', False, 'from logzero import logger\n'), ((36, 12, 36, 65), 'logzero.logger.error', 'logger.error', ({(36, 25, 36, 64): '("couldn\'t remove debug storage %s " % e)'}, {}), '("couldn\'t remove debug storage %s " % e)', False, 'from logzero import logger\n'), ((125, 70, 125, 115), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(125, 82, 125, 114): '[self.wallet_2_script_hash.Data]'}, {}), '([self.wallet_2_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((145, 69, 145, 152), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(145, 81, 145, 151): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1000]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n 1000])', False, 'from neo.Prompt.Utils import parse_param\n'), ((150, 70, 150, 90), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(150, 82, 150, 89): "['abc']"}, {}), "(['abc'])", False, 'from neo.Prompt.Utils import parse_param\n'), ((155, 70, 155, 85), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(155, 82, 155, 84): '[]'}, {}), '([])', False, 'from neo.Prompt.Utils import parse_param\n'), ((174, 79, 174, 124), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(174, 91, 174, 123): '[self.wallet_3_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((179, 77, 179, 122), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(179, 89, 179, 121): '[self.wallet_3_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((186, 79, 186, 124), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(186, 91, 186, 123): '[self.wallet_3_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((196, 79, 196, 156), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(196, 91, 196, 155): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((201, 77, 201, 122), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(201, 89, 201, 121): '[self.wallet_3_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data])', False, 
'from neo.Prompt.Utils import parse_param\n'), ((220, 57, 220, 79), 'neocore.Fixed8.Fixed8.FromDecimal', 'Fixed8.FromDecimal', ({(220, 76, 220, 78): '(10)'}, {}), '(10)', False, 'from neocore.Fixed8 import Fixed8\n'), ((237, 57, 237, 78), 'neocore.Fixed8.Fixed8.FromDecimal', 'Fixed8.FromDecimal', ({(237, 76, 237, 77): '(3)'}, {}), '(3)', False, 'from neocore.Fixed8 import Fixed8\n'), ((238, 57, 238, 81), 'neocore.Fixed8.Fixed8.FromDecimal', 'Fixed8.FromDecimal', ({(238, 76, 238, 80): '(3.12)'}, {}), '(3.12)', False, 'from neocore.Fixed8 import Fixed8\n'), ((246, 79, 246, 124), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(246, 91, 246, 123): '[self.wallet_3_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((270, 70, 270, 115), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(270, 82, 270, 114): '[self.wallet_3_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((285, 70, 285, 147), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(285, 82, 285, 146): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((290, 73, 290, 157), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(290, 85, 290, 156): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n 10000])', False, 'from neo.Prompt.Utils import parse_param\n'), ((295, 68, 295, 152), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(295, 80, 295, 151): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n 10000])', False, 'from neo.Prompt.Utils import parse_param\n'), ((300, 68, 300, 167), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(300, 80, 300, 166): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n TOKEN_INITIAL_AMOUNT]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n TOKEN_INITIAL_AMOUNT])', False, 'from neo.Prompt.Utils import parse_param\n'), ((307, 68, 307, 151), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(307, 80, 307, 150): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1234]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n 1234])', False, 'from neo.Prompt.Utils import parse_param\n'), ((319, 70, 319, 147), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(319, 82, 319, 146): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((324, 68, 324, 153), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(324, 80, 324, 152): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 133234]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n 133234])', False, 'from neo.Prompt.Utils import parse_param\n'), ((328, 70, 328, 147), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(328, 82, 328, 146): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((333, 73, 333, 157), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(333, 85, 333, 156): 
'[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n 10000])', False, 'from neo.Prompt.Utils import parse_param\n'), ((340, 70, 340, 115), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(340, 82, 340, 114): '[self.wallet_2_script_hash.Data]'}, {}), '([self.wallet_2_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((345, 70, 345, 147), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(345, 82, 345, 146): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])', False, 'from neo.Prompt.Utils import parse_param\n'), ((350, 73, 350, 160), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(350, 85, 350, 159): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 14440000]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n 14440000])', False, 'from neo.Prompt.Utils import parse_param\n'), ((355, 68, 355, 152), 'neo.Prompt.Utils.parse_param', 'parse_param', ({(355, 80, 355, 151): '[self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, -1000]'}, {}), '([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data,\n -1000])', False, 'from neo.Prompt.Utils import parse_param\n'), ((55, 17, 55, 36), 'boa.compiler.Compiler.instance', 'Compiler.instance', ({}, {}), '()', False, 'from boa.compiler import Compiler\n'), ((105, 17, 105, 36), 'boa.compiler.Compiler.instance', 'Compiler.instance', ({}, {}), '()', False, 'from boa.compiler import Compiler\n'), ((161, 17, 161, 36), 'boa.compiler.Compiler.instance', 'Compiler.instance', ({}, {}), '()', False, 'from boa.compiler import Compiler\n'), ((207, 17, 207, 36), 'boa.compiler.Compiler.instance', 'Compiler.instance', ({}, {}), '()', False, 'from boa.compiler import Compiler\n'), ((242, 17, 242, 36), 'boa.compiler.Compiler.instance', 'Compiler.instance', ({}, {}), '()', False, 'from boa.compiler import Compiler\n'), ((281, 17, 281, 36), 'boa.compiler.Compiler.instance', 'Compiler.instance', ({}, {}), '()', False, 'from boa.compiler import Compiler\n'), ((361, 17, 361, 36), 'boa.compiler.Compiler.instance', 'Compiler.instance', ({}, {}), '()', False, 'from boa.compiler import Compiler\n')] |
lvijay/ilc | regexem.py | 1c3b1381e7e5a5064bda829e3d34bfaf24745d1a | #!/usr/bin/python
# -*- mode: python; -*-
## This file is part of Indian Language Converter
## Copyright (C) 2006 Vijay Lakshminarayanan <[email protected]>
## Indian Language Converter is free software; you can redistribute it
## and/or modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2 of
## the License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.
## $Id: regexem.py,v 1.4 2006-03-26 03:15:24 vijay Exp $
## Author: Vijay Lakshminarayanan
## $Date: 2006-03-26 03:15:24 $
import sys
from re import escape
def regexem (strlst):
"""Returns a single string which is the regular expression to
identify any single word in the given argument.
See the Examples given at the end of this file."""
return regexem_internal([escape(s) for s in strlst])
def regexem_internal (strlst):
strlst.sort()
s, rest = strlst[0], strlst[1:]
groups = {}
groups[s] = [s]
for string in rest:
if string.startswith(s) and len(s) < len(string): # avoid duplicates
groups[s].append(string[len(s):]) # add the suffix to the group
else:
s = string # a fresh prefix
groups[s] = [s]
regex = ''
for prefix, words in groups.items():
inreg = ''
if len(words) == 2: # i.e. words[0] is a subset of words[1]
inreg += words[0] + '(' + words[1] + ')' + '?'
elif len(words) > 2:
inreg += words[0] + '(' + regexem_internal(words[1:]) + ')' + '?'
else:
inreg += prefix # since prefix == words[0] in this case.
regex += '(' + inreg + ')' + '|'
return regex[:-1] # we don't need the last '|'
if __name__ == '__main__':
print ''.join(regexem(sys.argv[1:]))
## Examples
#
# $ ./regexem.py emacs vi ed
# (ed)|(emacs)|(vi)
#
# $ ./regexem.py batsman bats well
# (well)|(bats(man)?)
#
# $ ./regexem.py houses housefly
# (houses)|(housefly) ## Note that they aren't grouped together
#
## a slightly complicated example
# $ ./regexem.py an anteater and an ant
# (an((d)|(t(eater)?))?)
| [] |
rohit-k-das/crowdstrike-alerts | main.py | 48c23357f819f90134f76cefb58f1355967363d4 | import requests
import crowdstrike_detection as crowdstrike
import logging
import click
import urllib.parse
import configparser
import os
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
Config = configparser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Crowdstrike_creds'))
# Create your own slackbot
hubot_webhook_url = Config.get('Settings', 'Slackbot_Url')
# Send slack alert via hubot for each high or critical detection in crowdstrike
def send_hubot_alert_crowdstrike(detection):
logger.info("Send hubot alert for detection %s" % detection.detection_id)
# Emoji for slack based on action taken
green_alerts = ['Kill process', 'Kill subprocess', 'Quarantine file', 'Kill parent', 'Process blocked',
'Operation blocked']
red_alerts = ['Policy disabled']
amber_alerts = []
actions = []
for behavior in detection.behavior:
actions.extend(behavior['action_taken'])
if actions:
actions = list(set(actions))
alerts = []
if actions:
if list(set(actions).intersection(red_alerts)):
alerts.append(':red-alert: Allowed')
if list(set(actions).intersection(green_alerts)):
alerts.append(':green-alert: Blocked')
else:
alerts.append(':red-alert: Allowed')
if ':green-alert: Blocked' in alerts and ':red-alert: Allowed' in alerts:
alerts = [':amber-alert: Suspicious']
message_to_send = ":crowd-strike: *%s* Alert: <%s|%s> ---> %s\n" % (
detection.severity, detection.link, detection.detection_id.split(':')[2], str(alerts).strip('[').strip(']').replace("'", ""))
message_to_send = "%sDevice: %s\n" % (message_to_send, detection.device)
for behavior in detection.behavior:
message_to_send = "%sBad Behavior: %s\n" % (message_to_send, behavior['bad_behavior'].replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
message_to_send = "%sHash: %s\n" % (message_to_send, behavior['hash'])
message_to_send = "%sParent Cmd: %s\n" % (message_to_send, behavior['parent_commandline'])
message_to_send = "%sTactic-Technique: %s\n" % (message_to_send, behavior['tactic + technique'])
if behavior['action_taken']:
message_to_send = "%sAction Taken: %s" % (
message_to_send, str(behavior['action_taken']).strip('[').strip(']').replace("'", ""))
else:
message_to_send = "%sAction Taken: %s" % (message_to_send, 'None')
if len(detection.behavior) > 1:
message_to_send = "%s\n" % message_to_send
# Whom to send the alert
send_to = 'yourchannel or a user'
data = {'message': message_to_send, 'users': send_to}
data = urllib.parse.urlencode(data)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
resp = requests.post(hubot_webhook_url, headers=headers, data=data)
if resp.ok:
logger.info("Sent alert to user/channel %s" % send_to)
else:
logger.critical("Unable to connect to hubot.")
logger.info("Hubot Error %d:%s" % (resp.status_code, resp.text))
@click.command()
@click.option("-d", "--duration", default=600, show_default=True, nargs=1, type=int, required=False, help="Crowdstrike detections that were last seen since 'duration' seconds")
def main(duration):
crowdstrike_detections = crowdstrike.fetch_detections(duration)
if crowdstrike_detections:
logger.info("Sending alerts")
for detection in crowdstrike_detections:
send_hubot_alert_crowdstrike(detection)
if __name__ == '__main__':
main()
| [((9, 0, 10, 55), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((11, 9, 11, 36), 'logging.getLogger', 'logging.getLogger', ({(11, 27, 11, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((13, 9, 13, 36), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ({}, {}), '()', False, 'import ConfigParser\n'), ((79, 1, 79, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((80, 1, 80, 176), 'click.option', 'click.option', (), '', False, 'import click\n'), ((71, 11, 71, 71), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((82, 29, 82, 67), 'crowdstrike_detection.fetch_detections', 'crowdstrike.fetch_detections', ({(82, 58, 82, 66): 'duration'}, {}), '(duration)', True, 'import crowdstrike_detection as crowdstrike\n'), ((14, 41, 14, 66), 'os.path.dirname', 'os.path.dirname', ({(14, 57, 14, 65): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
lumikanta/connexion | connexion/http_facts.py | b6530d32aaee92ebbdfef501540d642a26185174 | FORM_CONTENT_TYPES = [
'application/x-www-form-urlencoded',
'multipart/form-data'
]
| [] |
klepik1990/YandexTestAPI | Test3/yandexAPI3.py | ded41ff607c0b209b51efbcaa13c8008156a5e0a | import requests
import json
HEADERS = {"Authorization": "OAuth AgAAAAA00Se2AAW1W1yCegavqkretMXBGkoUUQk", "Accept": "*/*"}
URL = "https://cloud-api.yandex.net:443/v1/disk/"
def get_folder_info(folder_name_1, folder_name_2, url=None, headers=None):
"""Получение информации о статусе папок на диске
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информация о папках: путь до папок, если созданы успешно. В противном случае описание ошибки.
"""
info = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "&fields=path", headers=HEADERS)
dict_response = json.loads(info.content)
if info.status_code == 404:
return dict_response["description"]
else:
return dict_response["path"]
def get_file_info(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Получение информации о файле
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
file_name: имя файла.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Путь до файла.
"""
file_info_json = requests.get(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&fields=path", headers = HEADERS)
file_info_dict = json.loads(file_info_json.content)
if file_info_json.status_code == 404:
return file_info_dict["description"]
else:
return file_info_dict["path"]
def create_folder(folder_name_1, folder_name_2, url=None, headers=None):
"""Создание папок на диске.
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информация о папках через вызов другой функции.
"""
response_code = [202, 204]
new_folder = requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
if new_folder.status_code == 409:
new_folder = requests.delete(url= URL + "resources?path=" + folder_name_1 + "&permanently=true", headers=HEADERS)
if new_folder.status_code in response_code:
requests.put(url= URL + "resources?path=" + folder_name_1, headers=HEADERS)
requests.put(url= URL + "resources?path=" + folder_name_1 + "/" + folder_name_2, headers=HEADERS)
return get_folder_info(folder_name_1, folder_name_2)
def create_file(folder_name_1, folder_name_2, file_name, url=None, headers=None):
"""Загрузка файла на диск.
Args:
folder_name_1: имя корневой папки.
folder_name_2: имя вложенной папки.
file_name: имя файла.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Информацию о созданном файле через вызов другой функции.
"""
    assert len(file_name) > 0, "File name was not provided"
new_file = requests.get(url= URL + "resources/upload?path=" + folder_name_1 + "/" + folder_name_2 + "/" + file_name +
".jpg&overwrite=true", headers=HEADERS)
get_link = new_file.content
link = json.loads(get_link)
requests.put(url=link["href"])
return get_file_info(folder_name_1, folder_name_2, file_name)
def move_to_bucket(folder_name, url=None, headers=None):
"""Перемещение папки с содержимым в корзину.
Args:
folder_name: имя корневой папки.
url: адрес для запроса.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Ссылку для проверки статуса.
"""
order_response = requests.delete(url= URL + "resources?path=" + folder_name, headers=HEADERS)
return json.loads(order_response.content)["href"]
def get_status(link, headers=None):
"""Получение статуса операции по ссылке.
Args:
link: ссылка, для которой проверяется статус.
headers: заголовки запроса, содержащие токен авторизации.
Returns:
Статус операции.
"""
status_response = requests.get(url=link, headers=HEADERS)
return json.loads(status_response.content)["status"]
def clean_bucket():
"""Очистка корзины.
Returns:
Ссылку для проверки статуса.
"""
remove_folder = requests.delete(url= URL + "trash/resources", headers=HEADERS)
return json.loads(remove_folder.content)["href"]
| [((22, 11, 22, 125), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((23, 20, 23, 44), 'json.loads', 'json.loads', ({(23, 31, 23, 43): 'info.content'}, {}), '(info.content)', False, 'import json\n'), ((44, 21, 45, 72), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((46, 21, 46, 55), 'json.loads', 'json.loads', ({(46, 32, 46, 54): 'file_info_json.content'}, {}), '(file_info_json.content)', False, 'import json\n'), ((67, 17, 67, 92), 'requests.put', 'requests.put', (), '', False, 'import requests\n'), ((72, 4, 72, 101), 'requests.put', 'requests.put', (), '', False, 'import requests\n'), ((90, 15, 91, 67), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((93, 11, 93, 31), 'json.loads', 'json.loads', ({(93, 22, 93, 30): 'get_link'}, {}), '(get_link)', False, 'import json\n'), ((94, 4, 94, 34), 'requests.put', 'requests.put', (), '', False, 'import requests\n'), ((110, 21, 110, 97), 'requests.delete', 'requests.delete', (), '', False, 'import requests\n'), ((125, 22, 125, 61), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((136, 20, 136, 82), 'requests.delete', 'requests.delete', (), '', False, 'import requests\n'), ((69, 21, 69, 121), 'requests.delete', 'requests.delete', (), '', False, 'import requests\n'), ((111, 11, 111, 45), 'json.loads', 'json.loads', ({(111, 22, 111, 44): 'order_response.content'}, {}), '(order_response.content)', False, 'import json\n'), ((126, 11, 126, 46), 'json.loads', 'json.loads', ({(126, 22, 126, 45): 'status_response.content'}, {}), '(status_response.content)', False, 'import json\n'), ((137, 11, 137, 44), 'json.loads', 'json.loads', ({(137, 22, 137, 43): 'remove_folder.content'}, {}), '(remove_folder.content)', False, 'import json\n'), ((71, 12, 71, 87), 'requests.put', 'requests.put', (), '', False, 'import requests\n')] |
trinanda/AQUR | app/users/operator/views.py | 2a415b05ba4c0113b05b6fa14fb454af2bad52ec | import os
from collections import defaultdict
from flask import render_template
from flask_login import login_required
from sqlalchemy import and_
from app import db
from app.decorators import operator_required
from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule
from app.users.operator import operator
@operator.route('/')
@login_required
@operator_required
def index():
title = os.environ.get('APP_NAME')
    # get all students' data on the schedule, except where the student's tuition payment is None, PENDING, REJECTED or WARNING_3
students_courses_data = db.session.query(Schedule, Payment).join(Payment).filter(
        and_(Payment.status_of_payment.isnot(None),
Payment.status_of_payment != PaymentStatus.PENDING.name,
Payment.status_of_payment != PaymentStatus.REJECTED.name,
Payment.status_of_payment != PaymentStatus.WARNING_3.name))
# get the amount of Teachers and Students
total_students = Student.query.count()
total_teachers = Teacher.query.count()
month_name_list = []
for data in MonthNameList:
month_name_list.append(str(data))
# make a query object for "Tahsin" and "Arabic Language" course
tahsin = students_courses_data.join(Course).filter(Course.name == "Tahsin")
arabic = students_courses_data.join(Course).filter(Course.name == "Bahasa Arab")
# the total payment for the courses each month
tahsin_course_data = []
arabic_course_data = []
for data in tahsin:
for month_name in month_name_list:
tahsin_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
for data in arabic:
for month_name in month_name_list:
arabic_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
# merge and sum the total value from the dictionary on the same month from the _courses_data result above
total_tahsin_students_per_month = defaultdict(int)
total_arabic_students_per_month = defaultdict(int)
for d in tahsin_course_data:
for key, value in d.items():
total_tahsin_students_per_month[key] += value
for d in arabic_course_data:
for key, value in d.items():
total_arabic_students_per_month[key] += value
# store all of the month values on a list for each course
tahsin_values = []
arabic_values = []
for key, value in total_tahsin_students_per_month.items():
tahsin_values.append(value)
for key, value in total_arabic_students_per_month.items():
arabic_values.append(value)
    # make a dictionary mapping each course name to the monthly totals of students who made a payment
data_courses_each_month = [
{
'Tahsin': tahsin_values,
},
{
'Bahasa Arab': arabic_values
}
]
return render_template('main/operator/operator-dashboard.html', title=title, total_teachers=total_teachers,
total_students=total_students, month_name_list=month_name_list,
data_courses_each_month=data_courses_each_month)
| [((14, 1, 14, 20), 'app.users.operator.operator.route', 'operator.route', ({(14, 16, 14, 19): '"""/"""'}, {}), "('/')", False, 'from app.users.operator import operator\n'), ((18, 12, 18, 38), 'os.environ.get', 'os.environ.get', ({(18, 27, 18, 37): '"""APP_NAME"""'}, {}), "('APP_NAME')", False, 'import os\n'), ((28, 21, 28, 42), 'app.models.Student.query.count', 'Student.query.count', ({}, {}), '()', False, 'from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule\n'), ((29, 21, 29, 42), 'app.models.Teacher.query.count', 'Teacher.query.count', ({}, {}), '()', False, 'from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule\n'), ((50, 38, 50, 54), 'collections.defaultdict', 'defaultdict', ({(50, 50, 50, 53): 'int'}, {}), '(int)', False, 'from collections import defaultdict\n'), ((51, 38, 51, 54), 'collections.defaultdict', 'defaultdict', ({(51, 50, 51, 53): 'int'}, {}), '(int)', False, 'from collections import defaultdict\n'), ((77, 11, 79, 75), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template\n'), ((22, 8, 25, 71), 'sqlalchemy.and_', 'and_', ({(22, 13, 22, 50): 'Payment.status_of_payment is not None', (23, 13, 23, 68): 'Payment.status_of_payment != PaymentStatus.PENDING.name', (24, 13, 24, 69): 'Payment.status_of_payment != PaymentStatus.REJECTED.name', (25, 13, 25, 70): 'Payment.status_of_payment != PaymentStatus.WARNING_3.name'}, {}), '(Payment.status_of_payment is not None, Payment.status_of_payment !=\n PaymentStatus.PENDING.name, Payment.status_of_payment != PaymentStatus.\n REJECTED.name, Payment.status_of_payment != PaymentStatus.WARNING_3.name)', False, 'from sqlalchemy import and_\n'), ((21, 28, 21, 63), 'app.db.session.query', 'db.session.query', ({(21, 45, 21, 53): 'Schedule', (21, 55, 21, 62): 'Payment'}, {}), '(Schedule, Payment)', False, 'from app import db\n')] |
jskinn/arvet | arvet/core/metric.py | 742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9 | # Copyright (c) 2017, John Skinner
import abc
import typing
import bson
import pymodm
import pymodm.fields as fields
import arvet.database.pymodm_abc as pymodm_abc
from arvet.database.reference_list_field import ReferenceListField
import arvet.core.trial_result
class Metric(pymodm.MongoModel, metaclass=pymodm_abc.ABCModelMeta):
"""
A class that measures results
This is an abstract base class defining an interface for all metrics,
to allow them to be called easily and in a structured way.
"""
@property
def identifier(self) -> bson.ObjectId:
"""
Get the id for this metric
:return:
"""
return self._id
@abc.abstractmethod
def is_trial_appropriate(self, trial_result: arvet.core.trial_result.TrialResult) -> bool:
"""
Fine-grained filtering for trial results, to make sure this class can measure this trial result.
:return:
"""
pass
@abc.abstractmethod
def measure_results(self, trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \
-> 'MetricResult':
"""
Measure the results of running a particular system on a particular image source.
We take a collection of trials to allow for multiple repeats of the system on the same data,
which allows us to account for and measure random variation in the system.
A helper to check this is provided below, call it in any implementation.
The trial result MUST include the ground truth along with the system estimates,
which must be the same for all trials.
:param trial_results: A collection of trial results to measure.
These are assumed to be repeat runs of the same system on the same data.
:return: A MetricResult object containing either the results, or explaining the error
:rtype: MetricResult
"""
pass
@abc.abstractmethod
def get_columns(self) -> typing.Set[str]:
"""
Get the set of available properties for this metric. Pass these to "get_properties", below.
:return:
"""
pass
@abc.abstractmethod
def get_properties(self, columns: typing.Iterable[str] = None) -> typing.Mapping[str, typing.Any]:
"""
Get the values of the requested properties
:param columns:
:return:
"""
pass
@classmethod
def get_pretty_name(cls) -> str:
"""
Get a human-readable name for this metric
:return:
"""
return cls.__module__ + '.' + cls.__name__
@classmethod
def get_instance(cls) -> 'Metric':
"""
Get an instance of this vision system, with some parameters, pulling from the database if possible,
or construct a new one if needed.
It is the responsibility of subclasses to ensure that as few instances of each system as possible exist
within the database.
Does not save the returned object, you'll usually want to do that straight away.
:return:
"""
all_objects = cls.objects.all()
if all_objects.count() > 0:
return all_objects.first()
obj = cls()
return obj
class MetricResult(pymodm.MongoModel):
"""
A general superclass for metric results for all metrics
"""
metric = fields.ReferenceField(Metric, required=True, on_delete=fields.ReferenceField.CASCADE)
trial_results = ReferenceListField(arvet.core.trial_result.TrialResult,
required=True, on_delete=fields.ReferenceField.CASCADE)
success = fields.BooleanField(required=True)
message = fields.CharField()
# The set of plots available to visualize_results.
available_plots = set()
@property
def identifier(self) -> bson.ObjectId:
"""
Get the id of this metric result
:return:
"""
return self._id
def get_columns(self) -> typing.Set[str]:
"""
Get a list of available results columns, which are the possible keys in dictionaries returned by get_results.
Should delegate to the linked trial results, systems, etc for the full list.
:return:
"""
return set()
def get_results(self, columns: typing.Iterable[str] = None) -> typing.List[dict]:
"""
Get the results from this metric result, as a list of dictionaries we can turn into a Pandas data frame.
Each dictionary should include as much data as possible, including data about the system, the image source,
the particular image, etc...
Use the argument to restrict the columns to a limited set, should return all by default.
This must return a non-empty list for any trial result where success is True.
:return:
"""
return []
def check_trial_collection(trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \
-> typing.Union[str, None]:
"""
A helper function to check that all the given trial results come from the same system and image source.
Call this at the start of Metric.measure_results
:param trial_results: A collection of trial results passed to Metric.measure_results
:return: None if all the trials are OK, string explaining the problem if they are not
"""
first_trial = None
for idx, trial in enumerate(trial_results):
if not trial.success:
return "Trial {0} (1) is failed".format(idx, trial.pk)
if first_trial is None:
first_trial = trial
else:
if trial.image_source != first_trial.image_source:
return "Trial {0} ({1}) does not have the same image source as the first trial".format(idx, trial.pk)
if trial.system != first_trial.system:
return "Trial {0} ({1}) does not have the same system as the first trial".format(idx, trial.pk)
| [((101, 13, 101, 98), 'pymodm.fields.ReferenceField', 'fields.ReferenceField', (), '', True, 'import pymodm.fields as fields\n'), ((102, 20, 103, 94), 'arvet.database.reference_list_field.ReferenceListField', 'ReferenceListField', (), '', False, 'from arvet.database.reference_list_field import ReferenceListField\n'), ((104, 14, 104, 48), 'pymodm.fields.BooleanField', 'fields.BooleanField', (), '', True, 'import pymodm.fields as fields\n'), ((105, 14, 105, 32), 'pymodm.fields.CharField', 'fields.CharField', ({}, {}), '()', True, 'import pymodm.fields as fields\n')] |
thorwhalen/ut | pfile/accessor.py | 353a4629c35a2cca76ef91a4d5209afe766433b4 | """File access utils"""
__author__ = 'thorwhalen'
# from ut.datapath import datapath
import pickle
import os
from ut.util.importing import get_environment_variable
import pandas as pd
import ut.pfile.to as file_to
import ut.pfile.name as pfile_name
import ut.pstr.to as pstr_to
from ut.serialize.local import Local
from ut.serialize.s3 import S3
from os import environ # does this load the whole array? Can we just take MS_DATA instead?
import ut.pstr.trans as pstr_trans
import shutil
try:
MS_DATA = get_environment_variable('MS_DATA')
except KeyError:
MS_DATA = ''
LOCATION_LOCAL = 'LOCAL'
LOCATION_S3 = 'S3'
####################################################################################################################
# Quick Utils
def ms_data_path(relative_root, root_folder=MS_DATA):
return os.path.join(pfile_name.ensure_slash_suffix(root_folder), relative_root)
####################################################################################################################
# FACTORIES
def for_local(relative_root='', read_only=False, extension=None, force_extension=False, root_folder=MS_DATA, **kwargs):
    # if a full path (i.e. starting with "/" or "~") is entered as relative_root, take it as the root_folder
if relative_root and ((relative_root[0] == '/') or (relative_root[0] == '~')):
root_folder = relative_root
relative_root = ''
elif relative_root == 'test': # if relative root is test...
relative_root = 'test'
print("you asked for a local test, so I forced the root to be %s" % relative_root)
    # ensure that root_folder ends with a "/"
file_handler = FilepathHandler(relative_root=pfile_name.ensure_slash_suffix(root_folder)+relative_root)
# take care of extensions
if extension:
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
file_loc_proc = lambda x: file_handler.process(extension_handler.process(x))
else:
file_loc_proc = file_handler.process
instance = Accessor(
relative_root=relative_root,
extension=extension,
force_extension=force_extension,
file_loc_proc=file_loc_proc,
location=LOCATION_LOCAL,
read_only=read_only,
**kwargs
)
instance._set_local_defaults()
return instance
def for_s3(relative_root='loc-data', read_only=False, extension=None, force_extension=False, **kwargs):
if relative_root == 'test':
relative_root = 'loc-data/test'
print("you asked for a s3 test, so I forced the root to be %s" % relative_root)
file_handler = FilepathHandler(relative_root=relative_root)
if extension:
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
file_loc_proc = lambda x: file_handler.process(extension_handler.process(x))
else:
file_loc_proc = file_handler.process
instance = Accessor(
relative_root=relative_root,
extension=extension,
force_extension=force_extension,
file_loc_proc=file_loc_proc,
location=LOCATION_S3,
read_only=read_only,
**kwargs
)
save_kwargs = instance.mk_save_kwargs(relative_root)
try:
bucket_name = save_kwargs['bucket_name']
base_folder = save_kwargs['key_name']
except:
print("couldn't get bucket_name and key_name for relative_root")
instance.s3 = S3(bucket_name=bucket_name, base_folder=base_folder)
instance._set_s3_defaults()
return instance
####################################################################################################################
class Accessor(object):
LOCATION_LOCAL = LOCATION_LOCAL
LOCATION_S3 = LOCATION_S3
def __init__(self,
file_loc_proc=None,
location=LOCATION_LOCAL,
mk_save_kwargs=None,
pre_save_proc=None,
save_fun=None,
mk_load_kwargs=None,
load_fun=None,
post_load_proc=None,
read_only=False,
**kwargs):
# if file_loc_proc:
# self.file_loc_proc = file_loc_proc
# else:
# self.file_loc_proc = FilepathHandler().process
self.file_loc_proc = file_loc_proc
self.location = location
self.mk_save_kwargs = mk_save_kwargs
self.pre_save_proc = pre_save_proc
self.save_fun = save_fun
self.mk_load_kwargs = mk_load_kwargs
self.load_fun = load_fun
self.post_load_proc = post_load_proc
self.read_only = read_only
for k, v in list(kwargs.items()):
self.__setattr__(k,v)
self._guess_missing_attributes()
def __call__(self, *args, **kwargs):
return self.filepath(*args, **kwargs)
####################################################################################################################
# INSTANCE METHODS
def root_folder(self):
if self.extension:
return self.file_loc_proc('')[:(-len(self.extension))]
else:
return self.file_loc_proc('')
def filepath(self, file_spec):
return self.file_loc_proc(file_spec)
def exists(self, file_spec):
return os.path.exists(self.filepath(file_spec))
def save(self, obj, file_spec, **kwargs):
if self.read_only:
raise BaseException("read_only was set to True, so you can't save anything")
else:
# make the dict specifying the input to the save_fun
file_spec = self.file_loc_proc(file_spec)
if self.pre_save_proc:
obj = self.pre_save_proc(obj)
if self.mk_save_kwargs:
file_spec_kwargs = self.mk_save_kwargs(file_spec)
self.save_fun(obj, **file_spec_kwargs)
else:
self.save_fun(obj, file_spec)
def append(self, obj, file_spec, **kwargs): # TODO: Write this code someday
"""
Intent of this function is to append data to a file's data without having to specify how to do so.
For example, if the obj is a string and the file is a text file, use file append.
If obj is a pickled dataframe, the effect (however you do it--hopefully there's a better way than loading the
data, appending, and saving the final result) should be to have a pickled version of the old and new dataframes
appended.
Etc.
"""
pass
# if isinstance(obj, basestring):
# raise ValueError("strings not implemented yet")
# elif isinstance(obj, (pd.DataFrame, pd.Series)):
# pass
def load(self, file_spec, **kwargs):
file_spec = self.file_loc_proc(file_spec)
if pfile_name.get_extension(file_spec) not in ['.xls', '.xlsx']:
if self.mk_load_kwargs:
file_spec_kwargs = self.mk_load_kwargs(file_spec)
obj = self.load_fun(**file_spec_kwargs)
else:
obj = self.load_fun(file_spec)
if self.post_load_proc:
obj = self.post_load_proc(obj)
else:
# obj = pd.read_excel(file_spec, **kwargs)
xls = pd.ExcelFile(file_spec)
kwargs = dict({'sheetname': xls.sheet_names[0]}, **kwargs) # take first sheet if sheet not specified
obj = pd.read_excel(file_spec, **kwargs)
#obj = xls.parse(**kwargs)
return obj
def copy_local_file_to(self, local_file_path, target_file_spec):
'''
Copies a file from the local computer to self.filepath(target_file_spec)
:param local_file_path:
:param target_file_spec:
:return:
'''
if self.read_only:
raise BaseException("read_only was set to True, so you can't copy anything to this location")
else:
if self.location == LOCATION_LOCAL:
if not os.path.exists(local_file_path):
local_file_path = self.filepath(local_file_path)
shutil.copyfile(local_file_path, self.filepath(target_file_spec))
elif self.location == LOCATION_S3:
# make the dict specifying the input to the save_fun
target_file_spec = self.file_loc_proc(target_file_spec)
if self.pre_save_proc:
local_file_path = self.pre_save_proc(local_file_path)
if self.mk_save_kwargs:
file_spec_kwargs = self.mk_save_kwargs(target_file_spec)
self.copy_local_file_to_fun(local_file_path, **file_spec_kwargs)
else:
raise ("this shouldn't happen")
else:
raise ValueError("unknown location")
def copy_to(self, target_relative_root, file_spec, target_location=None):
if isinstance(target_relative_root, str):
target_relative_root, target_location = \
_make_a_file_loc_proc_and_location_from_string_specifications(target_relative_root, target_location)
# make a file accessor for the (target_location, target_relative_root)
facc = Accessor(relative_root=target_relative_root, location=target_location)
####################################################################################################################
# PARTIAL FACTORIES
def _add_extension_handler(self, extension, force_extension=False):
extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension)
self.file_loc_proc = lambda x : self.file_loc_proc(extension_handler.process(x))
def _guess_missing_attributes(self):
if self.file_loc_proc is None: # if no file_loc_proc is given
if self.location is not None and isinstance(self.location, str):
                self.file_loc_proc = self.location
else:
                self.file_loc_proc = LOCATION_LOCAL
elif isinstance(self.file_loc_proc, str): # if file_loc_proc is a string
self.file_loc_proc, self.location = \
_make_a_file_loc_proc_and_location_from_string_specifications(self.file_loc_proc, self.location)
# if self.file_loc_proc==LOCATION_LOCAL:
# self.location = LOCATION_LOCAL
# self.file_loc_proc = ''
# elif self.file_loc_proc==LOCATION_S3:
# self.location = LOCATION_S3
# self.file_loc_proc = ''
# else:
# if self.location==LOCATION_LOCAL:
# self.file_loc_proc = FilepathHandler(relative_root=os.path.join(MS_DATA,self.file_loc_proc)).process
# elif self.location==LOCATION_S3:
# self.file_loc_proc = FilepathHandler(relative_root=os.path.join('loc-data',self.file_loc_proc)).process
# set defaults for remaining missing attributes
self._set_defaults()
def _set_defaults(self):
if self.location is None:
print("setting location to LOCAL (because you didn't specify a location)")
self.location = LOCATION_LOCAL
if self.location == LOCATION_LOCAL:
self._set_local_defaults()
elif self.location == LOCATION_S3:
self._set_s3_defaults()
def _set_local_defaults(self, root_folder=MS_DATA):
# set defaults for local if attr is None
self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root=os.path.join(root_folder)).process
self.save_fun = self.save_fun or LocalIOMethods().unicode_save
self.load_fun = self.load_fun or LocalIOMethods().unicode_load
# self.pre_save_proc = self.pre_save_proc or FilepathHandler().process
# self.post_load_proc = self.post_load_proc or FilepathHandler().process
def _set_s3_defaults(self):
# set defaults for local if attr is None
self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root='loc-data').process
self.mk_save_kwargs = fullpath_to_s3_kargs
self.mk_load_kwargs = fullpath_to_s3_kargs
self.save_fun = self.save_fun or S3IOMethods().unicode_save
self.load_fun = self.load_fun or S3IOMethods().unicode_load
self.copy_local_file_to_fun = S3IOMethods().copy_local_file_to_fun
####################################################################################################################
# OBJECT UTILS
def local_file_loc_proc_simple(self, file_spec):
# add extension
file_spec = self.handle_extension(file_spec)
        # remove the leading slash if present (because the root folder already ends with /)
        if file_spec.startswith('/'):
            file_spec = file_spec[1:]
        return file_spec
def handle_extension(self, file_spec):
if self.extension:
if self.force_extension:
file_spec = pfile_name.replace_extension(file_spec, self.extension)
else:
file_spec = pfile_name.add_extension_if_not_present(file_spec, self.extension)
        return os.path.join(self.root_folder(), file_spec)
####################################################################################################################
# OTHER UTILS
def _make_a_file_loc_proc_and_location_from_string_specifications(file_loc_proc, location):
if file_loc_proc is None and isinstance(location, str):
file_loc_proc = location + "/"
location = None
elif location is None and isinstance(file_loc_proc, str):
        first_folder = pfile_name.get_highest_level_folder(file_loc_proc)
if first_folder in [LOCATION_LOCAL, LOCATION_S3]:
location = first_folder # set the location to first_folder
file_loc_proc.replace(location+"/","") # remove the first_folder
else:
raise ValueError("location was not specified and couldn't be guessed from the file_loc_proc")
else:
raise ValueError("you've neither specified a file_loc_proc (as a file_loc_proc) nor a location")
# make a file accessor for the (location, target_relative_root)
file_loc_proc = FilepathHandler(relative_root=os.path.join(location,file_loc_proc)).process
return (file_loc_proc, location)
def file_loc_proc_from_full_path(fullpath):
return FilepathHandler(relative_root=fullpath).process
def fullpath_to_s3_kargs(filename):
# remove slash suffix if present (because self.sound_file_root_folder ends with / already)
if filename.startswith('/'):
filename = filename[1:]
mother_root = pfile_name.get_highest_level_folder(filename)
rest_of_the_filepath = filename.replace(mother_root + '/','',1)
return {
'bucket_name': mother_root,
'key_name': rest_of_the_filepath
}
class ExtensionHandler(object):
def __init__(self, extension=None, force_extension=False):
self.extension = extension
self.force_extension = force_extension
def process(self, file_spec):
if self.force_extension:
return pfile_name.replace_extension(file_spec, self.extension)
else:
return pfile_name.add_extension_if_not_present(file_spec, self.extension)
class FilepathHandler(object):
def __init__(self, relative_root=''):
self.relative_root = relative_root
def process(self, filepath=''):
return os.path.join(self.relative_root, filepath)
##### LOCAL METHODS
class LocalIOMethods(object):
def __init__(self, encoding="UTF-8"):
self.encoding = encoding
def unicode_save(self, obj, filepath=None, **kwargs):
if isinstance(obj, str):
# pstr_to.file(string=pstr_trans.to_unicode_or_bust(obj), tofile=filepath, encoding=self.encoding)
# pstr_to.file(string=pstr_trans.to_utf8_or_bust_iter(obj), tofile=filepath, encoding=self.encoding)
# pstr_to.file(string=pstr_trans.str_to_utf8_or_bust(obj), tofile=filepath, encoding=self.encoding)
pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding)
else:
            pickle.dump(obj=obj, file=open(filepath, 'wb'))
def simple_save(self, obj, filepath=None, **kwargs):
if isinstance(obj, str):
pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding)
else:
            pickle.dump(obj=obj, file=open(filepath, 'wb'))
def unicode_load(self, filepath=None, **kwargs):
"""
try pd.from_pickle, then pickle.loading, and if it doesn't work, try file_to.string
"""
return pstr_trans.to_unicode_or_bust(self.simple_load(filepath=filepath, **kwargs))
# try:
# try: # getting it as a pandas object
# return pstr_trans.to_unicode_or_bust(pd.read_pickle(path=filepath))
# except Exception: # getting it as a pickled object
# return pstr_trans.to_unicode_or_bust(pickle.load(file=open(filepath, 'r')))
# except Exception: # getting it as a string
# return pstr_trans.to_unicode_or_bust(file_to.string(filename=filepath))
def simple_load(self, filepath=None, **kwargs):
"""
try pd.read_pickle, pickle.load, and file_to.string in that order
"""
try:
try: # getting it as a pandas object
return pd.read_pickle(path=filepath)
except Exception: # getting it as a pickled object
                return pickle.load(file=open(filepath, 'rb'))
except Exception: # getting it as a string
return file_to.string(filename=filepath)
##### S3 METHODS
class S3IOMethods(object):
def __init__(self, **kwargs):
self.s3 = S3(**kwargs)
def unicode_save(self, obj, key_name, bucket_name):
if isinstance(obj, str):
self.s3.dumps(the_str=pstr_trans.to_unicode_or_bust(obj), key_name=key_name, bucket_name=bucket_name)
else:
self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name)
def simple_save(self, obj, key_name, bucket_name):
if isinstance(obj, str):
self.s3.dumps(the_str=obj, key_name=key_name, bucket_name=bucket_name)
else:
self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name)
def unicode_load(self, key_name, bucket_name):
"""
        try loading a pickled object from S3 first, and if that doesn't work, load it as a string (coerced to unicode)
"""
try:
return self.s3.loado(key_name=key_name, bucket_name=bucket_name)
except:
return pstr_trans.to_unicode_or_bust(self.s3.loads(key_name=key_name, bucket_name=bucket_name))
def simple_load(self, key_name, bucket_name):
"""
        try loading a pickled object from S3 first, and if that doesn't work, load it as a string
"""
try:
return self.s3.loado(key_name=key_name, bucket_name=bucket_name)
except:
return self.s3.loads(key_name=key_name, bucket_name=bucket_name)
def copy_local_file_to_fun(self, filepath, key_name, bucket_name):
return self.s3.dumpf(f=filepath, key_name=key_name, bucket_name=bucket_name)
| [((19, 14, 19, 49), 'ut.util.importing.get_environment_variable', 'get_environment_variable', ({(19, 39, 19, 48): '"""MS_DATA"""'}, {}), "('MS_DATA')", False, 'from ut.util.importing import get_environment_variable\n'), ((89, 18, 89, 70), 'ut.serialize.s3.S3', 'S3', (), '', False, 'from ut.serialize.s3 import S3\n'), ((344, 18, 344, 63), 'ut.pfile.name.get_highest_level_folder', 'pfile_name.get_highest_level_folder', ({(344, 54, 344, 62): 'filename'}, {}), '(filename)', True, 'import ut.pfile.name as pfile_name\n'), ((30, 24, 30, 67), 'ut.pfile.name.ensure_slash_suffix', 'pfile_name.ensure_slash_suffix', ({(30, 55, 30, 66): 'root_folder'}, {}), '(root_folder)', True, 'import ut.pfile.name as pfile_name\n'), ((308, 15, 308, 56), 'os.path.join', 'os.path.join', ({(308, 28, 308, 44): 'self.root_folder', (308, 46, 308, 55): 'file_spec'}, {}), '(self.root_folder, file_spec)', False, 'import os\n'), ((370, 15, 370, 57), 'os.path.join', 'os.path.join', ({(370, 28, 370, 46): 'self.relative_root', (370, 48, 370, 56): 'filepath'}, {}), '(self.relative_root, filepath)', False, 'import os\n'), ((424, 18, 424, 30), 'ut.serialize.s3.S3', 'S3', ({}, {}), '(**kwargs)', False, 'from ut.serialize.s3 import S3\n'), ((183, 11, 183, 46), 'ut.pfile.name.get_extension', 'pfile_name.get_extension', ({(183, 36, 183, 45): 'file_spec'}, {}), '(file_spec)', True, 'import ut.pfile.name as pfile_name\n'), ((193, 18, 193, 41), 'pandas.ExcelFile', 'pd.ExcelFile', ({(193, 31, 193, 40): 'file_spec'}, {}), '(file_spec)', True, 'import pandas as pd\n'), ((195, 18, 195, 53), 'pandas.read_excel', 'pd.read_excel', ({(195, 32, 195, 41): 'file_spec'}, {}), '(file_spec, **kwargs)', True, 'import pandas as pd\n'), ((325, 23, 325, 68), 'ut.pfile.name.get_highest_level_folder', 'pfile_name.get_highest_level_folder', ({(325, 59, 325, 67): 'location'}, {}), '(location)', True, 'import ut.pfile.name as pfile_name\n'), ((360, 19, 360, 74), 'ut.pfile.name.replace_extension', 'pfile_name.replace_extension', ({(360, 48, 360, 57): 'file_spec', (360, 59, 360, 73): 'self.extension'}, {}), '(file_spec, self.extension)', True, 'import ut.pfile.name as pfile_name\n'), ((362, 19, 362, 85), 'ut.pfile.name.add_extension_if_not_present', 'pfile_name.add_extension_if_not_present', ({(362, 59, 362, 68): 'file_spec', (362, 70, 362, 84): 'self.extension'}, {}), '(file_spec, self.extension)', True, 'import ut.pfile.name as pfile_name\n'), ((384, 12, 384, 77), 'ut.pstr.to.file', 'pstr_to.file', (), '', True, 'import ut.pstr.to as pstr_to\n'), ((390, 12, 390, 77), 'ut.pstr.to.file', 'pstr_to.file', (), '', True, 'import ut.pstr.to as pstr_to\n'), ((44, 49, 44, 92), 'ut.pfile.name.ensure_slash_suffix', 'pfile_name.ensure_slash_suffix', ({(44, 80, 44, 91): 'root_folder'}, {}), '(root_folder)', True, 'import ut.pfile.name as pfile_name\n'), ((305, 28, 305, 83), 'ut.pfile.name.replace_extension', 'pfile_name.replace_extension', ({(305, 57, 305, 66): 'file_spec', (305, 68, 305, 82): 'self.extension'}, {}), '(file_spec, self.extension)', True, 'import ut.pfile.name as pfile_name\n'), ((307, 28, 307, 94), 'ut.pfile.name.add_extension_if_not_present', 'pfile_name.add_extension_if_not_present', ({(307, 68, 307, 77): 'file_spec', (307, 79, 307, 93): 'self.extension'}, {}), '(file_spec, self.extension)', True, 'import ut.pfile.name as pfile_name\n'), ((334, 50, 334, 86), 'os.path.join', 'os.path.join', ({(334, 63, 334, 71): 'location', (334, 72, 334, 85): 'file_loc_proc'}, {}), '(location, file_loc_proc)', False, 'import os\n'), ((413, 23, 413, 52), 
'pandas.read_pickle', 'pd.read_pickle', (), '', True, 'import pandas as pd\n'), ((417, 19, 417, 52), 'ut.pfile.to.string', 'file_to.string', (), '', True, 'import ut.pfile.to as file_to\n'), ((210, 23, 210, 54), 'os.path.exists', 'os.path.exists', ({(210, 38, 210, 53): 'local_file_path'}, {}), '(local_file_path)', False, 'import os\n'), ((428, 34, 428, 68), 'ut.pstr.trans.to_unicode_or_bust', 'pstr_trans.to_unicode_or_bust', ({(428, 64, 428, 67): 'obj'}, {}), '(obj)', True, 'import ut.pstr.trans as pstr_trans\n'), ((274, 81, 274, 106), 'os.path.join', 'os.path.join', ({(274, 94, 274, 105): 'root_folder'}, {}), '(root_folder)', False, 'import os\n')] |
cstenkamp/MastersThesisText | scripts/statistics.py | d026f9c19819c83d99dfff12b594db9d061bfb31 | import subprocess
import git
from os.path import dirname, join, abspath
import pandas as pd
from matplotlib import pyplot as plt
import requests
import io
import zipfile
import tempfile
from datetime import timedelta
FILENAME = join(dirname(__file__), "..", "thesis.tex")
DISP_PAGESMAX = 80
DISP_WORDSMAX = 10000
def return_piped_cmd(cmd, stdin=None):
cmd = cmd.split("|")
if not stdin:
ps = subprocess.Popen(cmd[0].strip().split(" "), stdout=subprocess.PIPE)
else:
ps = subprocess.Popen(cmd[0].strip().split(" "), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ps.stdin.write(stdin.encode("UTF-8"))
ps.stdin.close()
if len(cmd) == 1:
return ps.stdout.read().decode("UTF-8")
output = subprocess.check_output(cmd[1].strip().split(" "), stdin=ps.stdout).decode("UTF-8")
ps.wait()
return output
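# Example usage (illustrative; assumes detex and wc are available on the PATH, as in main below):
#   return_piped_cmd("detex thesis.tex | wc -w")           -> word count of the detexed file
#   return_piped_cmd("detex | wc -w", stdin=latex_string)  -> word count of an in-memory LaTeX string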
def get_todos(fname=None, txt=None):
if fname:
with open(fname, "r") as rfile:
txt = rfile.read()
txt = txt.replace("% ", "%").lower()
return txt.count("%todo")
def get_npages(fname):
tmp = return_piped_cmd(f'pdfinfo {fname.replace(".tex", ".pdf")}')
return int([i for i in tmp.split("\n") if "Pages:" in i][0][len("Pages:"):].strip())
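# get_npages scrapes the "Pages:" line of `pdfinfo thesis.pdf`, whose output looks roughly like
# (illustrative):
#   Title:          thesis
#   Pages:          83
# so the comprehension above returns 83 in that example.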
def github_get_npages(owner, repo, pdfname):
date_pages = {}
resp = requests.get(f"https://api.github.com/repos/{owner}/{repo}/actions/artifacts", headers=dict(Accept="application/vnd.github.v3+json"))
for i in resp.json()["artifacts"]:
art_id = i["url"][i["url"].rfind("/")+1:]
re2 = requests.get(f"https://nightly.link/{owner}/{repo}/actions/artifacts/{art_id}.zip")
if re2.status_code != 404:
# print(i["created_at"])
archive = zipfile.ZipFile(io.BytesIO(re2.content))
with tempfile.NamedTemporaryFile(suffix=".pdf") as wfile:
wfile.write(archive.read(pdfname))
n_pages = get_npages(wfile.name)
# print(f"Pages: {n_pages}")
date_pages[pd.to_datetime([i["created_at"]]).to_pydatetime()[0]] = n_pages
return pd.Series(date_pages)
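# The returned Series maps each artifact's created_at timestamp to the page count of the PDF it
# contains, e.g. (illustrative values): 2021-05-01 13:30:00+00:00 -> 83.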
def plot_df(df):
ax1 = df["Words"].plot(color="red", linestyle="-", marker="o", ylabel="Words")
ax1.set_ylim(0, max(df["Words"].max(), DISP_WORDSMAX))
ax2 = ax1.twinx()
ax2.spines['right'].set_position(('axes', 1.0))
df["Todos"].plot(ax=ax2, color="blue", linestyle="-", marker="x", ylabel="Todos")
ax3 = ax1.twinx()
df["Pages"].plot(ax=ax3, color="yellow", linestyle="", marker="s", ylabel="Pages")
for ax in [ax2, ax3]: ax.set_ylim((0, max(df["Todos"].max(), df["Pages"].max(), DISP_PAGESMAX)))
ax3.yaxis.set_ticklabels([])
lines, labels = list(zip(*[[i[0] for i in ax.get_legend_handles_labels()] for ax in [ax1, ax2, ax3]]))
plt.legend(lines, labels, loc=0)
plt.show()
def create_history_df(repo_dir, filename):
#print(abspath(repo_dir))
repo = git.Repo(repo_dir)
all_commits = {}
for commit in repo.iter_commits():
txt = (commit.tree / filename).data_stream.read().decode("UTF-8")
n_words = int(return_piped_cmd("detex | wc -w", stdin=txt).strip())
n_todos = get_todos(txt=txt)
# print(datetime.fromtimestamp(commit.committed_date))
# print(f"words: {n_words}, todos: {n_todos}")
all_commits[pd.to_datetime(commit.committed_datetime, utc=True)] = [n_words, n_todos]
df = pd.DataFrame(all_commits, index=["Words", "Todos"]).T
return df
def merge_page_df(df, date_pages):
for date in df.index:
try:
nearest_datepage_after = date_pages.index[date_pages.index.get_loc(date, method='bfill')]
except KeyError:
continue
if nearest_datepage_after-date <= timedelta(hours=2):
df.loc[date, "Pages"] = int(date_pages[nearest_datepage_after])
return df
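# Illustrative behaviour (my example): a commit from 2021-05-01 12:00 whose next CI artifact was
# built at 2021-05-01 13:30 (within the 2-hour window) receives that artifact's page count; if
# the next artifact only appears a day later, the commit's "Pages" cell is left unset (NaN).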
if __name__ == "__main__":
#history
df = create_history_df(dirname(FILENAME), "thesis.tex")
date_pages = github_get_npages("cstenkamp", "MastersThesisText", "thesis.pdf")
df = merge_page_df(df, date_pages)
plot_df(df)
#current
n_words = int(return_piped_cmd(f"detex {FILENAME} | wc -w"))
n_pages = get_npages(FILENAME)
n_todos = get_todos(FILENAME)
print(f"Words: {n_words}, Pages: {n_pages}, Todos: {n_todos}") | [((12, 16, 12, 33), 'os.path.dirname', 'dirname', ({(12, 24, 12, 32): '__file__'}, {}), '(__file__)', False, 'from os.path import dirname, join, abspath\n'), ((59, 11, 59, 32), 'pandas.Series', 'pd.Series', ({(59, 21, 59, 31): 'date_pages'}, {}), '(date_pages)', True, 'import pandas as pd\n'), ((72, 4, 72, 36), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'from matplotlib import pyplot as plt\n'), ((73, 4, 73, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((78, 11, 78, 29), 'git.Repo', 'git.Repo', ({(78, 20, 78, 28): 'repo_dir'}, {}), '(repo_dir)', False, 'import git\n'), ((50, 14, 50, 97), 'requests.get', 'requests.get', ({(50, 27, 50, 96): 'f"""https://nightly.link/{owner}/{repo}/actions/artifacts/{art_id}.zip"""'}, {}), "(\n f'https://nightly.link/{owner}/{repo}/actions/artifacts/{art_id}.zip')", False, 'import requests\n'), ((87, 9, 87, 60), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((104, 27, 104, 44), 'os.path.dirname', 'dirname', ({(104, 35, 104, 43): 'FILENAME'}, {}), '(FILENAME)', False, 'from os.path import dirname, join, abspath\n'), ((96, 42, 96, 60), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((53, 38, 53, 61), 'io.BytesIO', 'io.BytesIO', ({(53, 49, 53, 60): 're2.content'}, {}), '(re2.content)', False, 'import io\n'), ((54, 17, 54, 59), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (), '', False, 'import tempfile\n'), ((86, 20, 86, 71), 'pandas.to_datetime', 'pd.to_datetime', (), '', True, 'import pandas as pd\n'), ((58, 27, 58, 60), 'pandas.to_datetime', 'pd.to_datetime', ({(58, 42, 58, 59): "[i['created_at']]"}, {}), "([i['created_at']])", True, 'import pandas as pd\n')] |
TheFraserLab/enrich_pvalues | setup.py | 6c5065da5e6367cc39a045afbdfa1e78322857a6 | """Installation instructions for enrich_pvalues."""
import os
from setuptools import setup
import enrich_pvalues # For version
VERSION=enrich_pvalues.__version__
GITHUB='https://github.com/MikeDacre/enrich_pvalues'
with open('requirements.txt') as fin:
REQUIREMENTS = [
i[0] for i in [j.split('>=') for j in fin.read().strip().split('\n')]
]
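# Illustrative parsing sketch (hypothetical requirements.txt): the comprehension above keeps only
# the package names, e.g. "numpy>=1.15\nscipy>=1.1" -> ['numpy', 'scipy']. Only '>=' pins are
# stripped; a bare name such as "requests" would pass through unchanged.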
def read(fname):
"""Read the contents of a file in this dir."""
with open(os.path.join(os.path.dirname(__file__), fname)) as fin:
return fin.read()
# Actual setup instructions
setup(
name = 'enrich_pvalues',
version = VERSION,
author = 'Mike Dacre',
author_email = '[email protected]',
description = (
"Compare one dataset to another at a variety of p-value cutoffs"
),
keywords = (
"statistics p-values biology molecular-biology console"
),
long_description = read('README.rst'),
license = 'MIT',
# URLs
url = GITHUB,
download_url='{0}/archive/v{1}.tar.gz'.format(GITHUB, VERSION),
py_modules=['enrich_pvalues'],
entry_points = {
'console_scripts': [
'enrich_pvalues = enrich_pvalues:main',
],
},
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Utilities',
],
# Requirements
requires=REQUIREMENTS,
install_requires=REQUIREMENTS
)
| [((18, 27, 18, 52), 'os.path.dirname', 'os.path.dirname', ({(18, 43, 18, 51): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
brandonmcclure/homeschool | homeschool/students/tests/test_forms.py | 6ba2e35014740e952222535e9492cde0d41338b4 | import datetime
from homeschool.courses.tests.factories import (
CourseFactory,
CourseTaskFactory,
GradedWorkFactory,
)
from homeschool.schools.tests.factories import GradeLevelFactory
from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm
from homeschool.students.models import Coursework, Grade
from homeschool.students.tests.factories import (
CourseworkFactory,
EnrollmentFactory,
GradeFactory,
StudentFactory,
)
from homeschool.test import TestCase
class TestCourseworkForm(TestCase):
def test_is_valid(self):
"""The coursework validates."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert is_valid
def test_student_can_create_coursework(self):
"""The student is enrolled in a course that contains the task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
assert form.non_field_errors() == [
"The student is not enrolled in this course."
]
def test_save_new_coursework(self):
"""A new coursework is created for a student and task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 1
)
def test_save_existing_coursework(self):
"""A new coursework is created for a student and task."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
CourseworkFactory(student=student, course_task=course_task)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 1
)
def test_save_deletes_coursework(self):
"""A blank completed date deletes an existing coursework."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
CourseworkFactory(student=student, course_task=course_task)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
}
form = CourseworkForm(data=data)
form.is_valid()
form.save()
assert (
Coursework.objects.filter(student=student, course_task=course_task).count()
== 0
)
def test_completed_date_outside_school_year(self):
"""The completed data must be in the school year."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": str(
grade_level.school_year.start_date - datetime.timedelta(days=1)
),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
assert form.non_field_errors() == [
"The completed date must be in the school year."
]
def test_invalid_course_task(self):
"""An invalid course task is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": "0",
"completed_date": str(grade_level.school_year.start_date),
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
def test_invalid_completed_date(self):
"""An invalid completed date is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
course_task = CourseTaskFactory(course=course)
data = {
"student": str(student.id),
"course_task": str(course_task.id),
"completed_date": "boom",
}
form = CourseworkForm(data=data)
is_valid = form.is_valid()
assert not is_valid
class TestEnrollmentForm(TestCase):
def test_students_only_enroll_in_one_grade_level_per_year(self):
"""A student can only be enrolled in a single grade level in a school year."""
user = self.make_user()
enrollment = EnrollmentFactory(
student__school=user.school, grade_level__school_year__school=user.school
)
another_grade_level = GradeLevelFactory(
school_year=enrollment.grade_level.school_year
)
data = {
"student": str(enrollment.student.id),
"grade_level": str(another_grade_level.id),
}
form = EnrollmentForm(user=user, data=data)
is_valid = form.is_valid()
assert not is_valid
assert (
"A student may not be enrolled in multiple grade levels in a school year. "
f"{enrollment.student} is enrolled in {enrollment.grade_level}."
in form.non_field_errors()
)
def test_no_grade_level(self):
"""A missing grade level raises a validation error."""
user = self.make_user()
school = user.school
enrollment = EnrollmentFactory(
student__school=school, grade_level__school_year__school=school
)
data = {"student": str(enrollment.student.id), "grade_level": "0"}
form = EnrollmentForm(user=user, data=data)
is_valid = form.is_valid()
assert not is_valid
assert "You need to select a grade level." in form.non_field_errors()
class TestGradeForm(TestCase):
def test_is_valid(self):
"""The new grade validates."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
is_valid = form.is_valid()
assert is_valid
def test_invalid_graded_work(self):
"""An invalid graded work is an error."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
GradedWorkFactory(course_task__course=course)
data = {"student": str(student.id), "graded_work": "0", "score": "100"}
form = GradeForm(data=data)
is_valid = form.is_valid()
assert not is_valid
def test_save(self):
"""The form creates a new grade."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
form.is_valid()
form.save()
assert (
Grade.objects.filter(
student=student, graded_work=graded_work, score=100
).count()
== 1
)
def test_save_update(self):
"""The form updates a grade."""
user = self.make_user()
student = StudentFactory(school=user.school)
grade_level = GradeLevelFactory(school_year__school=user.school)
EnrollmentFactory(student=student, grade_level=grade_level)
course = CourseFactory(grade_levels=[grade_level])
graded_work = GradedWorkFactory(course_task__course=course)
GradeFactory(student=student, graded_work=graded_work)
data = {
"student": str(student.id),
"graded_work": str(graded_work.id),
"score": "100",
}
form = GradeForm(data=data)
form.is_valid()
form.save()
assert (
Grade.objects.filter(student=student, graded_work=graded_work).count() == 1
)
| [((24, 18, 24, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((25, 22, 25, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((26, 8, 26, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((27, 17, 27, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((28, 22, 28, 54), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((34, 15, 34, 40), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((43, 18, 43, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((44, 22, 44, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((45, 17, 45, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((46, 22, 46, 54), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((52, 15, 52, 40), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((64, 18, 64, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((65, 22, 65, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((66, 8, 66, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((67, 17, 67, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((68, 22, 68, 54), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((74, 15, 74, 40), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((87, 18, 87, 52), 
'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((88, 22, 88, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((89, 8, 89, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((90, 17, 90, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((91, 22, 91, 54), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((92, 8, 92, 67), 'homeschool.students.tests.factories.CourseworkFactory', 'CourseworkFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((98, 15, 98, 40), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((111, 18, 111, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((112, 22, 112, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((113, 8, 113, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((114, 17, 114, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((115, 22, 115, 54), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((116, 8, 116, 67), 'homeschool.students.tests.factories.CourseworkFactory', 'CourseworkFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((121, 15, 121, 40), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((134, 18, 134, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((135, 22, 135, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((136, 8, 136, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, 
EnrollmentFactory, GradeFactory, StudentFactory\n'), ((137, 17, 137, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((138, 22, 138, 54), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((146, 15, 146, 40), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((158, 18, 158, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((159, 22, 159, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((160, 8, 160, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((161, 17, 161, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((162, 8, 162, 40), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((168, 15, 168, 40), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((177, 18, 177, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((178, 22, 178, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((179, 8, 179, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((180, 17, 180, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((181, 22, 181, 54), 'homeschool.courses.tests.factories.CourseTaskFactory', 'CourseTaskFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((187, 15, 187, 40), 'homeschool.students.forms.CourseworkForm', 'CourseworkForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((198, 21, 200, 9), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((201, 30, 203, 9), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from 
homeschool.schools.tests.factories import GradeLevelFactory\n'), ((208, 15, 208, 51), 'homeschool.students.forms.EnrollmentForm', 'EnrollmentForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((223, 21, 225, 9), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((227, 15, 227, 51), 'homeschool.students.forms.EnrollmentForm', 'EnrollmentForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((239, 18, 239, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((240, 22, 240, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((241, 8, 241, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((242, 17, 242, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((243, 22, 243, 67), 'homeschool.courses.tests.factories.GradedWorkFactory', 'GradedWorkFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((249, 15, 249, 35), 'homeschool.students.forms.GradeForm', 'GradeForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((258, 18, 258, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((259, 22, 259, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((260, 8, 260, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((261, 17, 261, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((262, 8, 262, 53), 'homeschool.courses.tests.factories.GradedWorkFactory', 'GradedWorkFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((264, 15, 264, 35), 'homeschool.students.forms.GradeForm', 'GradeForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((273, 18, 273, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((274, 22, 274, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import 
GradeLevelFactory\n'), ((275, 8, 275, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((276, 17, 276, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((277, 22, 277, 67), 'homeschool.courses.tests.factories.GradedWorkFactory', 'GradedWorkFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((283, 15, 283, 35), 'homeschool.students.forms.GradeForm', 'GradeForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((298, 18, 298, 52), 'homeschool.students.tests.factories.StudentFactory', 'StudentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((299, 22, 299, 72), 'homeschool.schools.tests.factories.GradeLevelFactory', 'GradeLevelFactory', (), '', False, 'from homeschool.schools.tests.factories import GradeLevelFactory\n'), ((300, 8, 300, 67), 'homeschool.students.tests.factories.EnrollmentFactory', 'EnrollmentFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((301, 17, 301, 58), 'homeschool.courses.tests.factories.CourseFactory', 'CourseFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((302, 22, 302, 67), 'homeschool.courses.tests.factories.GradedWorkFactory', 'GradedWorkFactory', (), '', False, 'from homeschool.courses.tests.factories import CourseFactory, CourseTaskFactory, GradedWorkFactory\n'), ((303, 8, 303, 62), 'homeschool.students.tests.factories.GradeFactory', 'GradeFactory', (), '', False, 'from homeschool.students.tests.factories import CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory\n'), ((309, 15, 309, 35), 'homeschool.students.forms.GradeForm', 'GradeForm', (), '', False, 'from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm\n'), ((80, 12, 80, 79), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', (), '', False, 'from homeschool.students.models import Coursework, Grade\n'), ((104, 12, 104, 79), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', (), '', False, 'from homeschool.students.models import Coursework, Grade\n'), ((127, 12, 127, 79), 'homeschool.students.models.Coursework.objects.filter', 'Coursework.objects.filter', (), '', False, 'from homeschool.students.models import Coursework, Grade\n'), ((143, 53, 143, 79), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((289, 12, 291, 13), 'homeschool.students.models.Grade.objects.filter', 'Grade.objects.filter', (), '', False, 'from homeschool.students.models import Coursework, Grade\n'), ((315, 12, 315, 74), 'homeschool.students.models.Grade.objects.filter', 'Grade.objects.filter', (), '', False, 'from homeschool.students.models import Coursework, Grade\n')] |
ai-se/heroes_compsci | Mining_Projects/getAllProjects_Parallel.py | 613fd623a6da073b2c62c773ed902acb0c756809 | """ @Author Jchakra"""
""" This code is to download project information using GitHub API (Following Amrit's Hero paper criteria of how to find good projects) """
from multiprocessing import Process,Lock
import time
import json
import requests
## Downloading all the projects
def func1():
repo_result = []
Token_list = [''**'',''**'',''**'',''**'',''**'']
i = 0
api_url = 'https://api.github.com/'
while i < 10000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 1 ")
break
  ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file1.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 1 finished", len(repo_result))
def func2():
repo_result = []
Token_list = [''**'',''**'',''**'',''**'',''**'']
i = 10000
api_url = 'https://api.github.com/'
while i < 20000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 2 ")
break
  ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file2.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 2 finished", len(repo_result))
def func3():
repo_result = []
Token_list = [''**'',''**'',''**'',''**'',''**'']
i = 20000
api_url = 'https://api.github.com/'
while i < 30000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 3 ")
break
  ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file3.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 3 finished", len(repo_result))
def func4():
repo_result = []
Token_list = [''**'',''**'',''**'',''**'',''**'']
i = 30000
api_url = 'https://api.github.com/'
while i < 40000: # This number will be increased to collect all the projects
repo_url = api_url + 'repositories?since=' + str(i)
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
repo_response = requests.get(repo_url, headers=headers).json()
#print(repo_response)
try:
if ( len(repo_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
project_list = []
try:
for j in range(0,len(repo_response)):
project_id = repo_response[j]['id']
project_name = repo_response[j]['name']
project_full_name = repo_response[j]['full_name']
project_html_url = repo_response[j]['html_url']
project_owner_name = repo_response[j]['owner']['login']
project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" :
"", "commits" : "", "PR" : ""}
project_list.append(project_obj)
except:
print ("exception occurred")
try:
last_id = repo_response[99]["id"]
i = last_id
repo_result = repo_result + project_list
except:
print(" exception inside function 4 ")
break
  ## Removing projects having 10 or fewer issues
p = 0
while p < len(repo_result):
repo_owner = repo_result[p]['owner']
repo_name = repo_result[p]['name']
issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
issue_response = requests.get(issue_url, headers=headers).json()
try:
if ( len(issue_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(issue_response) > 10):
repo_result[p]["issues"] = len(issue_response)
p = p + 1
else:
repo_result.pop(p)
## Selecting the projects with Pull Request > 0
m = 0
while m < len(repo_result):
repo_owner = repo_result[m]['owner']
repo_name = repo_result[m]['name']
PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
PR_response = requests.get(PR_url, headers=headers).json()
try:
if ( len(PR_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(PR_response) > 0):
repo_result[m]["PR"] = len(PR_response)
m = m + 1
else:
repo_result.pop(m)
## Selecting Projects with commits > 20
n = 0
while n < len(repo_result):
repo_owner = repo_result[n]['owner']
repo_name = repo_result[n]['name']
commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits'
exception_count = 0
while exception_count < 2:
try:
for k in range(0,len(Token_list)):
headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])}
#print(Token_list[k])
commit_response = requests.get(commit_url, headers=headers).json()
try:
if ( len(commit_response['message']) > 0):
if( k == len(Token_list) - 1):
time.sleep(600)
exception_count = exception_count + 1
else:
continue
except:
break
if ( exception_count == 0):
break
else:
continue
except:
exception_count = 0
if(len(commit_response) > 20):
repo_result[n]["commits"] = len(commit_response)
n = n + 1
else:
repo_result.pop(n)
with open("repo_file4.json", "w") as repo_file:
json.dump(repo_result, repo_file)
print("function 4 finished", len(repo_result))
if __name__ == '__main__':
lock = Lock()
p1 = Process(target=func1)
p2 = Process(target=func2)
p3 = Process(target=func3)
p4 = Process(target=func4)
p1.start()
p2.start()
p3.start()
p4.start()
p1.join()
p2.join()
p3.join()
p4.join()
| [((773, 9, 773, 15), 'multiprocessing.Lock', 'Lock', ({}, {}), '()', False, 'from multiprocessing import Process, Lock\n'), ((774, 7, 774, 28), 'multiprocessing.Process', 'Process', (), '', False, 'from multiprocessing import Process, Lock\n'), ((775, 7, 775, 28), 'multiprocessing.Process', 'Process', (), '', False, 'from multiprocessing import Process, Lock\n'), ((776, 7, 776, 28), 'multiprocessing.Process', 'Process', (), '', False, 'from multiprocessing import Process, Lock\n'), ((777, 7, 777, 28), 'multiprocessing.Process', 'Process', (), '', False, 'from multiprocessing import Process, Lock\n'), ((198, 4, 198, 37), 'json.dump', 'json.dump', ({(198, 14, 198, 25): 'repo_result', (198, 27, 198, 36): 'repo_file'}, {}), '(repo_result, repo_file)', False, 'import json\n'), ((388, 4, 388, 37), 'json.dump', 'json.dump', ({(388, 14, 388, 25): 'repo_result', (388, 27, 388, 36): 'repo_file'}, {}), '(repo_result, repo_file)', False, 'import json\n'), ((577, 4, 577, 37), 'json.dump', 'json.dump', ({(577, 14, 577, 25): 'repo_result', (577, 27, 577, 36): 'repo_file'}, {}), '(repo_result, repo_file)', False, 'import json\n'), ((766, 4, 766, 37), 'json.dump', 'json.dump', ({(766, 14, 766, 25): 'repo_result', (766, 27, 766, 36): 'repo_file'}, {}), '(repo_result, repo_file)', False, 'import json\n'), ((30, 26, 30, 65), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((90, 27, 90, 67), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((131, 24, 131, 61), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((171, 28, 171, 69), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((220, 26, 220, 65), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((280, 27, 280, 67), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((321, 24, 321, 61), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((361, 28, 361, 69), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((409, 26, 409, 65), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((469, 27, 469, 67), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((510, 24, 510, 61), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((550, 28, 550, 69), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((598, 26, 598, 65), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((658, 27, 658, 67), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((699, 24, 699, 61), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((739, 28, 739, 69), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((35, 16, 35, 31), 'time.sleep', 'time.sleep', ({(35, 27, 35, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((94, 16, 94, 31), 'time.sleep', 'time.sleep', ({(94, 27, 94, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((135, 16, 135, 31), 'time.sleep', 'time.sleep', ({(135, 27, 135, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((175, 16, 175, 31), 'time.sleep', 'time.sleep', ({(175, 27, 175, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((225, 16, 225, 31), 'time.sleep', 'time.sleep', ({(225, 27, 225, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((284, 16, 284, 31), 'time.sleep', 'time.sleep', ({(284, 27, 284, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((325, 16, 325, 31), 'time.sleep', 'time.sleep', ({(325, 27, 325, 30): '(600)'}, {}), '(600)', 
False, 'import time\n'), ((365, 16, 365, 31), 'time.sleep', 'time.sleep', ({(365, 27, 365, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((414, 16, 414, 31), 'time.sleep', 'time.sleep', ({(414, 27, 414, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((473, 16, 473, 31), 'time.sleep', 'time.sleep', ({(473, 27, 473, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((514, 16, 514, 31), 'time.sleep', 'time.sleep', ({(514, 27, 514, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((554, 16, 554, 31), 'time.sleep', 'time.sleep', ({(554, 27, 554, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((603, 16, 603, 31), 'time.sleep', 'time.sleep', ({(603, 27, 603, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((662, 16, 662, 31), 'time.sleep', 'time.sleep', ({(662, 27, 662, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((703, 16, 703, 31), 'time.sleep', 'time.sleep', ({(703, 27, 703, 30): '(600)'}, {}), '(600)', False, 'import time\n'), ((743, 16, 743, 31), 'time.sleep', 'time.sleep', ({(743, 27, 743, 30): '(600)'}, {}), '(600)', False, 'import time\n')] |
ably77/dcos-tensorflow-tools | examples/source/benchmarks/googlenet_model.py | d434ff6c0cee6db9f62be583723dc2bee46ebbf2 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Googlenet model configuration.
References:
Szegedy, Christian, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich
Going deeper with convolutions
arXiv preprint arXiv:1409.4842 (2014)
"""
import model
class GooglenetModel(model.Model):
def __init__(self):
super(GooglenetModel, self).__init__('googlenet', 224, 32, 0.005)
def add_inference(self, cnn):
def inception_v1(cnn, k, l, m, n, p, q):
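      # Four parallel branches of the Inception v1 module: a 1x1 conv (k filters),
      # a 1x1 reduce + 3x3 conv (l, m), a 1x1 reduce + 5x5 conv (n, p), and a
      # 3x3 max-pool followed by a 1x1 projection (q).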
cols = [[('conv', k, 1, 1)], [('conv', l, 1, 1), ('conv', m, 3, 3)],
[('conv', n, 1, 1), ('conv', p, 5, 5)],
[('mpool', 3, 3, 1, 1, 'SAME'), ('conv', q, 1, 1)]]
cnn.inception_module('incept_v1', cols)
cnn.conv(64, 7, 7, 2, 2)
cnn.mpool(3, 3, 2, 2, mode='SAME')
cnn.conv(64, 1, 1)
cnn.conv(192, 3, 3)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 64, 96, 128, 16, 32, 32)
inception_v1(cnn, 128, 128, 192, 32, 96, 64)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 192, 96, 208, 16, 48, 64)
inception_v1(cnn, 160, 112, 224, 24, 64, 64)
inception_v1(cnn, 128, 128, 256, 24, 64, 64)
inception_v1(cnn, 112, 144, 288, 32, 64, 64)
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
cnn.mpool(3, 3, 2, 2, mode='SAME')
inception_v1(cnn, 256, 160, 320, 32, 128, 128)
inception_v1(cnn, 384, 192, 384, 48, 128, 128)
cnn.apool(7, 7, 1, 1, mode='VALID')
cnn.reshape([-1, 1024])
| [] |
neo-empresarial/covid-19 | demos/prey-predator/prey_predator_abm/sim_params.py | cef10ee79d955c9e84148c3c8da542788a1f7395 | """
Simulation parameters.
"""
SIMULATION_TIME_STEPS = 300
| [] |
gcosne/generative_inpainting | process_ops.py | 1ae50277e5815a4f0c1e339ede0dbfae8e5036d1 | import cv2
import numpy as np
try:
import scipy
# scipy.ndimage cannot be accessed until explicitly imported
from scipy import ndimage
except ImportError:
scipy = None
def flip_axis(x, axis):
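    # Reverse the array along `axis` by swapping that axis to the front,
    # flipping, and swapping it back.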
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def random_rotation(x, rg, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x, theta=theta, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_shift(x, wrg, hrg, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
x = apply_affine_transform(x, tx=tx, ty=ty, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_shear(x, intensity, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
x = apply_affine_transform(x, shear=shear, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order int: order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
x = apply_affine_transform(x, zx=zx, zy=zy, channel_axis=channel_axis,
fill_mode=fill_mode, cval=cval,
order=interpolation_order)
return x
def random_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
# For curving soybean pods. L.C.Uzal
def random_curves_transform(x, strength=0.1, range=(0.,255.)):
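    # Remap intensities through a random 3-point piecewise-linear curve whose
    # middle control point is perturbed by up to `strength` * (high - low) / 2.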
low, high = range
delta = (high - low) * strength / 2.
xp = np.random.uniform(low=low + delta, high=high - delta)
yp = np.random.uniform(low=xp-delta, high=xp+delta)
xp = np.asarray([low, xp, high])
yp = np.asarray([low, yp, high])
return np.interp(x,xp,yp)
def transform_matrix_offset_center(matrix, x, y):
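    # Conjugate the affine matrix with translations so that the transform is
    # applied about the image centre rather than the top-left corner.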
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
row_axis=0, col_axis=1, channel_axis=2,
fill_mode='nearest', cval=0., order=1):
"""Applies an affine transformation specified by the parameters given.
# Arguments
x: 2D numpy array, single image.
theta: Rotation angle in degrees.
tx: Width shift.
        ty: Height shift.
        shear: Shear angle in degrees.
        zx: Zoom in x direction.
        zy: Zoom in y direction.
row_axis: Index of axis for rows in the input image.
col_axis: Index of axis for columns in the input image.
channel_axis: Index of axis for channels in the input image.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
order int: order of interpolation
# Returns
The transformed version of the input.
"""
if scipy is None:
raise ImportError('Image transformations require SciPy. '
'Install SciPy.')
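    # Compose the requested rotation, shift, shear and zoom into a single 3x3
    # affine matrix, then warp each channel with ndimage below.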
transform_matrix = None
if theta != 0:
theta = np.deg2rad(theta)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shift_matrix
else:
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear = np.deg2rad(shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shear_matrix
else:
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = zoom_matrix
else:
transform_matrix = np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndimage.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=order,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_transform(x, rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
horizontal_flip=False,
vertical_flip=False,
random_curves_strength=0.):
# Generate params
if rotation_range:
theta = np.random.uniform(-rotation_range, rotation_range)
else:
theta = 0
h, w = x.shape[0], x.shape[1]
if height_shift_range:
tx = np.random.uniform(-height_shift_range, height_shift_range) * h
else:
tx = 0
if width_shift_range:
ty = np.random.uniform(-width_shift_range, width_shift_range) * w
else:
ty = 0
if shear_range:
shear = np.random.uniform(-shear_range, shear_range)
else:
shear = 0
if np.isscalar(zoom_range):
zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
# Apply transforms
x = apply_affine_transform(x,
theta,
tx, ty,
shear,
zx, zy)
if channel_shift_range != 0:
x = random_channel_shift(x, channel_shift_range, 2)
if horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, 1)
if vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, 0)
if random_curves_strength > 0.:
x = random_curves_transform(x, random_curves_strength)
return x
if __name__ == "__main__":
import argparse
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('--image', default='', type=str)
parser.add_argument('--imageOut', default='result.png', type=str)
args = parser.parse_args()
im = np.array(Image.open(args.image))
img = random_transform(im, rotation_range=10, shear_range=.5, zoom_range=.2, channel_shift_range=10., horizontal_flip=True)
Image.fromarray(np.uint8(img)).save(args.imageOut)
| [((38, 12, 38, 38), 'numpy.random.uniform', 'np.random.uniform', ({(38, 30, 38, 33): '-rg', (38, 35, 38, 37): 'rg'}, {}), '(-rg, rg)', True, 'import numpy as np\n'), ((93, 12, 93, 52), 'numpy.random.uniform', 'np.random.uniform', ({(93, 30, 93, 40): '-intensity', (93, 42, 93, 51): 'intensity'}, {}), '(-intensity, intensity)', True, 'import numpy as np\n'), ((136, 8, 136, 39), 'numpy.rollaxis', 'np.rollaxis', ({(136, 20, 136, 21): 'x', (136, 23, 136, 35): 'channel_axis', (136, 37, 136, 38): '0'}, {}), '(x, channel_axis, 0)', True, 'import numpy as np\n'), ((140, 8, 140, 40), 'numpy.stack', 'np.stack', (), '', True, 'import numpy as np\n'), ((141, 8, 141, 43), 'numpy.rollaxis', 'np.rollaxis', ({(141, 20, 141, 21): 'x', (141, 23, 141, 24): '0', (141, 26, 141, 42): 'channel_axis + 1'}, {}), '(x, 0, channel_axis + 1)', True, 'import numpy as np\n'), ((149, 9, 149, 62), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((150, 9, 150, 55), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((151, 9, 151, 36), 'numpy.asarray', 'np.asarray', ({(151, 20, 151, 35): '[low, xp, high]'}, {}), '([low, xp, high])', True, 'import numpy as np\n'), ((152, 9, 152, 36), 'numpy.asarray', 'np.asarray', ({(152, 20, 152, 35): '[low, yp, high]'}, {}), '([low, yp, high])', True, 'import numpy as np\n'), ((153, 11, 153, 29), 'numpy.interp', 'np.interp', ({(153, 21, 153, 22): 'x', (153, 23, 153, 25): 'xp', (153, 26, 153, 28): 'yp'}, {}), '(x, xp, yp)', True, 'import numpy as np\n'), ((159, 20, 159, 67), 'numpy.array', 'np.array', ({(159, 29, 159, 66): '[[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]'}, {}), '([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])', True, 'import numpy as np\n'), ((160, 19, 160, 68), 'numpy.array', 'np.array', ({(160, 28, 160, 67): '[[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]'}, {}), '([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])', True, 'import numpy as np\n'), ((278, 7, 278, 30), 'numpy.isscalar', 'np.isscalar', ({(278, 19, 278, 29): 'zoom_range'}, {}), '(zoom_range)', True, 'import numpy as np\n'), ((318, 13, 318, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((66, 9, 66, 37), 'numpy.random.uniform', 'np.random.uniform', ({(66, 27, 66, 31): '(-hrg)', (66, 33, 66, 36): 'hrg'}, {}), '(-hrg, hrg)', True, 'import numpy as np\n'), ((67, 9, 67, 37), 'numpy.random.uniform', 'np.random.uniform', ({(67, 27, 67, 31): '(-wrg)', (67, 33, 67, 36): 'wrg'}, {}), '(-wrg, wrg)', True, 'import numpy as np\n'), ((128, 17, 128, 67), 'numpy.random.uniform', 'np.random.uniform', ({(128, 35, 128, 48): 'zoom_range[0]', (128, 50, 128, 63): 'zoom_range[1]', (128, 65, 128, 66): '2'}, {}), '(zoom_range[0], zoom_range[1], 2)', True, 'import numpy as np\n'), ((137, 19, 137, 28), 'numpy.min', 'np.min', ({(137, 26, 137, 27): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((137, 30, 137, 39), 'numpy.max', 'np.max', ({(137, 37, 137, 38): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((161, 30, 161, 59), 'numpy.dot', 'np.dot', ({(161, 37, 161, 50): 'offset_matrix', (161, 52, 161, 58): 'matrix'}, {}), '(offset_matrix, matrix)', True, 'import numpy as np\n'), ((194, 16, 194, 33), 'numpy.deg2rad', 'np.deg2rad', ({(194, 27, 194, 32): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((201, 23, 203, 44), 'numpy.array', 'np.array', ({(201, 32, 203, 43): '[[1, 0, tx], [0, 1, ty], [0, 0, 1]]'}, {}), '([[1, 0, tx], [0, 1, ty], [0, 0, 1]])', True, 'import numpy as np\n'), ((210, 16, 210, 33), 'numpy.deg2rad', 'np.deg2rad', 
({(210, 27, 210, 32): 'shear'}, {}), '(shear)', True, 'import numpy as np\n'), ((220, 22, 222, 43), 'numpy.array', 'np.array', ({(220, 31, 222, 42): '[[zx, 0, 0], [0, zy, 0], [0, 0, 1]]'}, {}), '([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])', True, 'import numpy as np\n'), ((232, 12, 232, 43), 'numpy.rollaxis', 'np.rollaxis', ({(232, 24, 232, 25): 'x', (232, 27, 232, 39): 'channel_axis', (232, 41, 232, 42): '0'}, {}), '(x, channel_axis, 0)', True, 'import numpy as np\n'), ((243, 12, 243, 44), 'numpy.stack', 'np.stack', (), '', True, 'import numpy as np\n'), ((244, 12, 244, 47), 'numpy.rollaxis', 'np.rollaxis', ({(244, 24, 244, 25): 'x', (244, 27, 244, 28): '0', (244, 30, 244, 46): 'channel_axis + 1'}, {}), '(x, 0, channel_axis + 1)', True, 'import numpy as np\n'), ((259, 16, 259, 66), 'numpy.random.uniform', 'np.random.uniform', ({(259, 34, 259, 49): '-rotation_range', (259, 51, 259, 65): 'rotation_range'}, {}), '(-rotation_range, rotation_range)', True, 'import numpy as np\n'), ((274, 16, 274, 60), 'numpy.random.uniform', 'np.random.uniform', ({(274, 34, 274, 46): '-shear_range', (274, 48, 274, 59): 'shear_range'}, {}), '(-shear_range, shear_range)', True, 'import numpy as np\n'), ((289, 17, 289, 67), 'numpy.random.uniform', 'np.random.uniform', ({(289, 35, 289, 48): 'zoom_range[0]', (289, 50, 289, 63): 'zoom_range[1]', (289, 65, 289, 66): '2'}, {}), '(zoom_range[0], zoom_range[1], 2)', True, 'import numpy as np\n'), ((323, 18, 323, 40), 'PIL.Image.open', 'Image.open', ({(323, 29, 323, 39): 'args.image'}, {}), '(args.image)', False, 'from PIL import Image\n'), ((13, 8, 13, 21), 'numpy.asarray', 'np.asarray', ({(13, 19, 13, 20): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((207, 31, 207, 69), 'numpy.dot', 'np.dot', ({(207, 38, 207, 54): 'transform_matrix', (207, 56, 207, 68): 'shift_matrix'}, {}), '(transform_matrix, shift_matrix)', True, 'import numpy as np\n'), ((217, 31, 217, 69), 'numpy.dot', 'np.dot', ({(217, 38, 217, 54): 'transform_matrix', (217, 56, 217, 68): 'shear_matrix'}, {}), '(transform_matrix, shear_matrix)', True, 'import numpy as np\n'), ((226, 31, 226, 68), 'numpy.dot', 'np.dot', ({(226, 38, 226, 54): 'transform_matrix', (226, 56, 226, 67): 'zoom_matrix'}, {}), '(transform_matrix, zoom_matrix)', True, 'import numpy as np\n'), ((236, 26, 242, 22), 'scipy.ndimage.interpolation.affine_transform', 'ndimage.interpolation.affine_transform', (), '', False, 'from scipy import ndimage\n'), ((265, 13, 265, 71), 'numpy.random.uniform', 'np.random.uniform', ({(265, 31, 265, 50): '(-height_shift_range)', (265, 52, 265, 70): 'height_shift_range'}, {}), '(-height_shift_range, height_shift_range)', True, 'import numpy as np\n'), ((269, 13, 269, 69), 'numpy.random.uniform', 'np.random.uniform', ({(269, 31, 269, 49): '(-width_shift_range)', (269, 51, 269, 68): 'width_shift_range'}, {}), '(-width_shift_range, width_shift_range)', True, 'import numpy as np\n'), ((302, 11, 302, 29), 'numpy.random.random', 'np.random.random', ({}, {}), '()', True, 'import numpy as np\n'), ((306, 11, 306, 29), 'numpy.random.random', 'np.random.random', ({}, {}), '()', True, 'import numpy as np\n'), ((138, 42, 138, 82), 'numpy.random.uniform', 'np.random.uniform', ({(138, 60, 138, 70): '(-intensity)', (138, 72, 138, 81): 'intensity'}, {}), '(-intensity, intensity)', True, 'import numpy as np\n'), ((325, 20, 325, 33), 'numpy.uint8', 'np.uint8', ({(325, 29, 325, 32): 'img'}, {}), '(img)', True, 'import numpy as np\n'), ((195, 37, 195, 50), 'numpy.cos', 'np.cos', ({(195, 44, 195, 49): 'theta'}, {}), '(theta)', True, 
'import numpy as np\n'), ((196, 37, 196, 50), 'numpy.sin', 'np.sin', ({(196, 44, 196, 49): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((196, 52, 196, 65), 'numpy.cos', 'np.cos', ({(196, 59, 196, 64): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((212, 37, 212, 50), 'numpy.cos', 'np.cos', ({(212, 44, 212, 49): 'shear'}, {}), '(shear)', True, 'import numpy as np\n'), ((195, 53, 195, 66), 'numpy.sin', 'np.sin', ({(195, 60, 195, 65): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((211, 38, 211, 51), 'numpy.sin', 'np.sin', ({(211, 45, 211, 50): 'shear'}, {}), '(shear)', True, 'import numpy as np\n')] |
maestro-hybrid-cloud/keystone | keystone/tests/unit/token/test_provider.py | a597a86b854215835a4d54885daeb161d7b0efb8 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from six.moves import urllib
from keystone.tests import unit
from keystone.token import provider
class TestRandomStrings(unit.BaseTestCase):
def test_strings_are_url_safe(self):
s = provider.random_urlsafe_str()
self.assertEqual(s, urllib.parse.quote_plus(s))
def test_strings_can_be_converted_to_bytes(self):
s = provider.random_urlsafe_str()
self.assertTrue(isinstance(s, six.string_types))
b = provider.random_urlsafe_str_to_bytes(s)
self.assertTrue(isinstance(b, bytes))
| [((22, 12, 22, 41), 'keystone.token.provider.random_urlsafe_str', 'provider.random_urlsafe_str', ({}, {}), '()', False, 'from keystone.token import provider\n'), ((26, 12, 26, 41), 'keystone.token.provider.random_urlsafe_str', 'provider.random_urlsafe_str', ({}, {}), '()', False, 'from keystone.token import provider\n'), ((29, 12, 29, 51), 'keystone.token.provider.random_urlsafe_str_to_bytes', 'provider.random_urlsafe_str_to_bytes', ({(29, 49, 29, 50): 's'}, {}), '(s)', False, 'from keystone.token import provider\n'), ((23, 28, 23, 54), 'six.moves.urllib.parse.quote_plus', 'urllib.parse.quote_plus', ({(23, 52, 23, 53): 's'}, {}), '(s)', False, 'from six.moves import urllib\n')] |
mithro/symbiflow-xc-fasm2bels | fasm2bels/database/connection_db_utils.py | 9ed029558bedca4e726969427dc4e62ecd6d5733 | import functools
def create_maybe_get_wire(conn):
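    # Build a closure over the connection database that memoizes tile-type and
    # wire lookups, returning a wire pkey or None when the wire is not in the tile.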
c = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_tile_type_pkey(tile):
c.execute('SELECT pkey, tile_type_pkey FROM phy_tile WHERE name = ?',
(tile, ))
return c.fetchone()
@functools.lru_cache(maxsize=None)
def maybe_get_wire(tile, wire):
phy_tile_pkey, tile_type_pkey = get_tile_type_pkey(tile)
c.execute(
'SELECT pkey FROM wire_in_tile WHERE phy_tile_type_pkey = ? and name = ?',
(tile_type_pkey, wire))
result = c.fetchone()
if result is None:
return None
wire_in_tile_pkey = result[0]
c.execute(
'SELECT pkey FROM wire WHERE phy_tile_pkey = ? AND wire_in_tile_pkey = ?',
(phy_tile_pkey, wire_in_tile_pkey))
return c.fetchone()[0]
return maybe_get_wire
def maybe_add_pip(top, maybe_get_wire, feature):
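    # A set pip feature names a tile and two of its wires; record the pip only
    # when both wires are present in the connection database.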
if feature.value != 1:
return
parts = feature.feature.split('.')
assert len(parts) == 3
sink_wire = maybe_get_wire(parts[0], parts[2])
if sink_wire is None:
return
src_wire = maybe_get_wire(parts[0], parts[1])
if src_wire is None:
return
top.active_pips.add((sink_wire, src_wire))
def get_node_pkey(conn, wire_pkey):
c = conn.cursor()
c.execute("SELECT node_pkey FROM wire WHERE pkey = ?", (wire_pkey, ))
return c.fetchone()[0]
def get_wires_in_node(conn, node_pkey):
c = conn.cursor()
c.execute("SELECT pkey FROM wire WHERE node_pkey = ?", (node_pkey, ))
for row in c.fetchall():
yield row[0]
def get_wire(conn, phy_tile_pkey, wire_in_tile_pkey):
c = conn.cursor()
c.execute(
"SELECT pkey FROM wire WHERE wire_in_tile_pkey = ? AND phy_tile_pkey = ?;",
(
wire_in_tile_pkey,
phy_tile_pkey,
))
return c.fetchone()[0]
def get_tile_type(conn, tile_name):
c = conn.cursor()
c.execute(
"""
SELECT name FROM tile_type WHERE pkey = (
SELECT tile_type_pkey FROM phy_tile WHERE name = ?);""", (tile_name, ))
return c.fetchone()[0]
def get_wire_pkey(conn, tile_name, wire):
c = conn.cursor()
c.execute(
"""
WITH selected_tile(phy_tile_pkey, tile_type_pkey) AS (
SELECT
pkey,
tile_type_pkey
FROM
phy_tile
WHERE
name = ?
)
SELECT
wire.pkey
FROM
wire
WHERE
wire.phy_tile_pkey = (
SELECT
selected_tile.phy_tile_pkey
FROM
selected_tile
)
AND wire.wire_in_tile_pkey = (
SELECT
wire_in_tile.pkey
FROM
wire_in_tile
WHERE
wire_in_tile.name = ?
AND wire_in_tile.phy_tile_type_pkey = (
SELECT
tile_type_pkey
FROM
selected_tile
)
);
""", (tile_name, wire))
results = c.fetchone()
assert results is not None, (tile_name, wire)
return results[0]
| [((7, 5, 7, 38), 'functools.lru_cache', 'functools.lru_cache', (), '', False, 'import functools\n'), ((13, 5, 13, 38), 'functools.lru_cache', 'functools.lru_cache', (), '', False, 'import functools\n')] |
bcgov/ppr-deprecated | ppr-api/src/services/payment_service.py | c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3 | """A module that provides functionality for accessing the Payments API."""
import enum
import http
import logging
import requests
from fastapi import Depends, Header, HTTPException
from fastapi.security.http import HTTPAuthorizationCredentials
import auth.authentication
import config
import schemas.payment
logger = logging.getLogger(__name__)
CORP_TYPE = 'PPR'
class FilingCode(enum.Enum):
"""An enumeration of the filing codes available to PPR."""
SEARCH = 'SERCH'
YEARLY_REGISTRATION = 'FSREG'
INFINITE_REGISTRATION = 'INFRG'
class PaymentService:
"""A service used for interacting with the Payments API."""
auth_header: HTTPAuthorizationCredentials
account_id: str
def __init__(self, auth_header: HTTPAuthorizationCredentials = Depends(auth.authentication.bearer_scheme),
account_id: str = Header(None)):
"""Initialize the repository with the Authorization and Account-Id headers provided in the request."""
self.auth_header = auth_header
self.account_id = account_id
def create_payment(self, filing_code: FilingCode):
"""Submit a payment request and provide the details to the caller."""
request = {
'businessInfo': {'corpType': CORP_TYPE},
'filingInfo': {'filingTypes': [{'filingTypeCode': filing_code.value}]}
}
pay_response = requests.post(
'{}/payment-requests'.format(config.PAY_API_URL), json=request,
headers={
'Authorization': '{} {}'.format(self.auth_header.scheme, self.auth_header.credentials),
'Account-Id': self.account_id
}
)
try:
auth.authentication.check_auth_response(pay_response)
except HTTPException as auth_ex:
logger.error('Create Payment call failed auth with status {}. Response body: {}'.format(
pay_response.status_code, pay_response.text))
raise auth_ex
if not pay_response: # status_code is unsuccessful
logger.error('Create Payment call failed unexpectedly with status {}. Response body: {}'.format(
pay_response.status_code, pay_response.text))
raise HTTPException(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR)
body = pay_response.json()
return schemas.payment.Payment(id=body['id'], status=body['statusCode'], method=body['paymentMethod'])
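# Illustrative usage sketch -- the FastAPI app and route below are assumptions,
# not part of this module: inject the service into an endpoint and submit a
# search filing.
#
#   @app.post('/searches')
#   def create_search(payment_service: PaymentService = Depends()):
#       return payment_service.create_payment(FilingCode.SEARCH)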
| [((16, 9, 16, 36), 'logging.getLogger', 'logging.getLogger', ({(16, 27, 16, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((35, 67, 35, 109), 'fastapi.Depends', 'Depends', ({(35, 75, 35, 108): 'auth.authentication.bearer_scheme'}, {}), '(auth.authentication.bearer_scheme)', False, 'from fastapi import Depends, Header, HTTPException\n'), ((36, 35, 36, 47), 'fastapi.Header', 'Header', ({(36, 42, 36, 46): 'None'}, {}), '(None)', False, 'from fastapi import Depends, Header, HTTPException\n'), ((65, 18, 65, 82), 'fastapi.HTTPException', 'HTTPException', (), '', False, 'from fastapi import Depends, Header, HTTPException\n')] |
kolyasalubov/Lv-639.pythonCore | SmerekaRoman/HW_6/HW 6.3.py | 06f10669a188318884adb00723127465ebdf2907 | def numb_of_char(a):
d = {}
for char in set(a):
d[char] = a.count(char)
return d
a = numb_of_char(str(input("Input the word please: ")))
print(a)
| [] |
jiadaizhao/LeetCode | 0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | class Vector2D:
def __init__(self, v: List[List[int]]):
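        # Flatten the 2D vector lazily with a generator and keep one value of
        # lookahead so hasNext() can be answered without consuming an element.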
def getIt():
for row in v:
for val in row:
yield val
self.it = iter(getIt())
self.val = next(self.it, None)
def next(self) -> int:
result = self.val
self.val = next(self.it, None)
return result
def hasNext(self) -> bool:
return self.val is not None
# Your Vector2D object will be instantiated and called as such:
# obj = Vector2D(v)
# param_1 = obj.next()
# param_2 = obj.hasNext()
| [] |
jbhayback/reconciliation-manager | logger_decorator.py | 5de10a0ec89e397a4937d1764976c94cde06beee | from datetime import datetime
import inspect
import logging
# Module-level fallback logger; log_time swaps in the decorated object's own
# logger when one is available.
logger = logging.getLogger(__name__)
def log_time(msg=None):
def decorator(f):
nonlocal msg
if msg is None:
msg = '{} time spent: '.format(f.__name__)
def inner(*args, **kwargs):
# check if the object has a logger
global logger
if args and hasattr(args[0], 'logger'):
logger = args[0].logger
start = datetime.now()
result = f(*args, **kwargs)
logger.info(
msg + ' {} seconds'.format((datetime.now() - start).total_seconds())
)
return result
return inner
return decorator
def log_params(f):
arg_spec = inspect.getargspec(f).args
has_self = arg_spec and arg_spec[0] == 'self'
def decorator(*args, **kwargs):
logger.info(
'calling {} with args: {}, and kwargs: {}'.format(
f.__name__, args if not has_self else args[1:], kwargs
)
)
return f(*args, **kwargs)
return decorator
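# Illustrative example -- the Worker class is an assumption, not part of this
# module: stack both decorators on a method of an object exposing `self.logger`.
#
#   class Worker:
#       def __init__(self):
#           self.logger = logging.getLogger('worker')
#       @log_time()
#       @log_params
#       def run(self, n):
#           return sum(range(n))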
| [((29, 15, 29, 36), 'inspect.getargspec', 'inspect.getargspec', ({(29, 34, 29, 35): 'f'}, {}), '(f)', False, 'import inspect\n'), ((16, 20, 16, 34), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((19, 44, 19, 58), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
rog-works/lf3py | lf3py/di/__init__.py | e89937f7aa133ed54d85764f06101ab9abf6b960 | from lf3py.di.di import DI # noqa F401
| [] |
lenarother/django-critical-css | critical/tasks.py | 15c12ea02f7ea049e59efba4d963c35f41f26d78 | import logging
from django.utils.safestring import mark_safe
from django_rq import job
from inline_static.css import transform_css_urls
logger = logging.getLogger(__name__)
@job
def calculate_critical_css(critical_id, original_path):
from .exceptions import CriticalException
from .models import Critical
from .services import calculate_critical_css as service_calculate
logger.info('Task: critical css with id {0} requested.'.format(critical_id))
critical = Critical.objects.filter(id=critical_id).first()
if not critical:
raise CriticalException('There is no Critical object with id {0}'.format(critical_id))
logger.info('Task: {0}, {1}'.format(critical.url, critical.path))
critical.is_pending = True
critical.save(update_fields=['is_pending'])
logger.info('Task: critical css with id {0} pending.'.format(critical_id))
try:
critical_css_raw = service_calculate(critical.url, critical.path)
critical_css = transform_css_urls(original_path, critical.path, critical_css_raw)
except Exception as exc:
critical.is_pending = False
critical.save(update_fields=['is_pending'])
raise CriticalException('Could not calculate critical css') from exc
critical.css = mark_safe(critical_css)
critical.is_pending = False
critical.save()
logger.info('Task: critical css with id {0} saved.'.format(critical_id))
| [((7, 9, 7, 36), 'logging.getLogger', 'logging.getLogger', ({(7, 27, 7, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((34, 19, 34, 42), 'django.utils.safestring.mark_safe', 'mark_safe', ({(34, 29, 34, 41): 'critical_css'}, {}), '(critical_css)', False, 'from django.utils.safestring import mark_safe\n'), ((28, 23, 28, 89), 'inline_static.css.transform_css_urls', 'transform_css_urls', ({(28, 42, 28, 55): 'original_path', (28, 57, 28, 70): 'critical.path', (28, 72, 28, 88): 'critical_css_raw'}, {}), '(original_path, critical.path, critical_css_raw)', False, 'from inline_static.css import transform_css_urls\n')] |
wei2912/bce-simulation | test.py | 65c19051417c871bce4585481eb06c5ba986a96f | #!/usr/bin/env python
# coding=utf-8
"""
This script tests the simulations of the experiments.
"""
import math
from utils import coin_var, needle_var
def main():
needle_var_vals = [
(1.1, 1.0),
(1.4, 1.0),
(2.0, 1.0),
(2.9, 1.0),
(3.3, 1.0),
(5.0, 1.0)
]
print("needle_var:")
for L, D in needle_var_vals:
trials = 1000000
pred_prob = needle_var.predict_prob(length=L, gap_width=D)
pred_hits = pred_prob * trials
hits = needle_var.run_trials(length=L, gap_width=D, trials=trials)
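        # Pearson chi-square statistic over the two outcomes (hit / miss),
        # comparing observed counts with the predicted ones.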
if pred_hits == 0 or pred_hits == trials:
stat = float('nan')
else:
stat = sum([
(hits - pred_hits) ** 2 / pred_hits,
((trials - hits) - (trials - pred_hits)) ** 2 / (trials-pred_hits)
])
print("L = {}, D = {}, expected = {}, observed = {}, stat = {}".format(L, D, pred_hits, hits, stat))
print("coin_var:")
coin_var_vals = [
(1.0, 1.0),
(1.0, 1.2),
(1.0, math.sqrt(2)),
(1.0, 1.5),
(1.0, 1.8),
(1.0, 1.9),
(1.0, 2.0),
(1.0, 3.0),
(1.0, 5.0)
]
for R, D in coin_var_vals:
trials = 100000
pred_prob = coin_var.predict_prob(diameter=2*R, gap_width=D)
pred_hits = pred_prob * trials
hits = coin_var.run_trials(diameter=2*R, gap_width=D, trials=trials)
if pred_hits == 0 or pred_hits == trials:
stat = float('nan')
else:
stat = sum([
(hits - pred_hits) ** 2 / pred_hits,
((trials - hits) - (trials - pred_hits)) ** 2 / (trials-pred_hits)
])
print("R = {}, D = {}, expected = {}, observed = {}, stat = {}".format(R, D, pred_hits, hits, stat))
main()
| [((26, 20, 26, 66), 'utils.needle_var.predict_prob', 'needle_var.predict_prob', (), '', False, 'from utils import coin_var, needle_var\n'), ((28, 15, 28, 74), 'utils.needle_var.run_trials', 'needle_var.run_trials', (), '', False, 'from utils import coin_var, needle_var\n'), ((57, 20, 57, 68), 'utils.coin_var.predict_prob', 'coin_var.predict_prob', (), '', False, 'from utils import coin_var, needle_var\n'), ((59, 15, 59, 76), 'utils.coin_var.run_trials', 'coin_var.run_trials', (), '', False, 'from utils import coin_var, needle_var\n'), ((44, 14, 44, 26), 'math.sqrt', 'math.sqrt', ({(44, 24, 44, 25): '(2)'}, {}), '(2)', False, 'import math\n')] |
fernandozanutto/PyNES | instructions/instructions.py | cb8d589ceb55cd7df0e114e726c6b6bbbc556172 | from addressing import *
from instructions.base_instructions import SetBit, ClearBit
from instructions.generic_instructions import Instruction
from status import Status
# set status instructions
class Sec(SetBit):
identifier_byte = bytes([0x38])
bit = Status.StatusTypes.carry
class Sei(SetBit):
identifier_byte = bytes([0x78])
bit = Status.StatusTypes.interrupt
class Sed(SetBit):
identifier_byte = bytes([0xF8])
bit = Status.StatusTypes.decimal
# clear status instructions
class Cld(ClearBit):
identifier_byte = bytes([0xD8])
bit = Status.StatusTypes.decimal
class Clc(ClearBit):
identifier_byte = bytes([0x18])
bit = Status.StatusTypes.carry
class Clv(ClearBit):
identifier_byte = bytes([0xB8])
bit = Status.StatusTypes.overflow
class Cli(ClearBit):
identifier_byte = bytes([0x58])
bit = Status.StatusTypes.interrupt
class Bit(Instruction):
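    # 6502 BIT: Z is set from A & M, V is copied from bit 6 of the operand,
    # and N from bit 7.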
@classmethod
def get_data(cls, cpu, memory_address, data_bytes) -> Optional[int]:
return cpu.bus.read_memory(memory_address)
@classmethod
def apply_side_effects(cls, cpu, memory_address, value):
and_result = cpu.a_reg & value
cpu.status_reg.bits[Status.StatusTypes.zero] = not and_result
cpu.status_reg.bits[Status.StatusTypes.overflow] = (
value & (1 << 6)) > 0
cpu.status_reg.bits[Status.StatusTypes.negative] = (
value & (1 << 7)) > 0
class BitZeroPage(ZeroPageAddressing, Bit):
identifier_byte = bytes([0x24])
class BitAbsolute(AbsoluteAddressing, Bit):
identifier_byte = bytes([0x2C])
class Brk(ImplicitAddressing, Instruction):
identifier_byte = bytes([0x00])
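    # BRK pushes the return address and the status register (with the break
    # flag set) onto the stack, then sets the interrupt-disable flag.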
@classmethod
def get_data(cls, cpu, memory_address, data_bytes) -> Optional[int]:
return super().get_data(cpu, memory_address, data_bytes)
@classmethod
def write(cls, cpu, memory_address, value):
cpu.push_to_stack(cpu.pc_reg + 1, 2)
cpu.push_to_stack(cpu.status_reg.to_int() | (1 << 4), 1)
@classmethod
def apply_side_effects(cls, cpu, memory_address, value):
cpu.status_reg.bits[Status.StatusTypes.interrupt] = 1
cpu.running = False
@classmethod
def get_cycles(cls):
return 7
| [] |
vermouth1992/Leetcode | python/530.minimum-absolute-difference-in-bst.py | 0d7dda52b12f9e01d88fc279243742cd8b4bcfd1 | #
# @lc app=leetcode id=530 lang=python3
#
# [530] Minimum Absolute Difference in BST
#
# https://leetcode.com/problems/minimum-absolute-difference-in-bst/description/
#
# algorithms
# Easy (55.23%)
# Total Accepted: 115.5K
# Total Submissions: 209K
# Testcase Example: '[4,2,6,1,3]'
#
# Given the root of a Binary Search Tree (BST), return the minimum absolute
# difference between the values of any two different nodes in the tree.
#
#
# Example 1:
#
#
# Input: root = [4,2,6,1,3]
# Output: 1
#
#
# Example 2:
#
#
# Input: root = [1,0,48,null,null,12,49]
# Output: 1
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [2, 10^4].
# 0 <= Node.val <= 10^5
#
#
#
# Note: This question is the same as 783:
# https://leetcode.com/problems/minimum-distance-between-bst-nodes/
#
#
# Definition for a binary tree node.
from typing import List
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def getNodeValues(self, root: TreeNode) -> List[int]:
value = []
self.getNodeValuesHelper(root, value)
return value
def getNodeValuesHelper(self, root: TreeNode, value: List[int]):
if root is None:
return
value.append(root.val)
self.getNodeValuesHelper(root.left, value)
self.getNodeValuesHelper(root.right, value)
def getMinimumDifference(self, root: TreeNode) -> int:
# get all the values and put into a list O(n)
value = self.getNodeValues(root)
# sort the list O(nlogn)
value = sorted(value)
# find the minimum difference between ajacent values O(n)
min_abs_diff = abs(value[0] - value[1])
for i in range(1, len(value) - 1):
diff = abs(value[i] - value[i + 1])
if diff < min_abs_diff:
min_abs_diff = diff
return min_abs_diff
| [] |
abhaikollara/tensorflow | tensorflow/python/eager/remote_cloud_tpu_test.py | 4f96df3659696990cb34d0ad07dc67843c4225a9 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that we can connect to a real Cloud TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
EXPECTED_DEVICES_PRE_CONNECT = [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:XLA_CPU:0'
]
EXPECTED_DEVICES_AFTER_CONNECT = [
'/job:localhost/replica:0/task:0/device:CPU:0',
'/job:localhost/replica:0/task:0/device:XLA_CPU:0',
'/job:worker/replica:0/task:0/device:CPU:0',
'/job:worker/replica:0/task:0/device:XLA_CPU:0',
'/job:worker/replica:0/task:0/device:TPU_SYSTEM:0',
'/job:worker/replica:0/task:0/device:TPU:0',
'/job:worker/replica:0/task:0/device:TPU:1',
'/job:worker/replica:0/task:0/device:TPU:2',
'/job:worker/replica:0/task:0/device:TPU:3',
'/job:worker/replica:0/task:0/device:TPU:4',
'/job:worker/replica:0/task:0/device:TPU:5',
'/job:worker/replica:0/task:0/device:TPU:6',
'/job:worker/replica:0/task:0/device:TPU:7',
]
class RemoteCloudTPUTest(absltest.TestCase):
"""Test that we can connect to a real Cloud TPU."""
def test_connect(self):
self.assertCountEqual(
EXPECTED_DEVICES_PRE_CONNECT,
context.list_devices())
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project
)
remote.connect_to_cluster(resolver)
self.assertCountEqual(
EXPECTED_DEVICES_AFTER_CONNECT,
context.list_devices())
tpu_strategy_util.initialize_tpu_system(resolver)
if __name__ == '__main__':
absltest.main()
| [((30, 0, 30, 60), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(30, 20, 30, 25): '"""tpu"""', (30, 27, 30, 29): '""""""', (30, 31, 30, 59): '"""Name of TPU to connect to."""'}, {}), "('tpu', '', 'Name of TPU to connect to.')", False, 'from absl import flags\n'), ((31, 0, 31, 69), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(31, 20, 31, 29): '"""project"""', (31, 31, 31, 35): 'None', (31, 37, 31, 68): '"""Name of GCP project with TPU."""'}, {}), "('project', None, 'Name of GCP project with TPU.')", False, 'from absl import flags\n'), ((32, 0, 32, 63), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', ({(32, 20, 32, 26): '"""zone"""', (32, 28, 32, 32): 'None', (32, 34, 32, 62): '"""Name of GCP zone with TPU."""'}, {}), "('zone', None, 'Name of GCP zone with TPU.')", False, 'from absl import flags\n'), ((75, 2, 75, 17), 'absl.testing.absltest.main', 'absltest.main', ({}, {}), '()', False, 'from absl.testing import absltest\n'), ((63, 15, 65, 5), 'tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver.TPUClusterResolver', 'tpu_cluster_resolver.TPUClusterResolver', (), '', False, 'from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver\n'), ((66, 4, 66, 39), 'tensorflow.python.eager.remote.connect_to_cluster', 'remote.connect_to_cluster', ({(66, 30, 66, 38): 'resolver'}, {}), '(resolver)', False, 'from tensorflow.python.eager import remote\n'), ((72, 4, 72, 53), 'tensorflow.python.tpu.tpu_strategy_util.initialize_tpu_system', 'tpu_strategy_util.initialize_tpu_system', ({(72, 44, 72, 52): 'resolver'}, {}), '(resolver)', False, 'from tensorflow.python.tpu import tpu_strategy_util\n'), ((61, 8, 61, 30), 'tensorflow.python.eager.context.list_devices', 'context.list_devices', ({}, {}), '()', False, 'from tensorflow.python.eager import context\n'), ((70, 8, 70, 30), 'tensorflow.python.eager.context.list_devices', 'context.list_devices', ({}, {}), '()', False, 'from tensorflow.python.eager import context\n')] |
gbtn/bitcoin-sv-gbtn | test/functional/bsv-blocksize-params.py | 8b09d1aa072da819fb3309b0be85dae0f1ac9549 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that the blockmaxsize and excessiveblocksize parameters are also
settable via the bitcoin.conf file.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.cdefs import (ONE_MEGABYTE)
import os
class BSVBlockSizeParams(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.maxminedblocksize = 4 * ONE_MEGABYTE
self.maxblocksize = 16 * ONE_MEGABYTE
def setup_chain(self):
super().setup_chain()
with open(os.path.join(self.options.tmpdir + "/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write("blockmaxsize=" + str(self.maxminedblocksize) + "\n")
f.write("excessiveblocksize=" + str(self.maxblocksize) + "\n")
def add_options(self, parser):
super().add_options(parser)
def run_test(self):
gires = self.nodes[0].getinfo()
assert_equal(gires["maxblocksize"], self.maxblocksize)
assert_equal(gires["maxminedblocksize"], self.maxminedblocksize)
if __name__ == '__main__':
BSVBlockSizeParams().main()
| [((35, 8, 35, 62), 'test_framework.util.assert_equal', 'assert_equal', ({(35, 21, 35, 42): "gires['maxblocksize']", (35, 44, 35, 61): 'self.maxblocksize'}, {}), "(gires['maxblocksize'], self.maxblocksize)", False, 'from test_framework.util import assert_equal, assert_raises_rpc_error\n'), ((36, 8, 36, 72), 'test_framework.util.assert_equal', 'assert_equal', ({(36, 21, 36, 47): "gires['maxminedblocksize']", (36, 49, 36, 71): 'self.maxminedblocksize'}, {}), "(gires['maxminedblocksize'], self.maxminedblocksize)", False, 'from test_framework.util import assert_equal, assert_raises_rpc_error\n'), ((26, 18, 26, 78), 'os.path.join', 'os.path.join', ({(26, 31, 26, 61): "(self.options.tmpdir + '/node0')", (26, 63, 26, 77): '"""bitcoin.conf"""'}, {}), "(self.options.tmpdir + '/node0', 'bitcoin.conf')", False, 'import os\n')] |
headlessme/yotta | yotta/test/cli/outdated.py | 947ab074b629c8f18ca91ab84ebaa29096b011c6 | #!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
# internal modules:
from . import util
from . import cli
Test_Outdated = {
'module.json':'''{
"name": "test-outdated",
"version": "0.0.0",
"description": "Test yotta outdated",
"author": "James Crosby <[email protected]>",
"license": "Apache-2.0",
"dependencies":{
"test-testing-dummy": "*"
}
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
# test-testing-dummy v0.0.1 (a newer version is available from the registry,
# and will be installed by yt up)
'yotta_modules/test-testing-dummy/module.json':'''{
"name": "test-testing-dummy",
"version": "0.0.1",
"description": "Test yotta's compilation of tests.",
"author": "James Crosby <[email protected]>",
"license": "Apache-2.0"
}
'''
}
class TestCLIOutdated(unittest.TestCase):
def test_outdated(self):
path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
self.assertNotEqual(statuscode, 0)
self.assertIn('test-testing-dummy', stdout + stderr)
util.rmRf(path)
def test_notOutdated(self):
path = util.writeTestFiles(Test_Outdated, True)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'up'], cwd=path)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
self.assertEqual(statuscode, 0)
self.assertNotIn('test-testing-dummy', stdout + stderr)
util.rmRf(path)
| [] |
Starcross/django-geoposition | geoposition/tests/urls.py | b2b3af2a1b73e0ce99e76f19b7f63f1a91f3e093 | from django.urls import path, include
from django.contrib import admin
from example.views import poi_list
admin.autodiscover()
urlpatterns = [
path('', poi_list),
path('admin/', admin.site.urls),
]
| [((5, 0, 5, 20), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ({}, {}), '()', False, 'from django.contrib import admin\n'), ((8, 4, 8, 22), 'django.urls.path', 'path', ({(8, 9, 8, 11): '""""""', (8, 13, 8, 21): 'poi_list'}, {}), "('', poi_list)", False, 'from django.urls import path, include\n'), ((9, 4, 9, 35), 'django.urls.path', 'path', ({(9, 9, 9, 17): '"""admin/"""', (9, 19, 9, 34): 'admin.site.urls'}, {}), "('admin/', admin.site.urls)", False, 'from django.urls import path, include\n')] |
Allen1218/Python_Project_Interesting | A_Stocker/Stocker.py | 55d5e58e70e21d45c4bb9dc4d4c219f3a8385834 | import threading
import tushare as ts
import pandas as pd
import datetime
STOCK = {#'002594':[1,170.15], ## BYD / lots held, cost price
         '601012':[11,99.9], ## LONGi Green Energy
         '002340':[12,8.72], ## GEM Co.
         '603259':[1,141.7], ## WuXi AppTec
         '002346':[10,10.68], ## Zhezhong Co.
         #'600438':[9,42.96], ## Tongwei Co.
         #'002475':[3,59.51], ## Luxshare Precision
         #'603308':[1,33.49], ## Yingliu Group
         #'002415': [3, 66.40], ## Hikvision
         # '600559':[3,35.3], ## Laobaigan Liquor
         # '601100':[1, 114.5], ## Hengli Hydraulic
         # '603466':[6, 22.40] ## Fengyuzhu
         }
TimerNum = 20.0 # s
Total = 0
# TODO
def get_all_price():
'''process all stock'''
stockCode = list(STOCK.keys())
df = ts.get_realtime_quotes(stockCode)
lp = list(STOCK.values())
stockNum = []
stockCostPrice = []
for i in range(len(lp)):
stockNum.append(lp[i][0])
stockCostPrice.append(lp[i][1])
df['num'] = stockNum
df['stockCostPrice'] = stockCostPrice
    # post-processing of the quote data
    # profit and loss ratio
plRatio = round((df['price'].astype(float) / df['stockCostPrice'] - 1)*100,2)
    # profit and loss
df['plRatio'] = plRatio
df['stockNum'] = stockNum
pl = round(df['plRatio'].astype(float) * df['stockNum'] * df['stockCostPrice'].astype(float),2)
df['pl'] = pl
    # today's rise/fall versus the previous close
currentRF = round((df['price'].astype(float) / df['pre_close'].astype(float) - 1)*100,2)
df['currentRF'] = currentRF
df1 = df[[ 'open', 'price', 'stockCostPrice', 'plRatio', 'num','pl', 'currentRF','name']]
pd.set_option('display.unicode.ambiguous_as_wide', True)
pd.set_option('display.unicode.east_asian_width', True)
    pd.set_option('display.width', 180)  # set display width (important)
pd.set_option('display.max_columns', 1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 1000)
sss = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f)")[:-4]
print('\n')
print("----------------" + sss +"------------------")
print(df1)
sum_int = round(df['pl'].sum(),2)
print("total profit and lost is " + sum_int.astype(str))
print('\n')
# df.to_csv('stock_data.csv', encoding='utf_8_sig', index=None)
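    # Re-arm the timer so the quote refresh repeats every TimerNum seconds.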
global timer
timer = threading.Timer(TimerNum, get_all_price, [])
timer.start()
if __name__ == '__main__':
print(STOCK)
get_all_price()
timer = threading.Timer(TimerNum, get_all_price, [])
timer.start()
| [((25, 9, 25, 42), 'tushare.get_realtime_quotes', 'ts.get_realtime_quotes', ({(25, 32, 25, 41): 'stockCode'}, {}), '(stockCode)', True, 'import tushare as ts\n'), ((52, 4, 52, 60), 'pandas.set_option', 'pd.set_option', ({(52, 18, 52, 53): '"""display.unicode.ambiguous_as_wide"""', (52, 55, 52, 59): '(True)'}, {}), "('display.unicode.ambiguous_as_wide', True)", True, 'import pandas as pd\n'), ((53, 4, 53, 59), 'pandas.set_option', 'pd.set_option', ({(53, 18, 53, 52): '"""display.unicode.east_asian_width"""', (53, 54, 53, 58): '(True)'}, {}), "('display.unicode.east_asian_width', True)", True, 'import pandas as pd\n'), ((54, 4, 54, 39), 'pandas.set_option', 'pd.set_option', ({(54, 18, 54, 33): '"""display.width"""', (54, 35, 54, 38): '(180)'}, {}), "('display.width', 180)", True, 'import pandas as pd\n'), ((55, 4, 55, 46), 'pandas.set_option', 'pd.set_option', ({(55, 18, 55, 39): '"""display.max_columns"""', (55, 41, 55, 45): '(1000)'}, {}), "('display.max_columns', 1000)", True, 'import pandas as pd\n'), ((56, 4, 56, 40), 'pandas.set_option', 'pd.set_option', ({(56, 18, 56, 33): '"""display.width"""', (56, 35, 56, 39): '(1000)'}, {}), "('display.width', 1000)", True, 'import pandas as pd\n'), ((57, 4, 57, 47), 'pandas.set_option', 'pd.set_option', ({(57, 18, 57, 40): '"""display.max_colwidth"""', (57, 42, 57, 46): '(1000)'}, {}), "('display.max_colwidth', 1000)", True, 'import pandas as pd\n'), ((71, 12, 71, 56), 'threading.Timer', 'threading.Timer', ({(71, 28, 71, 36): 'TimerNum', (71, 38, 71, 51): 'get_all_price', (71, 53, 71, 55): '[]'}, {}), '(TimerNum, get_all_price, [])', False, 'import threading\n'), ((78, 12, 78, 56), 'threading.Timer', 'threading.Timer', ({(78, 28, 78, 36): 'TimerNum', (78, 38, 78, 51): 'get_all_price', (78, 53, 78, 55): '[]'}, {}), '(TimerNum, get_all_price, [])', False, 'import threading\n'), ((58, 10, 58, 33), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n')] |
mkazin/StatementRenamer | tests/extractors/test_etrade.py | ef03c71f0e627a15a4bba08e45bfa90ecacd28fc | from datetime import datetime
from statement_renamer.extractors.etrade import ETradeDateExtractor as EXTRACTOR_UNDER_TEST
from statement_renamer.extractors.factory import ExtractorFactory
TESTDATA = (
"""
PAGE 1 OF 6 February 1, 2019 - March 31, 2019AccountNumber:####-####AccountType:ROTH IRA
PAGE 5 OF 6Account Number: ####-####Statement Period : February 1, 2019 - March 31, 2019Account Type
TolearnmoreabouttheRSDAProgram,pleasereviewyourRSDAProgramCustomerAgreement,visitwww.etrade.com,orcallusat1-800-387-2331
"""
)
def test_monthly_statement():
extractor = EXTRACTOR_UNDER_TEST()
data = extractor.extract(TESTDATA)
new_name = extractor.rename(data)
assert data.get_start_date() == datetime(2019, 2, 1)
assert data.get_end_date() == datetime(2019, 3, 31)
assert new_name == '2019-03 E-Trade Statement.pdf'
def test_factory():
extractor = ExtractorFactory.get_matching_extractor(TESTDATA)
assert isinstance(extractor, EXTRACTOR_UNDER_TEST)
| [((17, 16, 17, 38), 'statement_renamer.extractors.etrade.ETradeDateExtractor', 'EXTRACTOR_UNDER_TEST', ({}, {}), '()', True, 'from statement_renamer.extractors.etrade import ETradeDateExtractor as EXTRACTOR_UNDER_TEST\n'), ((28, 16, 28, 65), 'statement_renamer.extractors.factory.ExtractorFactory.get_matching_extractor', 'ExtractorFactory.get_matching_extractor', ({(28, 56, 28, 64): 'TESTDATA'}, {}), '(TESTDATA)', False, 'from statement_renamer.extractors.factory import ExtractorFactory\n'), ((21, 36, 21, 56), 'datetime.datetime', 'datetime', ({(21, 45, 21, 49): '(2019)', (21, 51, 21, 52): '(2)', (21, 54, 21, 55): '(1)'}, {}), '(2019, 2, 1)', False, 'from datetime import datetime\n'), ((22, 34, 22, 55), 'datetime.datetime', 'datetime', ({(22, 43, 22, 47): '(2019)', (22, 49, 22, 50): '(3)', (22, 52, 22, 54): '(31)'}, {}), '(2019, 3, 31)', False, 'from datetime import datetime\n')] |
M3nin0/supreme-broccoli | Estrutura_Decisao/who.py | 186c1ea3b839ba3139f9301660dec8fbd27a162e | prod1 = float(input("Insira o valor do produto A: "))
prod2 = float(input("Insira o valor do produto B: "))
prod3 = float(input("Insira o valor do produto C: "))
if prod1 < prod2 and prod1 < prod3:
print ("Escolha o produto A é o mais barato")
elif prod2 < prod1 and prod2 < prod3:
print ("Escolha o produto B é o mais barato")
elif prod3 < prod1 and prod3 < prod2:
print ("Escolha o produto C é o mais barato")
| [] |
Lube-Project/ProgettoLube | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/train/experimental/__init__.py | cbf33971e2c2e865783ec1a2302625539186a338 | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.train.experimental namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.training.experimental.loss_scale import DynamicLossScale
from tensorflow.python.training.experimental.loss_scale import FixedLossScale
from tensorflow.python.training.experimental.loss_scale import LossScale
from tensorflow.python.training.experimental.mixed_precision import disable_mixed_precision_graph_rewrite
from tensorflow.python.training.experimental.mixed_precision import enable_mixed_precision_graph_rewrite
from tensorflow.python.training.tracking.python_state import PythonState
del _print_function
| [] |
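This __init__ only re-exports the tf.train.experimental symbols listed above. A hedged sketch of how the mixed-precision graph rewrite is commonly enabled in TF 2.x (the optimizer and loss-scale choices here are illustrative assumptions, not taken from this file):

import tensorflow as tf

# Wrap an optimizer so the graph rewrite casts eligible ops to float16 and
# applies loss scaling; a dynamic loss scale is the usual default.
opt = tf.keras.optimizers.SGD(learning_rate=0.01)
opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(
    opt, loss_scale=tf.train.experimental.DynamicLossScale()
)
# ...compile/fit a model with `opt` as usual; the rewrite can be undone with
# tf.train.experimental.disable_mixed_precision_graph_rewrite().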
BenjaminGabet/SciDataTool | SciDataTool/Methods/VectorField/plot_3D_Data.py | 7994441de4c54921d43750cacd8df761ba4bd421 | def plot_3D_Data(
self,
*arg_list,
is_norm=False,
unit="SI",
component_list=None,
save_path=None,
x_min=None,
x_max=None,
y_min=None,
y_max=None,
z_min=None,
z_max=None,
z_range=None,
is_auto_ticks=True,
is_auto_range=False,
is_2D_view=False,
is_same_size=False,
N_stem=100,
fig=None,
ax=None,
is_show_fig=None,
is_logscale_x=False,
is_logscale_y=False,
is_logscale_z=False,
thresh=0.02,
is_switch_axes=False,
colormap="RdBu_r",
win_title=None,
font_name="arial",
font_size_title=12,
font_size_label=10,
font_size_legend=8,
):
"""Plots a field as a function of time
Parameters
----------
self : Output
an Output object
Data_str : str
name of the Data Object to plot (e.g. "mag.Br")
*arg_list : list of str
arguments to specify which axes to plot
is_norm : bool
boolean indicating if the field must be normalized
unit : str
unit in which to plot the field
save_path : str
full path including folder, name and extension of the file to save if save_path is not None
x_min : float
minimum value for the x-axis
x_max : float
maximum value for the x-axis
y_min : float
minimum value for the y-axis
y_max : float
maximum value for the y-axis
z_min : float
minimum value for the z-axis
z_max : float
maximum value for the z-axis
is_auto_ticks : bool
in fft, adjust ticks to freqs (deactivate if too close)
is_auto_range : bool
in fft, display up to 1% of max
is_2D_view : bool
True to plot Data in xy plane and put z as colormap
is_same_size : bool
True to have all color blocks with same size in 2D view
N_stem : int
number of harmonics to plot (only for stem plots)
fig : Matplotlib.figure.Figure
existing figure to use if None create a new one
ax : Matplotlib.axes.Axes object
ax on which to plot the data
is_show_fig : bool
True to show figure after plot
is_logscale_x : bool
boolean indicating if the x-axis must be set in logarithmic scale
is_logscale_y : bool
boolean indicating if the y-axis must be set in logarithmic scale
is_logscale_z : bool
boolean indicating if the z-axis must be set in logarithmic scale
thresh : float
threshold for automatic fft ticks
is_switch_axes : bool
to switch x and y axes
"""
# Call the plot on each component
if component_list is None: # default: extract all components
component_list = self.components.keys()
for i, comp in enumerate(component_list):
if save_path is not None and len(component_list) > 1:
save_path_comp = (
save_path.split(".")[0] + "_" + comp + "." + save_path.split(".")[1]
)
else:
save_path_comp = save_path
self.components[comp].plot_3D_Data(
arg_list,
is_norm=is_norm,
unit=unit,
save_path=save_path_comp,
x_min=x_min,
x_max=x_max,
y_min=y_min,
y_max=y_max,
z_min=z_min,
z_max=z_max,
colormap=colormap,
is_auto_ticks=is_auto_ticks,
is_auto_range=is_auto_range,
is_2D_view=is_2D_view,
is_same_size=is_same_size,
N_stem=N_stem,
fig=fig,
ax=ax,
is_show_fig=is_show_fig,
is_logscale_x=is_logscale_x,
is_logscale_y=is_logscale_y,
is_logscale_z=is_logscale_z,
thresh=thresh,
is_switch_axes=is_switch_axes,
win_title=win_title,
font_name=font_name,
font_size_title=font_size_title,
font_size_label=font_size_label,
font_size_legend=font_size_legend,
)
| [] |
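Since plot_3D_Data above simply forwards every keyword to each component's own plot_3D_Data, a typical call looks like the sketch below (the VectorField instance, axis names and unit are illustrative assumptions, not taken from the file):

# vf is assumed to be a SciDataTool VectorField, e.g. an airgap flux density
vf.plot_3D_Data(
    "time",
    "angle",
    component_list=["radial"],  # defaults to all of vf.components when omitted
    unit="T",
    is_2D_view=True,
    save_path="Br.png",  # with several components the file name is suffixed per component
)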
obilaniu/orion | tests/unittests/plotting/test_plotly_backend.py | bc886daf791d66490b59e43657f6f6db45d34ea8 | """Collection of tests for :mod:`orion.plotting.backend_plotly`."""
import copy
import numpy
import pandas
import plotly
import pytest
import orion.client
from orion.analysis.partial_dependency_utils import partial_dependency_grid
from orion.core.worker.experiment import Experiment
from orion.plotting.base import (
lpi,
parallel_coordinates,
partial_dependencies,
rankings,
regret,
regrets,
)
from orion.testing import create_experiment
from orion.testing.plotting import (
assert_lpi_plot,
assert_parallel_coordinates_plot,
assert_partial_dependencies_plot,
assert_rankings_plot,
assert_regret_plot,
assert_regrets_plot,
)
config = dict(
name="experiment-name",
space={"x": "uniform(0, 200)"},
metadata={
"user": "test-user",
"orion_version": "XYZ",
"VCS": {
"type": "git",
"is_dirty": False,
"HEAD_sha": "test",
"active_branch": None,
"diff_sha": "diff",
},
},
version=1,
pool_size=1,
max_trials=10,
working_dir="",
algorithms={"random": {"seed": 1}},
producer={"strategy": "NoParallelStrategy"},
)
trial_config = {
"experiment": 0,
"status": "completed",
"worker": None,
"start_time": None,
"end_time": None,
"heartbeat": None,
"results": [],
"params": [],
}
def mock_space(x="uniform(0, 6)", y="uniform(0, 3)", **kwargs):
"""Build a mocked space"""
mocked_config = copy.deepcopy(config)
mocked_config["space"] = {"x": x}
if y is not None:
mocked_config["space"]["y"] = y
mocked_config["space"].update(kwargs)
return mocked_config
def mock_experiment(
monkeypatch, ids=None, x=None, y=None, z=None, objectives=None, status=None
):
"""Mock experiment to_pandas to return given data (or default one)"""
if ids is None:
ids = ["a", "b", "c", "d"]
if x is None:
x = [0, 1, 2, 4]
if y is None:
y = [3, 2, 0, 1]
if objectives is None:
objectives = [0.1, 0.2, 0.3, 0.5]
if status is None:
status = ["completed", "completed", "completed", "completed"]
data = {
"id": ids,
"x": x,
"objective": objectives,
"status": status,
"suggested": ids,
}
if not isinstance(y, str):
data["y"] = y
if z is not None:
data["z"] = z
def to_pandas(self, with_evc_tree=False):
return pandas.DataFrame(data=data)
monkeypatch.setattr(Experiment, "to_pandas", to_pandas)
def mock_experiment_with_random_to_pandas(monkeypatch, status=None, unbalanced=False):
def to_pandas(self, with_evc_tree=False):
if unbalanced:
N = numpy.random.randint(5, 15)
elif status is not None:
N = len(status)
else:
N = 10
ids = numpy.arange(N)
x = numpy.random.normal(0, 0.1, size=N)
y = numpy.random.normal(0, 0.1, size=N)
objectives = numpy.random.normal(0, 0.1, size=N)
if status is None:
exp_status = ["completed"] * N
else:
exp_status = status
data = pandas.DataFrame(
data={
"id": ids,
"x": x,
"y": y,
"objective": objectives,
"status": exp_status,
"suggested": ids,
}
)
return data
monkeypatch.setattr(Experiment, "to_pandas", to_pandas)
def mock_model():
"""Return a mocked regressor which just predict iterated integers"""
class Model:
"""Mocked Regressor"""
def __init__(self):
self.i = 0
def predict(self, data):
"""Returns counting of predictions requested."""
data = numpy.arange(data.shape[0]) + self.i
self.i += data.shape[0]
return data # + numpy.random.normal(0, self.i, size=data.shape[0])
return Model()
def mock_train_regressor(monkeypatch, assert_model=None, assert_model_kwargs=None):
"""Mock the train_regressor to return the mocked regressor instead"""
def train_regressor(model, data, **kwargs):
"""Return the mocked model, and then model argument if requested"""
if assert_model:
assert model == assert_model
if assert_model_kwargs:
assert kwargs == assert_model_kwargs
return mock_model()
monkeypatch.setattr(
"orion.analysis.partial_dependency_utils.train_regressor", train_regressor
)
@pytest.mark.usefixtures("version_XYZ")
class TestLPI:
"""Tests the ``lpi()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
lpi(None)
def test_returns_plotly_object(self):
"""Tests that the plotly backend returns a plotly object"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self, monkeypatch):
"""Tests the layout of the plot"""
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
df = experiment.to_pandas()
assert df["x"].tolist() == [0, 1, 2, 4]
assert df["y"].tolist() == [3, 2, 0, 1]
assert df["objective"].tolist() == [0.1, 0.2, 0.3, 0.5]
assert_lpi_plot(plot, dims=["x", "y"])
def test_experiment_worker_as_parameter(self, monkeypatch):
"""Tests that ``Experiment`` is a valid parameter"""
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
experiment,
_,
):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y"])
def test_ignore_uncompleted_statuses(self, monkeypatch):
"""Tests that uncompleted statuses are ignored"""
config = mock_space()
mock_experiment(
monkeypatch,
ids="abcdefgh",
x=[0, 0, 0, 1, 0, 2, 0, 3],
y=[1, 0, 0, 2, 0, 0, 0, 3],
objectives=[0.1, None, None, 0.2, None, 0.3, None, 0.5],
status=[
"completed",
"new",
"reserved",
"completed",
"broken",
"completed",
"interrupted",
"completed",
],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment)
assert_lpi_plot(plot, dims=["x", "y"])
def test_multidim(self, monkeypatch):
"""Tests that dimensions with shape > 1 are flattened properly"""
config = mock_space(y="uniform(0, 3, shape=2)")
mock_experiment(monkeypatch, y=[[3, 3], [2, 3], [1, 2], [0, 3]])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y[0]", "y[1]"])
def test_fidelity(self, monkeypatch):
"""Tests that fidelity is supported"""
config = mock_space(y="fidelity(1, 200, base=3)")
mock_experiment(monkeypatch, y=[1, 3 ** 2, 1, 3 ** 4])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y"])
def test_categorical(self, monkeypatch):
"""Tests that categorical is supported"""
config = mock_space(y='choices(["a", "b", "c"])')
mock_experiment(monkeypatch, y=["c", "c", "a", "b"])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y"])
def test_categorical_multidim(self, monkeypatch):
"""Tests that multidim categorical is supported"""
config = mock_space(y='choices(["a", "b", "c"], shape=3)')
mock_experiment(
monkeypatch,
y=[["c", "b", "a"], ["c", "a", "c"], ["a", "b", "a"], ["c", "b", "b"]],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = lpi(experiment, model_kwargs=dict(random_state=1))
assert_lpi_plot(plot, dims=["x", "y[0]", "y[1]", "y[2]"])
@pytest.mark.usefixtures("version_XYZ")
class TestParallelCoordinates:
"""Tests the ``parallel_coordinates()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
parallel_coordinates(None)
def test_returns_plotly_object(self):
"""Tests that the plotly backend returns a plotly object"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = parallel_coordinates(experiment)
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self):
"""Tests the layout of the plot"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["x", "loss"])
def test_experiment_worker_as_parameter(self):
"""Tests that ``Experiment`` is a valid parameter"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
experiment,
_,
):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["x", "loss"])
def test_ignore_uncompleted_statuses(self):
"""Tests that uncompleted statuses are ignored"""
with create_experiment(config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["x", "loss"])
def test_unsupported_order_key(self):
"""Tests that unsupported order keys are rejected"""
with create_experiment(config, trial_config) as (_, _, experiment):
with pytest.raises(ValueError):
parallel_coordinates(experiment, order=["unsupported"])
def test_order_columns(self):
"""Tests that columns are sorted according to ``order``"""
multidim_config = copy.deepcopy(config)
for k in "yzutv":
multidim_config["space"][k] = "uniform(0, 200)"
with create_experiment(multidim_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment, order="vzyx")
assert_parallel_coordinates_plot(plot, order=["v", "z", "y", "x", "loss"])
def test_multidim(self):
"""Tests that dimensions with shape > 1 are flattened properly"""
multidim_config = copy.deepcopy(config)
multidim_config["space"]["y"] = "uniform(0, 200, shape=4)"
with create_experiment(multidim_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(
plot, order=["x", "y[0]", "y[1]", "y[2]", "y[3]", "loss"]
)
def test_fidelity(self):
"""Tests that fidelity is set to first column by default"""
fidelity_config = copy.deepcopy(config)
fidelity_config["space"]["z"] = "fidelity(1, 200, base=3)"
with create_experiment(fidelity_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["z", "x", "loss"])
def test_categorical(self):
"""Tests that categorical is supported"""
categorical_config = copy.deepcopy(config)
categorical_config["space"]["z"] = 'choices(["a", "b", "c"])'
with create_experiment(categorical_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(plot, order=["x", "z", "loss"])
def test_categorical_multidim(self):
"""Tests that multidim categorical is supported"""
categorical_config = copy.deepcopy(config)
categorical_config["space"]["z"] = 'choices(["a", "b", "c"], shape=3)'
with create_experiment(categorical_config, trial_config) as (_, _, experiment):
plot = parallel_coordinates(experiment)
assert_parallel_coordinates_plot(
plot, order=["x", "z[0]", "z[1]", "z[2]", "loss"]
)
@pytest.mark.usefixtures("version_XYZ")
class TestPartialDependencies:
"""Tests the ``partial_dependencies()`` method provided by the plotly backend"""
def test_returns_plotly_object(self, monkeypatch):
"""Tests that the plotly backend returns a plotly object"""
mock_train_regressor(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self, monkeypatch):
"""Tests the layout of the plot"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
df = experiment.to_pandas()
assert df["x"].tolist() == [0, 1, 2, 4]
assert df["y"].tolist() == [3, 2, 0, 1]
assert df["objective"].tolist() == [0.1, 0.2, 0.3, 0.5]
assert_partial_dependencies_plot(plot, dims=["x", "y"])
def test_ignore_uncompleted_statuses(self, monkeypatch):
"""Tests that uncompleted statuses are ignored"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(
monkeypatch,
ids="abcdefgh",
x=[0, 0, 0, 1, 0, 2, 0, 3],
y=[1, 0, 0, 2, 0, 0, 0, 3],
objectives=[0.1, None, None, 0.2, None, 0.3, None, 0.5],
status=[
"completed",
"new",
"reserved",
"completed",
"broken",
"completed",
"interrupted",
"completed",
],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(experiment, n_grid_points=5)
assert_partial_dependencies_plot(plot, dims=["x", "y"])
def test_multidim(self, monkeypatch):
"""Tests that dimensions with shape > 1 are flattened properly"""
mock_train_regressor(monkeypatch)
config = mock_space(y="uniform(0, 3, shape=2)")
mock_experiment(monkeypatch, y=[[3, 3], [2, 3], [1, 2], [0, 3]])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(plot, dims=["x", "y[0]", "y[1]"])
def test_fidelity(self, monkeypatch):
"""Tests that fidelity is supported"""
mock_train_regressor(monkeypatch)
config = mock_space(y="fidelity(1, 200, base=3)")
mock_experiment(monkeypatch, y=[1, 3 ** 2, 1, 3 ** 4])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(plot, dims=["x", "y"], log_dims=["y"])
def test_categorical(self, monkeypatch):
"""Tests that categorical is supported"""
mock_train_regressor(monkeypatch)
config = mock_space(y='choices(["a", "b", "c"])')
mock_experiment(monkeypatch, y=["c", "c", "a", "b"])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
# There is only 3 categories, so test must be adjusted accordingly.
assert_partial_dependencies_plot(
plot, dims=["x", "y"], n_grid_points={"x": 5, "y": 3}
)
def test_categorical_multidim(self, monkeypatch):
"""Tests that multidim categorical is supported"""
mock_train_regressor(monkeypatch)
config = mock_space(y='choices(["a", "b", "c"], shape=3)')
mock_experiment(
monkeypatch,
y=[["c", "b", "a"], ["c", "a", "c"], ["a", "b", "a"], ["c", "b", "b"]],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(
plot,
dims=["x", "y[0]", "y[1]", "y[2]"],
n_grid_points={"x": 5, "y[0]": 3, "y[1]": 3, "y[2]": 3},
)
def test_logarithmic_scales_first(self, monkeypatch):
"""Test that log dims are turn to log scale
Test first dim specifically because special xaxis name for first dim.
"""
mock_train_regressor(monkeypatch)
config = mock_space(x="loguniform(0.001, 1)", z="uniform(0, 1)")
mock_experiment(monkeypatch, x=[0.001, 0.1, 0.01, 1], z=[0, 0.1, 0.2, 0.5])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(
plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["x"]
)
def test_logarithmic_scales_any_dim(self, monkeypatch):
"""Test that log dims are turn to log scale"""
mock_train_regressor(monkeypatch)
config = mock_space(y="loguniform(0.001, 1)", z="uniform(0, 1)")
mock_experiment(monkeypatch, y=[0.001, 0.1, 0.01, 1], z=[0, 0.1, 0.2, 0.5])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(
plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["y"]
)
def test_int_logarithmic_scales(self, monkeypatch):
"""Test that int log dims are turn to log scale"""
mock_train_regressor(monkeypatch)
config = mock_space(y="loguniform(1, 1000, discrete=True)", z="uniform(0, 1)")
mock_experiment(monkeypatch, y=[1, 10, 100, 1000], z=[0, 0.1, 0.2, 0.5])
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(
plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["y"]
)
def test_one_param(self, monkeypatch):
"""Test ploting a space with only 1 dim"""
mock_train_regressor(monkeypatch)
config = mock_space(y=None)
mock_experiment(monkeypatch, y="drop")
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment, n_grid_points=5, model_kwargs=dict(random_state=1)
)
assert_partial_dependencies_plot(plot, dims=["x"], n_grid_points=5)
def test_select_params(self, monkeypatch):
"""Test selecting subset"""
mock_train_regressor(monkeypatch)
config = mock_space(z="uniform(0, 1)")
mock_experiment(monkeypatch, z=[0, 0.1, 0.2, 0.5])
for params in [["x"], ["x", "y"], ["y", "z"]]:
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
params=params,
n_grid_points=5,
model_kwargs=dict(random_state=1),
)
assert_partial_dependencies_plot(plot, dims=params, n_grid_points=5)
def test_custom_smoothing(self, monkeypatch):
"""Test changing smoothing value"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=5,
model_kwargs=dict(random_state=1),
smoothing=1.2,
)
with pytest.raises(AssertionError):
assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=5)
assert_partial_dependencies_plot(
plot, dims=["x", "y"], n_grid_points=5, smoothing=1.2
)
def test_custom_n_grid_points(self, monkeypatch):
"""Test changing n_grid_points value"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=10,
model_kwargs=dict(random_state=1),
)
with pytest.raises(AssertionError):
assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=5)
assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=10)
def test_custom_n_samples(self, monkeypatch):
"""Test changing n_samples value"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
PARAMS = ["x", "y"]
N_SAMPLES = numpy.random.randint(20, 50)
def mock_partial_dependency_grid(space, model, params, samples, n_points):
print(samples)
assert samples.shape == (N_SAMPLES, len(PARAMS))
return partial_dependency_grid(space, model, params, samples, n_points)
monkeypatch.setattr(
"orion.analysis.partial_dependency_utils.partial_dependency_grid",
mock_partial_dependency_grid,
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=10,
model_kwargs=dict(random_state=1),
n_samples=N_SAMPLES,
)
def test_custom_colorscale(self, monkeypatch):
"""Test changing colorscale"""
mock_train_regressor(monkeypatch)
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=5,
colorscale="Viridis",
model_kwargs=dict(random_state=1),
)
with pytest.raises(AssertionError):
assert_partial_dependencies_plot(
plot, dims=["x", "y"], n_grid_points=5, custom_colorscale=False
)
assert_partial_dependencies_plot(
plot, dims=["x", "y"], n_grid_points=5, custom_colorscale=True
)
def test_custom_model(self, monkeypatch):
"""Test changing type of regression model"""
mock_train_regressor(monkeypatch, assert_model="BaggingRegressor")
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=5,
model="BaggingRegressor",
model_kwargs=dict(random_state=1),
)
def test_custom_model_kwargs(self, monkeypatch):
"""Test changing arguments of regression model"""
mock_train_regressor(monkeypatch, assert_model_kwargs=dict(random_state=1))
config = mock_space()
mock_experiment(monkeypatch)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = partial_dependencies(
experiment,
n_grid_points=5,
model_kwargs=dict(random_state=1),
)
@pytest.mark.usefixtures("version_XYZ")
class TestRankings:
"""Tests the ``rankings()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
rankings(None)
def test_returns_plotly_object(self, monkeypatch):
"""Tests that the plotly backend returns a plotly object"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings([experiment, experiment])
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self, monkeypatch):
"""Tests the layout of the plot"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings([experiment])
assert_rankings_plot(plot, [f"{experiment.name}-v{experiment.version}"])
def test_list_of_experiments(self, monkeypatch):
"""Tests the rankings with list of experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
child = orion.client.create_experiment(
experiment.name, branching={"branch_to": "child"}
)
plot = rankings([experiment, child])
# Exps are sorted alphabetically by names.
assert_rankings_plot(
plot, [f"{exp.name}-v{exp.version}" for exp in [child, experiment]]
)
def test_list_of_experiments_name_conflict(self, monkeypatch):
"""Tests the rankings with list of experiments with the same name"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
child = orion.client.create_experiment(
experiment.name, branching={"branch_to": experiment.name}
)
assert child.name == experiment.name
assert child.version == experiment.version + 1
plot = rankings([experiment, child])
# Exps are sorted alphabetically by names.
assert_rankings_plot(
plot, [f"{exp.name}-v{exp.version}" for exp in [experiment, child]]
)
def test_dict_of_experiments(self, monkeypatch):
"""Tests the rankings with renamed experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings({"exp-1": experiment, "exp-2": experiment})
assert_rankings_plot(plot, ["exp-1", "exp-2"])
def test_list_of_dict_of_experiments(self, monkeypatch):
"""Tests the rankings with avg of competitions"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings(
[{"exp-1": experiment, "exp-2": experiment} for _ in range(10)]
)
assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True)
def test_dict_of_list_of_experiments(self, monkeypatch):
"""Tests the rankings with avg of experiments separated in lists"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True)
def test_unbalanced_experiments(self, monkeypatch):
"""Tests the regrets with avg of unbalanced experiments"""
mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0)
def test_ignore_uncompleted_statuses(self, monkeypatch):
"""Tests that uncompleted statuses are ignored"""
mock_experiment_with_random_to_pandas(
monkeypatch,
status=[
"completed",
"new",
"reserved",
"completed",
"broken",
"completed",
"interrupted",
"completed",
],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = rankings([experiment])
assert_rankings_plot(
plot, [f"{experiment.name}-v{experiment.version}"], balanced=4
)
def test_unsupported_order_key(self):
"""Tests that unsupported order keys are rejected"""
with create_experiment(config, trial_config) as (_, _, experiment):
with pytest.raises(ValueError):
rankings([experiment], order_by="unsupported")
@pytest.mark.usefixtures("version_XYZ")
class TestRegret:
"""Tests the ``regret()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
regret(None)
def test_returns_plotly_object(self):
"""Tests that the plotly backend returns a plotly object"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regret(experiment)
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self):
"""Tests the layout of the plot"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regret(experiment)
assert_regret_plot(plot)
def test_experiment_worker_as_parameter(self):
"""Tests that ``Experiment`` is a valid parameter"""
with create_experiment(config, trial_config, ["completed"]) as (
_,
experiment,
_,
):
plot = regret(experiment)
assert_regret_plot(plot)
def test_ignore_uncompleted_statuses(self):
"""Tests that uncompleted statuses are ignored"""
with create_experiment(config, trial_config) as (_, _, experiment):
plot = regret(experiment)
assert_regret_plot(plot)
def test_unsupported_order_key(self):
"""Tests that unsupported order keys are rejected"""
with create_experiment(config, trial_config) as (_, _, experiment):
with pytest.raises(ValueError):
regret(experiment, order_by="unsupported")
@pytest.mark.usefixtures("version_XYZ")
class TestRegrets:
"""Tests the ``regrets()`` method provided by the plotly backend"""
def test_requires_argument(self):
"""Tests that the experiment data are required."""
with pytest.raises(ValueError):
regrets(None)
def test_returns_plotly_object(self, monkeypatch):
"""Tests that the plotly backend returns a plotly object"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets([experiment])
assert type(plot) is plotly.graph_objects.Figure
def test_graph_layout(self, monkeypatch):
"""Tests the layout of the plot"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets([experiment])
assert_regrets_plot(plot, [f"{experiment.name}-v{experiment.version}"])
def test_list_of_experiments(self, monkeypatch):
"""Tests the regrets with list of experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
child = orion.client.create_experiment(
experiment.name, branching={"branch_to": "child"}
)
plot = regrets([experiment, child])
# Exps are sorted alphabetically by names.
assert_regrets_plot(
plot, [f"{exp.name}-v{exp.version}" for exp in [child, experiment]]
)
def test_list_of_experiments_name_conflict(self, monkeypatch):
"""Tests the regrets with list of experiments with the same name"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
child = orion.client.create_experiment(
experiment.name, branching={"branch_to": experiment.name}
)
assert child.name == experiment.name
assert child.version == experiment.version + 1
plot = regrets([experiment, child])
# Exps are sorted alphabetically by names.
assert_regrets_plot(
plot, [f"{exp.name}-v{exp.version}" for exp in [experiment, child]]
)
def test_dict_of_experiments(self, monkeypatch):
"""Tests the regrets with renamed experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets({"exp-1": experiment, "exp-2": experiment})
assert_regrets_plot(plot, ["exp-1", "exp-2"])
def test_dict_of_list_of_experiments(self, monkeypatch):
"""Tests the regrets with avg of experiments"""
mock_experiment_with_random_to_pandas(monkeypatch)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
assert_regrets_plot(plot, ["exp-1", "exp-2"], with_avg=True)
def test_unbalanced_experiments(self, monkeypatch):
"""Tests the regrets with avg of unbalanced experiments"""
mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True)
with create_experiment(config, trial_config, ["completed"]) as (
_,
_,
experiment,
):
plot = regrets({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10})
assert_regrets_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0)
def test_ignore_uncompleted_statuses(self, monkeypatch):
"""Tests that uncompleted statuses are ignored"""
mock_experiment_with_random_to_pandas(
monkeypatch,
status=[
"completed",
"new",
"reserved",
"completed",
"broken",
"completed",
"interrupted",
"completed",
],
)
with create_experiment(config, trial_config) as (_, _, experiment):
plot = regrets([experiment])
assert_regrets_plot(
plot, [f"{experiment.name}-v{experiment.version}"], balanced=4
)
def test_unsupported_order_key(self):
"""Tests that unsupported order keys are rejected"""
with create_experiment(config, trial_config) as (_, _, experiment):
with pytest.raises(ValueError):
regrets([experiment], order_by="unsupported")
| [((176, 1, 176, 39), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(176, 25, 176, 38): '"""version_XYZ"""'}, {}), "('version_XYZ')", False, 'import pytest\n'), ((292, 1, 292, 39), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(292, 25, 292, 38): '"""version_XYZ"""'}, {}), "('version_XYZ')", False, 'import pytest\n'), ((398, 1, 398, 39), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(398, 25, 398, 38): '"""version_XYZ"""'}, {}), "('version_XYZ')", False, 'import pytest\n'), ((716, 1, 716, 39), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(716, 25, 716, 38): '"""version_XYZ"""'}, {}), "('version_XYZ')", False, 'import pytest\n'), ((867, 1, 867, 39), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(867, 25, 867, 38): '"""version_XYZ"""'}, {}), "('version_XYZ')", False, 'import pytest\n'), ((923, 1, 923, 39), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(923, 25, 923, 38): '"""version_XYZ"""'}, {}), "('version_XYZ')", False, 'import pytest\n'), ((66, 20, 66, 41), 'copy.deepcopy', 'copy.deepcopy', ({(66, 34, 66, 40): 'config'}, {}), '(config)', False, 'import copy\n'), ((104, 15, 104, 42), 'pandas.DataFrame', 'pandas.DataFrame', (), '', False, 'import pandas\n'), ((117, 14, 117, 29), 'numpy.arange', 'numpy.arange', ({(117, 27, 117, 28): 'N'}, {}), '(N)', False, 'import numpy\n'), ((118, 12, 118, 47), 'numpy.random.normal', 'numpy.random.normal', (), '', False, 'import numpy\n'), ((119, 12, 119, 47), 'numpy.random.normal', 'numpy.random.normal', (), '', False, 'import numpy\n'), ((120, 21, 120, 56), 'numpy.random.normal', 'numpy.random.normal', (), '', False, 'import numpy\n'), ((126, 15, 135, 9), 'pandas.DataFrame', 'pandas.DataFrame', (), '', False, 'import pandas\n'), ((211, 8, 211, 46), 'orion.testing.plotting.assert_lpi_plot', 'assert_lpi_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((224, 8, 224, 46), 'orion.testing.plotting.assert_lpi_plot', 'assert_lpi_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((249, 8, 249, 46), 'orion.testing.plotting.assert_lpi_plot', 'assert_lpi_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((258, 8, 258, 57), 'orion.testing.plotting.assert_lpi_plot', 'assert_lpi_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((267, 8, 267, 46), 'orion.testing.plotting.assert_lpi_plot', 'assert_lpi_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((276, 8, 276, 46), 'orion.testing.plotting.assert_lpi_plot', 'assert_lpi_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((289, 8, 289, 65), 'orion.testing.plotting.assert_lpi_plot', 'assert_lpi_plot', (), '', 
False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((321, 8, 321, 67), 'orion.testing.plotting.assert_parallel_coordinates_plot', 'assert_parallel_coordinates_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((332, 8, 332, 67), 'orion.testing.plotting.assert_parallel_coordinates_plot', 'assert_parallel_coordinates_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((339, 8, 339, 67), 'orion.testing.plotting.assert_parallel_coordinates_plot', 'assert_parallel_coordinates_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((349, 26, 349, 47), 'copy.deepcopy', 'copy.deepcopy', ({(349, 40, 349, 46): 'config'}, {}), '(config)', False, 'import copy\n'), ((355, 8, 355, 82), 'orion.testing.plotting.assert_parallel_coordinates_plot', 'assert_parallel_coordinates_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((359, 26, 359, 47), 'copy.deepcopy', 'copy.deepcopy', ({(359, 40, 359, 46): 'config'}, {}), '(config)', False, 'import copy\n'), ((364, 8, 366, 9), 'orion.testing.plotting.assert_parallel_coordinates_plot', 'assert_parallel_coordinates_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((370, 26, 370, 47), 'copy.deepcopy', 'copy.deepcopy', ({(370, 40, 370, 46): 'config'}, {}), '(config)', False, 'import copy\n'), ((375, 8, 375, 72), 'orion.testing.plotting.assert_parallel_coordinates_plot', 'assert_parallel_coordinates_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((379, 29, 379, 50), 'copy.deepcopy', 'copy.deepcopy', ({(379, 43, 379, 49): 'config'}, {}), '(config)', False, 'import copy\n'), ((384, 8, 384, 72), 'orion.testing.plotting.assert_parallel_coordinates_plot', 'assert_parallel_coordinates_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((388, 29, 388, 50), 'copy.deepcopy', 'copy.deepcopy', ({(388, 43, 388, 49): 'config'}, {}), '(config)', False, 'import copy\n'), ((393, 8, 395, 9), 'orion.testing.plotting.assert_parallel_coordinates_plot', 'assert_parallel_coordinates_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((434, 8, 434, 63), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import 
assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((460, 8, 460, 63), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((472, 8, 472, 74), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((484, 8, 484, 79), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((497, 8, 499, 9), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((515, 8, 519, 9), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((535, 8, 537, 9), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((550, 8, 552, 9), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((565, 8, 567, 9), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((580, 8, 580, 75), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((616, 8, 618, 9), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((636, 8, 636, 81), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, 
assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((645, 20, 645, 48), 'numpy.random.randint', 'numpy.random.randint', ({(645, 41, 645, 43): '20', (645, 45, 645, 47): '50'}, {}), '(20, 50)', False, 'import numpy\n'), ((684, 8, 686, 9), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((747, 8, 747, 80), 'orion.testing.plotting.assert_rankings_plot', 'assert_rankings_plot', ({(747, 29, 747, 33): 'plot', (747, 35, 747, 79): "[f'{experiment.name}-v{experiment.version}']"}, {}), "(plot, [f'{experiment.name}-v{experiment.version}'])", False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((764, 8, 766, 9), 'orion.testing.plotting.assert_rankings_plot', 'assert_rankings_plot', ({(765, 12, 765, 16): 'plot', (765, 18, 765, 79): "[f'{exp.name}-v{exp.version}' for exp in [child, experiment]]"}, {}), "(plot, [f'{exp.name}-v{exp.version}' for exp in [child,\n experiment]])", False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((784, 8, 786, 9), 'orion.testing.plotting.assert_rankings_plot', 'assert_rankings_plot', ({(785, 12, 785, 16): 'plot', (785, 18, 785, 79): "[f'{exp.name}-v{exp.version}' for exp in [experiment, child]]"}, {}), "(plot, [f'{exp.name}-v{exp.version}' for exp in [\n experiment, child]])", False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((798, 8, 798, 54), 'orion.testing.plotting.assert_rankings_plot', 'assert_rankings_plot', ({(798, 29, 798, 33): 'plot', (798, 35, 798, 53): "['exp-1', 'exp-2']"}, {}), "(plot, ['exp-1', 'exp-2'])", False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((812, 8, 812, 69), 'orion.testing.plotting.assert_rankings_plot', 'assert_rankings_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((824, 8, 824, 69), 'orion.testing.plotting.assert_rankings_plot', 'assert_rankings_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((836, 8, 836, 81), 'orion.testing.plotting.assert_rankings_plot', 'assert_rankings_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((856, 8, 858, 9), 'orion.testing.plotting.assert_rankings_plot', 'assert_rankings_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, 
assert_regrets_plot\n'), ((896, 8, 896, 32), 'orion.testing.plotting.assert_regret_plot', 'assert_regret_plot', ({(896, 27, 896, 31): 'plot'}, {}), '(plot)', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((907, 8, 907, 32), 'orion.testing.plotting.assert_regret_plot', 'assert_regret_plot', ({(907, 27, 907, 31): 'plot'}, {}), '(plot)', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((914, 8, 914, 32), 'orion.testing.plotting.assert_regret_plot', 'assert_regret_plot', ({(914, 27, 914, 31): 'plot'}, {}), '(plot)', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((954, 8, 954, 79), 'orion.testing.plotting.assert_regrets_plot', 'assert_regrets_plot', ({(954, 28, 954, 32): 'plot', (954, 34, 954, 78): "[f'{experiment.name}-v{experiment.version}']"}, {}), "(plot, [f'{experiment.name}-v{experiment.version}'])", False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((971, 8, 973, 9), 'orion.testing.plotting.assert_regrets_plot', 'assert_regrets_plot', ({(972, 12, 972, 16): 'plot', (972, 18, 972, 79): "[f'{exp.name}-v{exp.version}' for exp in [child, experiment]]"}, {}), "(plot, [f'{exp.name}-v{exp.version}' for exp in [child,\n experiment]])", False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((991, 8, 993, 9), 'orion.testing.plotting.assert_regrets_plot', 'assert_regrets_plot', ({(992, 12, 992, 16): 'plot', (992, 18, 992, 79): "[f'{exp.name}-v{exp.version}' for exp in [experiment, child]]"}, {}), "(plot, [f'{exp.name}-v{exp.version}' for exp in [\n experiment, child]])", False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((1005, 8, 1005, 53), 'orion.testing.plotting.assert_regrets_plot', 'assert_regrets_plot', ({(1005, 28, 1005, 32): 'plot', (1005, 34, 1005, 52): "['exp-1', 'exp-2']"}, {}), "(plot, ['exp-1', 'exp-2'])", False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((1017, 8, 1017, 68), 'orion.testing.plotting.assert_regrets_plot', 'assert_regrets_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((1029, 8, 1029, 80), 'orion.testing.plotting.assert_regrets_plot', 'assert_regrets_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((1049, 8, 1051, 9), 'orion.testing.plotting.assert_regrets_plot', 'assert_regrets_plot', (), '', False, 'from orion.testing.plotting import 
assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((112, 16, 112, 43), 'numpy.random.randint', 'numpy.random.randint', ({(112, 37, 112, 38): '5', (112, 40, 112, 42): '15'}, {}), '(5, 15)', False, 'import numpy\n'), ((182, 13, 182, 38), 'pytest.raises', 'pytest.raises', ({(182, 27, 182, 37): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((183, 12, 183, 21), 'orion.plotting.base.lpi', 'lpi', ({(183, 16, 183, 20): 'None'}, {}), '(None)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((187, 13, 187, 67), 'orion.testing.create_experiment', 'create_experiment', ({(187, 31, 187, 37): 'config', (187, 39, 187, 51): 'trial_config', (187, 53, 187, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((200, 13, 200, 67), 'orion.testing.create_experiment', 'create_experiment', ({(200, 31, 200, 37): 'config', (200, 39, 200, 51): 'trial_config', (200, 53, 200, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((217, 13, 217, 67), 'orion.testing.create_experiment', 'create_experiment', ({(217, 31, 217, 37): 'config', (217, 39, 217, 51): 'trial_config', (217, 53, 217, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((246, 13, 246, 52), 'orion.testing.create_experiment', 'create_experiment', ({(246, 31, 246, 37): 'config', (246, 39, 246, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((247, 19, 247, 34), 'orion.plotting.base.lpi', 'lpi', ({(247, 23, 247, 33): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((255, 13, 255, 52), 'orion.testing.create_experiment', 'create_experiment', ({(255, 31, 255, 37): 'config', (255, 39, 255, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((264, 13, 264, 52), 'orion.testing.create_experiment', 'create_experiment', ({(264, 31, 264, 37): 'config', (264, 39, 264, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((273, 13, 273, 52), 'orion.testing.create_experiment', 'create_experiment', ({(273, 31, 273, 37): 'config', (273, 39, 273, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((286, 13, 286, 52), 'orion.testing.create_experiment', 'create_experiment', ({(286, 31, 286, 37): 'config', (286, 39, 286, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((298, 13, 298, 38), 'pytest.raises', 'pytest.raises', ({(298, 27, 298, 37): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((299, 12, 299, 38), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(299, 33, 299, 37): 'None'}, {}), '(None)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((303, 13, 303, 67), 'orion.testing.create_experiment', 'create_experiment', ({(303, 31, 303, 37): 'config', (303, 39, 303, 51): 'trial_config', (303, 53, 303, 66): "['completed']"}, {}), "(config, trial_config, 
['completed'])", False, 'from orion.testing import create_experiment\n'), ((308, 19, 308, 51), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(308, 40, 308, 50): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((314, 13, 314, 67), 'orion.testing.create_experiment', 'create_experiment', ({(314, 31, 314, 37): 'config', (314, 39, 314, 51): 'trial_config', (314, 53, 314, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((319, 19, 319, 51), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(319, 40, 319, 50): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((325, 13, 325, 67), 'orion.testing.create_experiment', 'create_experiment', ({(325, 31, 325, 37): 'config', (325, 39, 325, 51): 'trial_config', (325, 53, 325, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((330, 19, 330, 51), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(330, 40, 330, 50): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((336, 13, 336, 52), 'orion.testing.create_experiment', 'create_experiment', ({(336, 31, 336, 37): 'config', (336, 39, 336, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((337, 19, 337, 51), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(337, 40, 337, 50): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((343, 13, 343, 52), 'orion.testing.create_experiment', 'create_experiment', ({(343, 31, 343, 37): 'config', (343, 39, 343, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((352, 13, 352, 61), 'orion.testing.create_experiment', 'create_experiment', ({(352, 31, 352, 46): 'multidim_config', (352, 48, 352, 60): 'trial_config'}, {}), '(multidim_config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((353, 19, 353, 65), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', (), '', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((361, 13, 361, 61), 'orion.testing.create_experiment', 'create_experiment', ({(361, 31, 361, 46): 'multidim_config', (361, 48, 361, 60): 'trial_config'}, {}), '(multidim_config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((362, 19, 362, 51), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(362, 40, 362, 50): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((372, 13, 372, 61), 'orion.testing.create_experiment', 'create_experiment', ({(372, 31, 372, 46): 'fidelity_config', (372, 48, 372, 60): 'trial_config'}, {}), '(fidelity_config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((373, 19, 373, 51), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(373, 40, 373, 50): 'experiment'}, {}), 
'(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((381, 13, 381, 64), 'orion.testing.create_experiment', 'create_experiment', ({(381, 31, 381, 49): 'categorical_config', (381, 51, 381, 63): 'trial_config'}, {}), '(categorical_config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((382, 19, 382, 51), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(382, 40, 382, 50): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((390, 13, 390, 64), 'orion.testing.create_experiment', 'create_experiment', ({(390, 31, 390, 49): 'categorical_config', (390, 51, 390, 63): 'trial_config'}, {}), '(categorical_config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((391, 19, 391, 51), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', ({(391, 40, 391, 50): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((405, 13, 405, 67), 'orion.testing.create_experiment', 'create_experiment', ({(405, 31, 405, 37): 'config', (405, 39, 405, 51): 'trial_config', (405, 53, 405, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((421, 13, 421, 67), 'orion.testing.create_experiment', 'create_experiment', ({(421, 31, 421, 37): 'config', (421, 39, 421, 51): 'trial_config', (421, 53, 421, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((457, 13, 457, 52), 'orion.testing.create_experiment', 'create_experiment', ({(457, 31, 457, 37): 'config', (457, 39, 457, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((458, 19, 458, 68), 'orion.plotting.base.partial_dependencies', 'partial_dependencies', (), '', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((467, 13, 467, 52), 'orion.testing.create_experiment', 'create_experiment', ({(467, 31, 467, 37): 'config', (467, 39, 467, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((479, 13, 479, 52), 'orion.testing.create_experiment', 'create_experiment', ({(479, 31, 479, 37): 'config', (479, 39, 479, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((491, 13, 491, 52), 'orion.testing.create_experiment', 'create_experiment', ({(491, 31, 491, 37): 'config', (491, 39, 491, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((510, 13, 510, 52), 'orion.testing.create_experiment', 'create_experiment', ({(510, 31, 510, 37): 'config', (510, 39, 510, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((530, 13, 530, 52), 'orion.testing.create_experiment', 'create_experiment', ({(530, 31, 530, 37): 'config', (530, 39, 530, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((545, 13, 545, 52), 'orion.testing.create_experiment', 'create_experiment', ({(545, 31, 545, 37): 'config', (545, 39, 545, 51): 'trial_config'}, {}), '(config, 
trial_config)', False, 'from orion.testing import create_experiment\n'), ((560, 13, 560, 52), 'orion.testing.create_experiment', 'create_experiment', ({(560, 31, 560, 37): 'config', (560, 39, 560, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((575, 13, 575, 52), 'orion.testing.create_experiment', 'create_experiment', ({(575, 31, 575, 37): 'config', (575, 39, 575, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((597, 12, 597, 80), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((605, 13, 605, 52), 'orion.testing.create_experiment', 'create_experiment', ({(605, 31, 605, 37): 'config', (605, 39, 605, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((613, 13, 613, 42), 'pytest.raises', 'pytest.raises', ({(613, 27, 613, 41): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((614, 12, 614, 84), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((626, 13, 626, 52), 'orion.testing.create_experiment', 'create_experiment', ({(626, 31, 626, 37): 'config', (626, 39, 626, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((633, 13, 633, 42), 'pytest.raises', 'pytest.raises', ({(633, 27, 633, 41): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((634, 12, 634, 84), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((650, 19, 650, 83), 'orion.analysis.partial_dependency_utils.partial_dependency_grid', 'partial_dependency_grid', ({(650, 43, 650, 48): 'space', (650, 50, 650, 55): 'model', (650, 57, 650, 63): 'params', (650, 65, 650, 72): 'samples', (650, 74, 650, 82): 'n_points'}, {}), '(space, model, params, samples, n_points)', False, 'from orion.analysis.partial_dependency_utils import partial_dependency_grid\n'), ((657, 13, 657, 52), 'orion.testing.create_experiment', 'create_experiment', ({(657, 31, 657, 37): 'config', (657, 39, 657, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((671, 13, 671, 52), 'orion.testing.create_experiment', 'create_experiment', ({(671, 31, 671, 37): 'config', (671, 39, 671, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((679, 13, 679, 42), 'pytest.raises', 'pytest.raises', ({(679, 27, 679, 41): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((680, 12, 682, 13), 'orion.testing.plotting.assert_partial_dependencies_plot', 'assert_partial_dependencies_plot', (), '', False, 'from orion.testing.plotting import assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, 
assert_rankings_plot, assert_regret_plot, assert_regrets_plot\n'), ((694, 13, 694, 52), 'orion.testing.create_experiment', 'create_experiment', ({(694, 31, 694, 37): 'config', (694, 39, 694, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((708, 13, 708, 52), 'orion.testing.create_experiment', 'create_experiment', ({(708, 31, 708, 37): 'config', (708, 39, 708, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((722, 13, 722, 38), 'pytest.raises', 'pytest.raises', ({(722, 27, 722, 37): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((723, 12, 723, 26), 'orion.plotting.base.rankings', 'rankings', ({(723, 21, 723, 25): 'None'}, {}), '(None)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((728, 13, 728, 67), 'orion.testing.create_experiment', 'create_experiment', ({(728, 31, 728, 37): 'config', (728, 39, 728, 51): 'trial_config', (728, 53, 728, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((733, 19, 733, 53), 'orion.plotting.base.rankings', 'rankings', ({(733, 28, 733, 52): '[experiment, experiment]'}, {}), '([experiment, experiment])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((740, 13, 740, 67), 'orion.testing.create_experiment', 'create_experiment', ({(740, 31, 740, 37): 'config', (740, 39, 740, 51): 'trial_config', (740, 53, 740, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((745, 19, 745, 41), 'orion.plotting.base.rankings', 'rankings', ({(745, 28, 745, 40): '[experiment]'}, {}), '([experiment])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((752, 13, 752, 67), 'orion.testing.create_experiment', 'create_experiment', ({(752, 31, 752, 37): 'config', (752, 39, 752, 51): 'trial_config', (752, 53, 752, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((761, 19, 761, 48), 'orion.plotting.base.rankings', 'rankings', ({(761, 28, 761, 47): '[experiment, child]'}, {}), '([experiment, child])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((771, 13, 771, 67), 'orion.testing.create_experiment', 'create_experiment', ({(771, 31, 771, 37): 'config', (771, 39, 771, 51): 'trial_config', (771, 53, 771, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((781, 19, 781, 48), 'orion.plotting.base.rankings', 'rankings', ({(781, 28, 781, 47): '[experiment, child]'}, {}), '([experiment, child])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((791, 13, 791, 67), 'orion.testing.create_experiment', 'create_experiment', ({(791, 31, 791, 37): 'config', (791, 39, 791, 51): 'trial_config', (791, 53, 791, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((796, 19, 796, 71), 'orion.plotting.base.rankings', 'rankings', ({(796, 28, 796, 70): "{'exp-1': experiment, 'exp-2': experiment}"}, {}), "({'exp-1': experiment, 'exp-2': 
experiment})", False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((803, 13, 803, 67), 'orion.testing.create_experiment', 'create_experiment', ({(803, 31, 803, 37): 'config', (803, 39, 803, 51): 'trial_config', (803, 53, 803, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((817, 13, 817, 67), 'orion.testing.create_experiment', 'create_experiment', ({(817, 31, 817, 37): 'config', (817, 39, 817, 51): 'trial_config', (817, 53, 817, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((822, 19, 822, 85), 'orion.plotting.base.rankings', 'rankings', ({(822, 28, 822, 84): "{'exp-1': [experiment] * 10, 'exp-2': [experiment] * 10}"}, {}), "({'exp-1': [experiment] * 10, 'exp-2': [experiment] * 10})", False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((829, 13, 829, 67), 'orion.testing.create_experiment', 'create_experiment', ({(829, 31, 829, 37): 'config', (829, 39, 829, 51): 'trial_config', (829, 53, 829, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((834, 19, 834, 85), 'orion.plotting.base.rankings', 'rankings', ({(834, 28, 834, 84): "{'exp-1': [experiment] * 10, 'exp-2': [experiment] * 10}"}, {}), "({'exp-1': [experiment] * 10, 'exp-2': [experiment] * 10})", False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((853, 13, 853, 52), 'orion.testing.create_experiment', 'create_experiment', ({(853, 31, 853, 37): 'config', (853, 39, 853, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((854, 19, 854, 41), 'orion.plotting.base.rankings', 'rankings', ({(854, 28, 854, 40): '[experiment]'}, {}), '([experiment])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((862, 13, 862, 52), 'orion.testing.create_experiment', 'create_experiment', ({(862, 31, 862, 37): 'config', (862, 39, 862, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((873, 13, 873, 38), 'pytest.raises', 'pytest.raises', ({(873, 27, 873, 37): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((874, 12, 874, 24), 'orion.plotting.base.regret', 'regret', ({(874, 19, 874, 23): 'None'}, {}), '(None)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((878, 13, 878, 67), 'orion.testing.create_experiment', 'create_experiment', ({(878, 31, 878, 37): 'config', (878, 39, 878, 51): 'trial_config', (878, 53, 878, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((883, 19, 883, 37), 'orion.plotting.base.regret', 'regret', ({(883, 26, 883, 36): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((889, 13, 889, 67), 'orion.testing.create_experiment', 'create_experiment', ({(889, 31, 889, 37): 'config', (889, 39, 889, 51): 'trial_config', (889, 53, 889, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import 
create_experiment\n'), ((894, 19, 894, 37), 'orion.plotting.base.regret', 'regret', ({(894, 26, 894, 36): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((900, 13, 900, 67), 'orion.testing.create_experiment', 'create_experiment', ({(900, 31, 900, 37): 'config', (900, 39, 900, 51): 'trial_config', (900, 53, 900, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((905, 19, 905, 37), 'orion.plotting.base.regret', 'regret', ({(905, 26, 905, 36): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((911, 13, 911, 52), 'orion.testing.create_experiment', 'create_experiment', ({(911, 31, 911, 37): 'config', (911, 39, 911, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((912, 19, 912, 37), 'orion.plotting.base.regret', 'regret', ({(912, 26, 912, 36): 'experiment'}, {}), '(experiment)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((918, 13, 918, 52), 'orion.testing.create_experiment', 'create_experiment', ({(918, 31, 918, 37): 'config', (918, 39, 918, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((929, 13, 929, 38), 'pytest.raises', 'pytest.raises', ({(929, 27, 929, 37): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((930, 12, 930, 25), 'orion.plotting.base.regrets', 'regrets', ({(930, 20, 930, 24): 'None'}, {}), '(None)', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((935, 13, 935, 67), 'orion.testing.create_experiment', 'create_experiment', ({(935, 31, 935, 37): 'config', (935, 39, 935, 51): 'trial_config', (935, 53, 935, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((940, 19, 940, 40), 'orion.plotting.base.regrets', 'regrets', ({(940, 27, 940, 39): '[experiment]'}, {}), '([experiment])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((947, 13, 947, 67), 'orion.testing.create_experiment', 'create_experiment', ({(947, 31, 947, 37): 'config', (947, 39, 947, 51): 'trial_config', (947, 53, 947, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((952, 19, 952, 40), 'orion.plotting.base.regrets', 'regrets', ({(952, 27, 952, 39): '[experiment]'}, {}), '([experiment])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((959, 13, 959, 67), 'orion.testing.create_experiment', 'create_experiment', ({(959, 31, 959, 37): 'config', (959, 39, 959, 51): 'trial_config', (959, 53, 959, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((968, 19, 968, 47), 'orion.plotting.base.regrets', 'regrets', ({(968, 27, 968, 46): '[experiment, child]'}, {}), '([experiment, child])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((978, 13, 978, 67), 'orion.testing.create_experiment', 'create_experiment', ({(978, 
31, 978, 37): 'config', (978, 39, 978, 51): 'trial_config', (978, 53, 978, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((988, 19, 988, 47), 'orion.plotting.base.regrets', 'regrets', ({(988, 27, 988, 46): '[experiment, child]'}, {}), '([experiment, child])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((998, 13, 998, 67), 'orion.testing.create_experiment', 'create_experiment', ({(998, 31, 998, 37): 'config', (998, 39, 998, 51): 'trial_config', (998, 53, 998, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((1003, 19, 1003, 70), 'orion.plotting.base.regrets', 'regrets', ({(1003, 27, 1003, 69): "{'exp-1': experiment, 'exp-2': experiment}"}, {}), "({'exp-1': experiment, 'exp-2': experiment})", False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((1010, 13, 1010, 67), 'orion.testing.create_experiment', 'create_experiment', ({(1010, 31, 1010, 37): 'config', (1010, 39, 1010, 51): 'trial_config', (1010, 53, 1010, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((1015, 19, 1015, 84), 'orion.plotting.base.regrets', 'regrets', ({(1015, 27, 1015, 83): "{'exp-1': [experiment] * 10, 'exp-2': [experiment] * 10}"}, {}), "({'exp-1': [experiment] * 10, 'exp-2': [experiment] * 10})", False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((1022, 13, 1022, 67), 'orion.testing.create_experiment', 'create_experiment', ({(1022, 31, 1022, 37): 'config', (1022, 39, 1022, 51): 'trial_config', (1022, 53, 1022, 66): "['completed']"}, {}), "(config, trial_config, ['completed'])", False, 'from orion.testing import create_experiment\n'), ((1027, 19, 1027, 84), 'orion.plotting.base.regrets', 'regrets', ({(1027, 27, 1027, 83): "{'exp-1': [experiment] * 10, 'exp-2': [experiment] * 10}"}, {}), "({'exp-1': [experiment] * 10, 'exp-2': [experiment] * 10})", False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((1046, 13, 1046, 52), 'orion.testing.create_experiment', 'create_experiment', ({(1046, 31, 1046, 37): 'config', (1046, 39, 1046, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((1047, 19, 1047, 40), 'orion.plotting.base.regrets', 'regrets', ({(1047, 27, 1047, 39): '[experiment]'}, {}), '([experiment])', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((1055, 13, 1055, 52), 'orion.testing.create_experiment', 'create_experiment', ({(1055, 31, 1055, 37): 'config', (1055, 39, 1055, 51): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((153, 19, 153, 46), 'numpy.arange', 'numpy.arange', ({(153, 32, 153, 45): 'data.shape[0]'}, {}), '(data.shape[0])', False, 'import numpy\n'), ((344, 17, 344, 42), 'pytest.raises', 'pytest.raises', ({(344, 31, 344, 41): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((345, 16, 345, 71), 'orion.plotting.base.parallel_coordinates', 'parallel_coordinates', (), '', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((589, 
17, 589, 56), 'orion.testing.create_experiment', 'create_experiment', ({(589, 35, 589, 41): 'config', (589, 43, 589, 55): 'trial_config'}, {}), '(config, trial_config)', False, 'from orion.testing import create_experiment\n'), ((863, 17, 863, 42), 'pytest.raises', 'pytest.raises', ({(863, 31, 863, 41): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((864, 16, 864, 62), 'orion.plotting.base.rankings', 'rankings', (), '', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((919, 17, 919, 42), 'pytest.raises', 'pytest.raises', ({(919, 31, 919, 41): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((920, 16, 920, 58), 'orion.plotting.base.regret', 'regret', (), '', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n'), ((1056, 17, 1056, 42), 'pytest.raises', 'pytest.raises', ({(1056, 31, 1056, 41): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n'), ((1057, 16, 1057, 61), 'orion.plotting.base.regrets', 'regrets', (), '', False, 'from orion.plotting.base import lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets\n')] |
Jakob-Unfried/msc-legacy | autodiff/debug_vjp.py | 2c41f3f714936c25dd534bd66da802c26176fcfa | import pdb
import warnings
from jax import custom_vjp
@custom_vjp
def debug_identity(x):
"""
acts as identity, but inserts a pdb trace on the backwards pass
"""
warnings.warn('Using a module intended for debugging')
return x
def _debug_fwd(x):
warnings.warn('Using a module intended for debugging')
return x, x
# noinspection PyUnusedLocal
def _debug_bwd(x, g):
pdb.set_trace()
    return (g,)  # custom_vjp bwd rules must return a tuple of cotangents, one per primal argument
debug_identity.defvjp(_debug_fwd, _debug_bwd)
| [((12, 4, 12, 58), 'warnings.warn', 'warnings.warn', ({(12, 18, 12, 57): '"""Using a module intended for debugging"""'}, {}), "('Using a module intended for debugging')", False, 'import warnings\n'), ((17, 4, 17, 58), 'warnings.warn', 'warnings.warn', ({(17, 18, 17, 57): '"""Using a module intended for debugging"""'}, {}), "('Using a module intended for debugging')", False, 'import warnings\n'), ((23, 4, 23, 19), 'pdb.set_trace', 'pdb.set_trace', ({}, {}), '()', False, 'import pdb\n')] |
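A minimal usage sketch for the module above (added for illustration; the import path and the example function are assumptions, not part of the repo): differentiating through debug_identity should drop into pdb when JAX evaluates the backward rule.

# Hypothetical usage of debug_identity (illustrative only).
import jax
import jax.numpy as jnp

from autodiff.debug_vjp import debug_identity  # assumed import path

def loss(w):
    h = debug_identity(w * 2.0)  # pdb.set_trace() fires when the cotangent flows back through here
    return jnp.sum(h ** 2)

grad_fn = jax.grad(loss)
# grad_fn(jnp.ones(3))  # uncomment to step through the backward pass in pdb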
vwfinley/mileage | mileage.py | eb880107c8c38d33706eac74d01a0d0516716cc7 | #!/usr/bin/env python
# Some helpful links
# https://docs.python.org/3/library/tkinter.html
# https://www.python-course.eu/tkinter_entry_widgets.php
import tkinter as tk
class Application(tk.Frame):
def __init__(self, root=None):
super().__init__(root)
self.root = root
self.root.title("Mileage")
self.root.geometry("250x125")
self.pack()
        self.miles = tk.Entry(self)
        self.gallons = tk.Entry(self)
self.mpg = tk.Label(self)
self.init_widgets()
def init_widgets(self):
self.miles.grid(row=0)
tk.Label(self, text="Miles").grid(row=0, column=1)
self.gallons.grid(row=1)
tk.Label(self, text="Gallons").grid(row=1, column=1)
self.mpg.grid(row=2)
tk.Label(self, text="MPG").grid(row=2, column=1)
tk.Button(self, text="Calculate", command = self.calculate).grid(row=3, column=1)
tk.Button(self, text="Quit", command=self.root.destroy).grid(row=4, column=1)
def calculate(self):
self.mpg['text'] = float(self.miles.get()) / float(self.gallons.get())
app = Application(root=tk.Tk())
app.mainloop()
| [((19, 21, 19, 35), 'tkinter.Entry', 'tk.Entry', ({(19, 30, 19, 34): 'self'}, {}), '(self)', True, 'import tkinter as tk\n'), ((20, 23, 20, 37), 'tkinter.Entry', 'tk.Entry', ({(20, 32, 20, 36): 'self'}, {}), '(self)', True, 'import tkinter as tk\n'), ((21, 19, 21, 33), 'tkinter.Label', 'tk.Label', ({(21, 28, 21, 32): 'self'}, {}), '(self)', True, 'import tkinter as tk\n'), ((42, 23, 42, 30), 'tkinter.Tk', 'tk.Tk', ({}, {}), '()', True, 'import tkinter as tk\n'), ((27, 8, 27, 36), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((30, 8, 30, 38), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((33, 8, 33, 34), 'tkinter.Label', 'tk.Label', (), '', True, 'import tkinter as tk\n'), ((35, 8, 35, 67), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n'), ((36, 8, 36, 63), 'tkinter.Button', 'tk.Button', (), '', True, 'import tkinter as tk\n')] |
ulternate/table_tennis_league | rankings/elo.py | 1762c5b606f149b27d9c06c82e825c948c47b56f | def elo(winner_rank, loser_rank, weighting):
"""
    :param winner_rank: Current ranking of the Player that won the match.
    :param loser_rank: Current ranking of the Player that lost the match.
    :param weighting: The weighting factor (K-factor) to suit your comp.
:return: (winner_new_rank, loser_new_rank) Tuple.
This follows the ELO ranking method.
"""
winner_rank_transformed = 10 ** (winner_rank / 400)
opponent_rank_transformed = 10 ** (loser_rank / 400)
transformed_sum = winner_rank_transformed + opponent_rank_transformed
winner_score = winner_rank_transformed / transformed_sum
loser_score = opponent_rank_transformed / transformed_sum
winner_rank = winner_rank + weighting * (
1 - winner_score)
loser_rank = loser_rank - weighting * loser_score
# Set a floor of 100 for the rankings.
winner_rank = 100 if winner_rank < 100 else winner_rank
loser_rank = 100 if loser_rank < 100 else loser_rank
winner_rank = float('{result:.2f}'.format(result=winner_rank))
loser_rank = float('{result:.2f}'.format(result=loser_rank))
return winner_rank, loser_rank
| [] |
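A short worked example for elo() (illustrative numbers, assuming the function is importable as rankings.elo): with equal ratings the winner's expected score is 0.5, so a K-factor (weighting) of 32 moves both ratings by 16 points.

# Hypothetical usage of elo() (not part of the repo).
from rankings.elo import elo  # assumed import path

print(elo(1500, 1500, 32))   # evenly matched: (1516.0, 1484.0)
print(elo(1400, 1700, 32))   # an upset: the rating swing is larger than 16 points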
samplics-org/samplics | src/samplics/regression/glm.py | b5f49d075194cc24208f567e6a00e86aa24bec26 | from __future__ import annotations
from typing import Any, Callable, Optional, Union
import numpy as np
# import pandas as pd
import statsmodels.api as sm
from samplics.estimation.expansion import TaylorEstimator
from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans
from samplics.utils.types import Array, Number, Series, StringNumber
class SurveyGLM:
"""General linear models under complex survey sampling"""
def __init__(self):
self.beta: np.ndarray
@staticmethod
    def _residuals(e: np.ndarray, psu: np.ndarray, nb_vars: Number) -> tuple[np.ndarray, Number]:
psus = np.unique(psu)
if psus.shape[0] == 1 and e.shape[0] == 1:
raise AssertionError("Only one observation in the stratum")
if psus.shape[0] == 1:
psu = np.arange(e.shape[0])
psus = np.unique(psu)
e_values = np.zeros((psus.shape[0], nb_vars))
for i, p in enumerate(np.unique(psus)):
e_values[i, :] += np.sum(e[psu == p, :], axis=0)
e_means = np.sum(e_values, axis=0) / psus.shape[0]
return np.transpose(e_values - e_means) @ (e_values - e_means), psus.shape[0]
def _calculate_g(
self,
samp_weight: np.ndarray,
resid: np.ndarray,
x: np.ndarray,
stratum: Optional[np.ndarray],
psu: Optional[np.ndarray],
fpc: Union[dict[StringNumber, Number], Number],
        glm_scale: Number = 1,
) -> np.ndarray:
e = (samp_weight * resid)[:, None] * x / glm_scale
if psu is None:
psu = np.arange(e.shape[0])
if stratum is None:
e_h, n_h = self._residuals(e=e, psu=psu, nb_vars=x.shape[1])
return fpc * (n_h / (n_h - 1)) * e_h
else:
g_h = np.zeros((x.shape[1], x.shape[1]))
for s in np.unique(stratum):
e_s = e[stratum == s, :]
psu_s = psu[stratum == s]
e_h, n_h = self._residuals(e=e_s, psu=psu_s, nb_vars=x.shape[1])
g_h += fpc[s] * (n_h / (n_h - 1)) * e_h
return g_h
def estimate(
self,
y: Array,
x: Optional[Array] = None,
samp_weight: Optional[Array] = None,
stratum: Optional[Series] = None,
psu: Optional[Series] = None,
fpc: Union[dict[StringNumber, Number], Series, Number] = 1.0,
remove_nan: bool = False,
) -> None:
y = numpy_array(y)
y_temp = y.copy()
x = numpy_array(x) if x is not None else None
psu = numpy_array(psu) if psu is not None else None
if samp_weight is None:
weight_temp = np.ones(y.shape[0])
elif isinstance(samp_weight, (float, int)):
weight_temp = samp_weight * np.ones(y_temp.shape[0])
elif isinstance(samp_weight, np.ndarray):
weight_temp = samp_weight.copy()
else:
weight_temp = np.asarray(samp_weight)
if not isinstance(fpc, dict):
self.fpc = fpc_as_dict(stratum, fpc)
else:
if list(np.unique(stratum)) != list(fpc.keys()):
raise AssertionError("fpc dictionary keys must be the same as the strata!")
else:
self.fpc = fpc
glm_model = sm.GLM(endog=y_temp, exog=x, var_weights=weight_temp)
glm_results = glm_model.fit()
g = self._calculate_g(
            samp_weight=weight_temp,  # use the prepared weight vector so missing or scalar weights are handled
resid=glm_results.resid_response,
x=x,
stratum=stratum,
psu=psu,
fpc=self.fpc,
glm_scale=glm_results.scale,
)
d = glm_results.cov_params()
self.beta = glm_results.params
self.cov_beta = (d @ g) @ d
| [((24, 15, 24, 29), 'numpy.unique', 'np.unique', ({(24, 25, 24, 28): 'psu'}, {}), '(psu)', True, 'import numpy as np\n'), ((30, 19, 30, 53), 'numpy.zeros', 'np.zeros', ({(30, 28, 30, 52): '(psus.shape[0], nb_vars)'}, {}), '((psus.shape[0], nb_vars))', True, 'import numpy as np\n'), ((75, 12, 75, 26), 'samplics.utils.formats.numpy_array', 'numpy_array', ({(75, 24, 75, 25): 'y'}, {}), '(y)', False, 'from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans\n'), ((98, 20, 98, 73), 'statsmodels.api.GLM', 'sm.GLM', (), '', True, 'import statsmodels.api as sm\n'), ((28, 18, 28, 39), 'numpy.arange', 'np.arange', ({(28, 28, 28, 38): 'e.shape[0]'}, {}), '(e.shape[0])', True, 'import numpy as np\n'), ((29, 19, 29, 33), 'numpy.unique', 'np.unique', ({(29, 29, 29, 32): 'psu'}, {}), '(psu)', True, 'import numpy as np\n'), ((32, 30, 32, 45), 'numpy.unique', 'np.unique', ({(32, 40, 32, 44): 'psus'}, {}), '(psus)', True, 'import numpy as np\n'), ((33, 30, 33, 60), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((34, 18, 34, 42), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((51, 18, 51, 39), 'numpy.arange', 'np.arange', ({(51, 28, 51, 38): 'e.shape[0]'}, {}), '(e.shape[0])', True, 'import numpy as np\n'), ((56, 18, 56, 52), 'numpy.zeros', 'np.zeros', ({(56, 27, 56, 51): '(x.shape[1], x.shape[1])'}, {}), '((x.shape[1], x.shape[1]))', True, 'import numpy as np\n'), ((57, 21, 57, 39), 'numpy.unique', 'np.unique', ({(57, 31, 57, 38): 'stratum'}, {}), '(stratum)', True, 'import numpy as np\n'), ((78, 12, 78, 26), 'samplics.utils.formats.numpy_array', 'numpy_array', ({(78, 24, 78, 25): 'x'}, {}), '(x)', False, 'from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans\n'), ((79, 14, 79, 30), 'samplics.utils.formats.numpy_array', 'numpy_array', ({(79, 26, 79, 29): 'psu'}, {}), '(psu)', False, 'from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans\n'), ((82, 26, 82, 45), 'numpy.ones', 'np.ones', ({(82, 34, 82, 44): 'y.shape[0]'}, {}), '(y.shape[0])', True, 'import numpy as np\n'), ((91, 23, 91, 48), 'samplics.utils.formats.fpc_as_dict', 'fpc_as_dict', ({(91, 35, 91, 42): 'stratum', (91, 44, 91, 47): 'fpc'}, {}), '(stratum, fpc)', False, 'from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans\n'), ((36, 15, 36, 47), 'numpy.transpose', 'np.transpose', ({(36, 28, 36, 46): '(e_values - e_means)'}, {}), '(e_values - e_means)', True, 'import numpy as np\n'), ((84, 40, 84, 64), 'numpy.ones', 'np.ones', ({(84, 48, 84, 63): 'y_temp.shape[0]'}, {}), '(y_temp.shape[0])', True, 'import numpy as np\n'), ((88, 26, 88, 49), 'numpy.asarray', 'np.asarray', ({(88, 37, 88, 48): 'samp_weight'}, {}), '(samp_weight)', True, 'import numpy as np\n'), ((93, 20, 93, 38), 'numpy.unique', 'np.unique', ({(93, 30, 93, 37): 'stratum'}, {}), '(stratum)', True, 'import numpy as np\n')] |
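A hedged usage sketch for SurveyGLM (synthetic data; the design variables and import path are assumptions, not taken from the samplics documentation): fit a survey-weighted linear model, then read the point estimates and their design-based covariance.

# Hypothetical SurveyGLM usage on synthetic data.
import numpy as np
from samplics.regression.glm import SurveyGLM  # assumed import path

rng = np.random.default_rng(0)
n = 200
x = np.column_stack([np.ones(n), rng.normal(size=n)])        # intercept + one covariate
y = x @ np.array([1.0, 0.5]) + rng.normal(scale=0.3, size=n)
stratum = np.repeat([1, 2], n // 2)                            # two strata
psu = np.tile(np.arange(10), n // 10)                          # clusters (illustrative)
weights = rng.uniform(1, 3, size=n)                            # sampling weights

svy_glm = SurveyGLM()
svy_glm.estimate(y=y, x=x, samp_weight=weights, stratum=stratum, psu=psu)
print(svy_glm.beta)       # point estimates
print(svy_glm.cov_beta)   # design-based covariance matrix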
leg100/scopes | tests/test_scopes.py | 6a31908acf44b9f65f25668230197ed13229a80d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `scopes` package."""
import os
print(os.getenv('PYTHONPATH'))
import pytest
from click.testing import CliRunner
from scopes.tasks import tasks, bolt, spout, builder
from scopes.graph import G, build, topological_sort, traverse
from scopes import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'scopes.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
# t1---
# | |
# v v
# t2 t3
# \ / t4
# v |
# t5<----/
@pytest.fixture
def example():
tasks.clear()
G.clear()
@spout({'x': None})
def t1():
yield {'x': 'east'}
yield {'x': 'west'}
@bolt({'y': None}, lambda d: 'x' in d)
def t2(dep):
return {'y': 1, **dep}
@bolt({'z': None}, lambda d: d == {'x': None})
def t3(dep):
return {'z': 1, **dep}
@spout({'c': None})
def t4():
yield {'c': 4, 'x': 'east'}
yield {'c': 5, 'x': 'west'}
@builder({'a': 2}, lambda _: True, 'x')
def t5(obj, dep):
obj.update(dep)
def test_task_decorator(example):
assert len(tasks) == 5
assert callable(tasks[0].func)
assert tasks[0].obj == {'x': None}
def test_task_dag(example):
build(tasks)
assert len(G) == 5
assert len(G.edges) == 6
def test_task_traversal(example):
build(tasks)
nodes = topological_sort()
results = traverse(nodes)
assert results == {
't1': [{'x': 'east'}, {'x': 'west'}],
't2': [{'x': 'east', 'y': 1}, {'x': 'west', 'y': 1}],
't3': [{'x': 'east', 'z': 1}, {'x': 'west', 'z': 1}],
't4': [{'x': 'east', 'c': 4}, {'x': 'west', 'c': 5}],
't5': [
{'a': 2, 'x': 'east', 'y': 1, 'z': 1, 'c': 4},
{'a': 2, 'x': 'west', 'y': 1, 'z': 1, 'c': 5}
]
}
| [((8, 6, 8, 29), 'os.getenv', 'os.getenv', ({(8, 16, 8, 28): '"""PYTHONPATH"""'}, {}), "('PYTHONPATH')", False, 'import os\n'), ((37, 13, 37, 24), 'click.testing.CliRunner', 'CliRunner', ({}, {}), '()', False, 'from click.testing import CliRunner\n'), ((57, 4, 57, 17), 'scopes.tasks.tasks.clear', 'tasks.clear', ({}, {}), '()', False, 'from scopes.tasks import tasks, bolt, spout, builder\n'), ((58, 4, 58, 13), 'scopes.graph.G.clear', 'G.clear', ({}, {}), '()', False, 'from scopes.graph import G, build, topological_sort, traverse\n'), ((60, 5, 60, 23), 'scopes.tasks.spout', 'spout', ({(60, 11, 60, 22): "{'x': None}"}, {}), "({'x': None})", False, 'from scopes.tasks import tasks, bolt, spout, builder\n'), ((66, 5, 66, 42), 'scopes.tasks.bolt', 'bolt', ({(66, 10, 66, 21): "{'y': None}", (66, 23, 66, 41): "(lambda d: 'x' in d)"}, {}), "({'y': None}, lambda d: 'x' in d)", False, 'from scopes.tasks import tasks, bolt, spout, builder\n'), ((71, 5, 71, 50), 'scopes.tasks.bolt', 'bolt', ({(71, 10, 71, 21): "{'z': None}", (71, 23, 71, 49): "(lambda d: d == {'x': None})"}, {}), "({'z': None}, lambda d: d == {'x': None})", False, 'from scopes.tasks import tasks, bolt, spout, builder\n'), ((76, 5, 76, 23), 'scopes.tasks.spout', 'spout', ({(76, 11, 76, 22): "{'c': None}"}, {}), "({'c': None})", False, 'from scopes.tasks import tasks, bolt, spout, builder\n'), ((82, 5, 82, 43), 'scopes.tasks.builder', 'builder', ({(82, 13, 82, 21): "{'a': 2}", (82, 23, 82, 37): '(lambda _: True)', (82, 39, 82, 42): '"""x"""'}, {}), "({'a': 2}, lambda _: True, 'x')", False, 'from scopes.tasks import tasks, bolt, spout, builder\n'), ((94, 4, 94, 16), 'scopes.graph.build', 'build', ({(94, 10, 94, 15): 'tasks'}, {}), '(tasks)', False, 'from scopes.graph import G, build, topological_sort, traverse\n'), ((101, 4, 101, 16), 'scopes.graph.build', 'build', ({(101, 10, 101, 15): 'tasks'}, {}), '(tasks)', False, 'from scopes.graph import G, build, topological_sort, traverse\n'), ((102, 12, 102, 30), 'scopes.graph.topological_sort', 'topological_sort', ({}, {}), '()', False, 'from scopes.graph import G, build, topological_sort, traverse\n'), ((103, 14, 103, 29), 'scopes.graph.traverse', 'traverse', ({(103, 23, 103, 28): 'nodes'}, {}), '(nodes)', False, 'from scopes.graph import G, build, topological_sort, traverse\n')] |
tornadoyi/timeparse | timeparse/LunarSolarConverter/__init__.py | 1e44dbc6acdb07d6c023806d55034642c7ec0de9 | # -*- coding: utf-8 -*-
__author__ = 'isee15'
import LunarSolarConverter
converter = LunarSolarConverter.LunarSolarConverter()
def LunarToSolar(year, month, day, isleap = False):
lunar = LunarSolarConverter.Lunar(year, month, day, isleap)
solar = converter.LunarToSolar(lunar)
return (solar.solarYear, solar.solarMonth, solar.solarDay)
def SolarToLunar(year, month, day):
solar = LunarSolarConverter.Solar(year, month, day)
lunar = converter.SolarToLunar(solar)
return (lunar.lunarYear, lunar.lunarMonth, lunar.lunarDay)
def LunarMonthDays(year, month, isleap = False):
converter = LunarSolarConverter.LunarSolarConverter
days = converter.lunar_month_days[year - converter.lunar_month_days[0]]
leap = LunarSolarConverter.GetBitInt(days, 4, 13)
offset = 0
loopend = leap
if not isleap:
if month <= leap or leap == 0:
loopend = month - 1
else:
loopend = month
    days = 30 if LunarSolarConverter.GetBitInt(days, 1, 12 - loopend) == 1 else 29
return days
| [((6, 12, 6, 53), 'LunarSolarConverter.LunarSolarConverter', 'LunarSolarConverter.LunarSolarConverter', ({}, {}), '()', False, 'import LunarSolarConverter\n'), ((9, 12, 9, 63), 'LunarSolarConverter.Lunar', 'LunarSolarConverter.Lunar', ({(9, 38, 9, 42): 'year', (9, 44, 9, 49): 'month', (9, 51, 9, 54): 'day', (9, 56, 9, 62): 'isleap'}, {}), '(year, month, day, isleap)', False, 'import LunarSolarConverter\n'), ((15, 12, 15, 55), 'LunarSolarConverter.Solar', 'LunarSolarConverter.Solar', ({(15, 38, 15, 42): 'year', (15, 44, 15, 49): 'month', (15, 51, 15, 54): 'day'}, {}), '(year, month, day)', False, 'import LunarSolarConverter\n'), ((25, 11, 25, 53), 'LunarSolarConverter.GetBitInt', 'LunarSolarConverter.GetBitInt', ({(25, 41, 25, 45): 'days', (25, 47, 25, 48): '4', (25, 50, 25, 52): '13'}, {}), '(days, 4, 13)', False, 'import LunarSolarConverter\n'), ((35, 11, 35, 63), 'LunarSolarConverter.GetBitInt', 'LunarSolarConverter.GetBitInt', ({(35, 41, 35, 45): 'days', (35, 47, 35, 48): '(1)', (35, 50, 35, 62): '(12 - loopend)'}, {}), '(days, 1, 12 - loopend)', False, 'import LunarSolarConverter\n')] |
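A usage sketch for the wrappers above (dates are illustrative; the import path is an assumption and depends on how the inner LunarSolarConverter module is packaged):

# Hypothetical round trip between solar and lunar dates.
from timeparse.LunarSolarConverter import LunarToSolar, SolarToLunar, LunarMonthDays  # assumed path

lunar = SolarToLunar(2017, 10, 1)          # -> (lunar_year, lunar_month, lunar_day)
print(lunar)
print(LunarToSolar(*lunar))                # back to a solar date; isleap only matters in leap months
print(LunarMonthDays(lunar[0], lunar[1]))  # days in that lunar month (29 or 30)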
ArnovanHilten/NVFlare | examples/hello-pt/custom/cifar10validator.py | bb45e7d606849c6bc8f7542347459c6ba1be00c4 | # Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, ToTensor, Normalize
from nvflare.apis.dxo import from_shareable, DataKind, DXO
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.app_constant import AppConstants
from simple_network import SimpleNetwork
class Cifar10Validator(Executor):
def __init__(self, validate_task_name=AppConstants.TASK_VALIDATION):
super(Cifar10Validator, self).__init__()
self._validate_task_name = validate_task_name
# Setup the model
self.model = SimpleNetwork()
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model.to(self.device)
# Preparing the dataset for testing.
transforms = Compose([
ToTensor(),
Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self.test_data = CIFAR10(root='~/data', train=False, transform=transforms)
self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False)
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
if task_name == self._validate_task_name:
model_owner = "?"
try:
try:
dxo = from_shareable(shareable)
except:
self.log_error(fl_ctx, "Error in extracting dxo from shareable.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Ensure data_kind is weights.
if not dxo.data_kind == DataKind.WEIGHTS:
self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.")
return make_reply(ReturnCode.BAD_TASK_DATA)
# Extract weights and ensure they are tensor.
model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?")
weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()}
# Get validation accuracy
val_accuracy = self.do_validation(weights, abort_signal)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"Accuracy when validating {model_owner}'s model on"
f" {fl_ctx.get_identity_name()}"f's data: {val_accuracy}')
dxo = DXO(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy})
return dxo.to_shareable()
except:
self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
else:
return make_reply(ReturnCode.TASK_UNKNOWN)
def do_validation(self, weights, abort_signal):
self.model.load_state_dict(weights)
self.model.eval()
correct = 0
total = 0
with torch.no_grad():
for i, (images, labels) in enumerate(self.test_loader):
if abort_signal.triggered:
return 0
images, labels = images.to(self.device), labels.to(self.device)
output = self.model(images)
_, pred_label = torch.max(output, 1)
correct += (pred_label == labels).sum().item()
total += images.size()[0]
metric = correct/float(total)
return metric
| [((38, 21, 38, 36), 'simple_network.SimpleNetwork', 'SimpleNetwork', ({}, {}), '()', False, 'from simple_network import SimpleNetwork\n'), ((47, 25, 47, 82), 'torchvision.datasets.CIFAR10', 'CIFAR10', (), '', False, 'from torchvision.datasets import CIFAR10\n'), ((48, 27, 48, 82), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((39, 46, 39, 71), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((39, 22, 39, 42), 'torch.device', 'torch.device', ({(39, 35, 39, 41): '"""cuda"""'}, {}), "('cuda')", False, 'import torch\n'), ((39, 77, 39, 96), 'torch.device', 'torch.device', ({(39, 90, 39, 95): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((83, 19, 83, 54), 'nvflare.apis.shareable.make_reply', 'make_reply', ({(83, 30, 83, 53): 'ReturnCode.TASK_UNKNOWN'}, {}), '(ReturnCode.TASK_UNKNOWN)', False, 'from nvflare.apis.shareable import Shareable, make_reply\n'), ((92, 13, 92, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((44, 12, 44, 22), 'torchvision.transforms.ToTensor', 'ToTensor', ({}, {}), '()', False, 'from torchvision.transforms import Compose, ToTensor, Normalize\n'), ((45, 12, 45, 55), 'torchvision.transforms.Normalize', 'Normalize', ({(45, 22, 45, 37): '(0.5, 0.5, 0.5)', (45, 39, 45, 54): '(0.5, 0.5, 0.5)'}, {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))', False, 'from torchvision.transforms import Compose, ToTensor, Normalize\n'), ((77, 22, 77, 85), 'nvflare.apis.dxo.DXO', 'DXO', (), '', False, 'from nvflare.apis.dxo import from_shareable, DataKind, DXO\n'), ((100, 32, 100, 52), 'torch.max', 'torch.max', ({(100, 42, 100, 48): 'output', (100, 50, 100, 51): '1'}, {}), '(output, 1)', False, 'import torch\n'), ((55, 26, 55, 51), 'nvflare.apis.dxo.from_shareable', 'from_shareable', ({(55, 41, 55, 50): 'shareable'}, {}), '(shareable)', False, 'from nvflare.apis.dxo import from_shareable, DataKind, DXO\n'), ((63, 27, 63, 63), 'nvflare.apis.shareable.make_reply', 'make_reply', ({(63, 38, 63, 62): 'ReturnCode.BAD_TASK_DATA'}, {}), '(ReturnCode.BAD_TASK_DATA)', False, 'from nvflare.apis.shareable import Shareable, make_reply\n'), ((67, 30, 67, 68), 'torch.as_tensor', 'torch.as_tensor', (), '', False, 'import torch\n'), ((72, 27, 72, 62), 'nvflare.apis.shareable.make_reply', 'make_reply', ({(72, 38, 72, 61): 'ReturnCode.TASK_ABORTED'}, {}), '(ReturnCode.TASK_ABORTED)', False, 'from nvflare.apis.shareable import Shareable, make_reply\n'), ((81, 23, 81, 65), 'nvflare.apis.shareable.make_reply', 'make_reply', ({(81, 34, 81, 64): 'ReturnCode.EXECUTION_EXCEPTION'}, {}), '(ReturnCode.EXECUTION_EXCEPTION)', False, 'from nvflare.apis.shareable import Shareable, make_reply\n'), ((58, 27, 58, 63), 'nvflare.apis.shareable.make_reply', 'make_reply', ({(58, 38, 58, 62): 'ReturnCode.BAD_TASK_DATA'}, {}), '(ReturnCode.BAD_TASK_DATA)', False, 'from nvflare.apis.shareable import Shareable, make_reply\n')] |
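A hedged local smoke test for the validator above (not part of the NVFlare example; the stub signal and module names are assumptions, and it presumes the CIFAR-10 test set is already available under ~/data):

# Hypothetical local check of Cifar10Validator.do_validation outside an NVFlare job.
from cifar10validator import Cifar10Validator  # assumed module name
from simple_network import SimpleNetwork

class _StubSignal:
    """Minimal stand-in for the abort Signal: never triggered."""
    triggered = False

validator = Cifar10Validator()
weights = SimpleNetwork().state_dict()   # e.g. validate an untrained network
accuracy = validator.do_validation(weights, _StubSignal())
print(f"accuracy on the CIFAR-10 test set: {accuracy:.3f}")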
wrharding/aws-infra | lambda/enable-traffic-mirroring.py | 5e913f8342b3a3b3a4599648c4a914f828b5bc18 | # MIT License
# Copyright (c) 2020-2021 Chris Farris (https://www.chrisfarris.com)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import boto3
from botocore.exceptions import ClientError
import json
import os
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
TAG_KEY=os.getenv('TAG_KEY', default='WireShark')
def handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
ec2_client = boto3.client('ec2')
mirror_sessions = ec2_client.describe_traffic_mirror_sessions()['TrafficMirrorSessions']
enabled_enis = []
max_session_id = 0
for s in mirror_sessions:
enabled_enis.append(s['NetworkInterfaceId'])
if s['SessionNumber'] > max_session_id:
max_session_id = s['SessionNumber']
response = ec2_client.describe_instances(
Filters=[
{'Name': 'instance-state-name', 'Values': ['running']},
],
MaxResults=1000 # I should never need to paginate.
)
for r in response['Reservations']:
for i in r['Instances']:
if not i['InstanceType'].startswith("t3"):
logger.debug(f"Instance {i['InstanceId']} is not a t3 and does not support Traffic Mirroring")
continue
for tag in i['Tags']:
if tag['Key'] == TAG_KEY:
# See if a mirror session is setup
for eni in i['NetworkInterfaces']:
if eni['NetworkInterfaceId'] not in enabled_enis:
logger.info(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} needs Mirroring Enabled")
max_session_id += 1
enable_traffic_mirroring(ec2_client, eni['NetworkInterfaceId'], i['InstanceId'], max_session_id)
else:
logger.debug(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} is already Enabled")
def enable_traffic_mirroring(ec2_client, eni, instance_id, session_id):
response = ec2_client.create_traffic_mirror_session(
NetworkInterfaceId=eni,
TrafficMirrorTargetId=os.environ['TARGET_ID'],
TrafficMirrorFilterId=os.environ['FILTER_ID'],
SessionNumber=session_id,
Description=f"Enabled by Lambda for {instance_id}"
)
## END OF FUNCTION ##
if __name__ == '__main__':
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
ch = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
# create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
try:
handler(None, None)
except KeyboardInterrupt:
exit(1)
| [((29, 9, 29, 28), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((35, 8, 35, 49), 'os.getenv', 'os.getenv', (), '', False, 'import os\n'), ((40, 17, 40, 36), 'boto3.client', 'boto3.client', ({(40, 30, 40, 35): '"""ec2"""'}, {}), "('ec2')", False, 'import boto3\n'), ((92, 9, 92, 32), 'logging.StreamHandler', 'logging.StreamHandler', ({}, {}), '()', False, 'import logging\n'), ((98, 16, 98, 64), 'logging.Formatter', 'logging.Formatter', ({(98, 34, 98, 63): '"""%(levelname)s - %(message)s"""'}, {}), "('%(levelname)s - %(message)s')", False, 'import logging\n'), ((30, 33, 30, 71), 'os.getenv', 'os.getenv', (), '', False, 'import os\n'), ((31, 0, 31, 29), 'logging.getLogger', 'logging.getLogger', ({(31, 18, 31, 28): '"""botocore"""'}, {}), "('botocore')", False, 'import logging\n'), ((32, 0, 32, 26), 'logging.getLogger', 'logging.getLogger', ({(32, 18, 32, 25): '"""boto3"""'}, {}), "('boto3')", False, 'import logging\n'), ((33, 0, 33, 28), 'logging.getLogger', 'logging.getLogger', ({(33, 18, 33, 27): '"""urllib3"""'}, {}), "('urllib3')", False, 'import logging\n'), ((38, 38, 38, 71), 'json.dumps', 'json.dumps', (), '', False, 'import json\n')] |
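A hedged note on permissions (an assumption, not taken from the repo's infrastructure code): the handler only calls three EC2 APIs, so its execution role needs roughly the following actions in addition to the usual CloudWatch Logs permissions.

# Sketch of the minimal EC2 actions this Lambda invokes (illustrative policy document).
MINIMAL_EC2_ACTIONS = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "ec2:DescribeTrafficMirrorSessions",
                "ec2:DescribeInstances",
                "ec2:CreateTrafficMirrorSession",
            ],
            "Resource": "*",
        }
    ],
}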
wu6u3/async_trpo | src/value_function.py | b6e3dd56775464b58f7433773e8b04d88cf3fdbc | """
State-Value Function
Written by Patrick Coady (pat-coady.github.io)
Modified by Tin-Yin Lai (wu6u3) into asynchronous version
"""
import tensorflow as tf
import numpy as np
from sklearn.utils import shuffle
#import os
class NNValueFunction(object):
""" NN-based state-value function """
def __init__(self, obs_dim, hid1_mult, thread_idx, shared_nn):
"""
Args:
obs_dim: number of dimensions in observation vector (int)
hid1_mult: size of first hidden layer, multiplier of obs_dim
"""
self.replay_buffer_x = None
self.replay_buffer_y = None
self.obs_dim = obs_dim
self.hid1_mult = hid1_mult
self.epochs = 10
self.lr = None # learning rate set in _build_graph()
self._thread_idx=thread_idx # -1 for global
self._scope_name = "nn_net_"+str(self._thread_idx)
self._build_graph()
#self.sess = tf.Session(graph=self.g)
#self.sess.run(self.init)
var_refs = [v._ref() for v in self.get_vars()]
self.gradients = tf.gradients(
self.loss, var_refs,
gate_gradients=False,
aggregation_method=None,
colocate_gradients_with_ops=False)
self.apply_gradients=None
self.sync = self.sync_from(shared_nn)
#self. global_fit = self.fit_for_global(x=None, y=None, logger=None)
def _build_graph(self):
""" Construct TensorFlow graph, including loss function, init op and train op """
with tf.variable_scope(self._scope_name) as scope:
self.obs_ph = tf.placeholder(tf.float32, (None, self.obs_dim), 'obs_valfunc')
self.val_ph = tf.placeholder(tf.float32, (None,), 'val_valfunc')
# hid1 layer size is 10x obs_dim, hid3 size is 10, and hid2 is geometric mean
hid1_size = self.obs_dim * self.hid1_mult # default multipler 10 chosen empirically on 'Hopper-v1'
hid3_size = 5 # 5 chosen empirically on 'Hopper-v1'
hid2_size = int(np.sqrt(hid1_size * hid3_size))
# heuristic to set learning rate based on NN size (tuned on 'Hopper-v1')
self.lr = 1e-2 / np.sqrt(hid2_size) # 1e-3 empirically determined
print('Value Params -- h1: {}, h2: {}, h3: {}, lr: {:.3g}'
.format(hid1_size, hid2_size, hid3_size, self.lr))
# 3 hidden layers with tanh activations
out = tf.layers.dense(self.obs_ph, hid1_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / self.obs_dim)), name="h1")
out = tf.layers.dense(out, hid2_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid1_size)), name="h2")
out = tf.layers.dense(out, hid3_size, tf.tanh,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid2_size)), name="h3")
out = tf.layers.dense(out, 1,
kernel_initializer=tf.random_normal_initializer(
stddev=np.sqrt(1 / hid3_size)), name='output')
self.out = tf.squeeze(out)
self.loss = tf.reduce_mean(tf.square(self.out - self.val_ph)) # squared loss
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.minimize(self.loss)
#self.init = tf.global_variables_initializer()
self.h1_w, self.h1_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h1')
self.h2_w, self.h2_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h2')
self.h3_w, self.h3_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h3')
self.output_w, self.output_b =tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/output')
scope.reuse_variables()
#self.sess = tf.Session(graph=self.g)
#self.sess.run(self.init)
def fit_for_global(self, x, y, logger):
""" Fit model to current data batch + previous data batch
Args:
x: features
y: target
logger: logger to save training loss and % explained variance
"""
num_batches = max(x.shape[0] // 256, 1)
batch_size = x.shape[0] // num_batches
y_hat = self.predict(sess, x) # check explained variance prior to update
old_exp_var = 1 - np.var(y - y_hat)/np.var(y)
if self.replay_buffer_x is None:
x_train, y_train = x, y
else:
x_train = np.concatenate([x, self.replay_buffer_x])
y_train = np.concatenate([y, self.replay_buffer_y])
self.replay_buffer_x = x
self.replay_buffer_y = y
for e in range(self.epochs):
x_train, y_train = shuffle(x_train, y_train)
for j in range(num_batches):
start = j * batch_size
end = (j + 1) * batch_size
feed_dict = {self.obs_ph: x_train[start:end, :],
self.val_ph: y_train[start:end]}
_, l = sess.run([self.train_op, self.loss], feed_dict=feed_dict)
y_hat = self.predict(sess, x)
loss = np.mean(np.square(y_hat - y)) # explained variance after update
exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func
logger.log({'ValFuncLoss': loss,
'ExplainedVarNew': exp_var,
'ExplainedVarOld': old_exp_var})
def fit(self, sess, x, y, logger):
""" Fit model to current data batch + previous data batch
Args:
x: features
y: target
logger: logger to save training loss and % explained variance
"""
num_batches = max(x.shape[0] // 256, 1)
batch_size = x.shape[0] // num_batches
y_hat = self.predict(sess, x) # check explained variance prior to update
old_exp_var = 1 - np.var(y - y_hat)/np.var(y)
if self.replay_buffer_x is None:
x_train, y_train = x, y
else:
x_train = np.concatenate([x, self.replay_buffer_x])
y_train = np.concatenate([y, self.replay_buffer_y])
self.replay_buffer_x = x
self.replay_buffer_y = y
for e in range(self.epochs):
x_train, y_train = shuffle(x_train, y_train)
for j in range(num_batches):
start = j * batch_size
end = (j + 1) * batch_size
feed_dict = {self.obs_ph: x_train[start:end, :],
self.val_ph: y_train[start:end]}
_, l = sess.run([self.train_op, self.loss], feed_dict=feed_dict)
y_hat = self.predict(sess, x)
loss = np.mean(np.square(y_hat - y)) # explained variance after update
exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func
logger.log({'ValFuncLoss': loss,
'ExplainedVarNew': exp_var,
'ExplainedVarOld': old_exp_var})
def predict(self, sess, x):
""" Predict method """
feed_dict = {self.obs_ph: x}
y_hat = sess.run(self.out, feed_dict=feed_dict)
return np.squeeze(y_hat)
#def close_sess(self):
# """ Close TensorFlow session """
# sess.close()
def get_vars(self):
return [self.h1_w, self.h1_b,
self.h2_w, self.h2_b,
self.h3_w, self.h3_b,
self.output_w, self.output_b ]
# weights = []
#name = []
#for tensor in self.g.as_graph_def().node:
# name.append(tensor.name)
#print(name)
#with self.g.as_default() as g:
# with tf.variable_scope(self._scope_name) as scope:
# weights.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
# weights.append(g.get_tensor_by_name('h1/kernel:0'))
# weights.append(g.get_tensor_by_name('h1/bias:0'))
# weights.append(g.get_tensor_by_name('h2/kernel:0'))
# weights.append(g.get_tensor_by_name('h2/bias:0'))
# weights.append(g.get_tensor_by_name('h3/kernel:0'))
# weights.append(g.get_tensor_by_name('h3/bias:0'))
# return weights
def sync_from(self, shared_nn, name=None):
if shared_nn != None:
src_vars = shared_nn.get_vars()
dst_vars = self.get_vars()
sync_ops = []
with tf.name_scope(name, self._scope_name, []) as name:
for(src_var, dst_var) in zip(src_vars, dst_vars):
sync_op = tf.assign(dst_var, src_var)
sync_ops.append(sync_op)
return tf.group(*sync_ops, name=name)
else:
return None
| [((35, 25, 39, 46), 'tensorflow.gradients', 'tf.gradients', (), '', True, 'import tensorflow as tf\n'), ((163, 15, 163, 32), 'numpy.squeeze', 'np.squeeze', ({(163, 26, 163, 31): 'y_hat'}, {}), '(y_hat)', True, 'import numpy as np\n'), ((47, 13, 47, 48), 'tensorflow.variable_scope', 'tf.variable_scope', ({(47, 31, 47, 47): 'self._scope_name'}, {}), '(self._scope_name)', True, 'import tensorflow as tf\n'), ((48, 26, 48, 89), 'tensorflow.placeholder', 'tf.placeholder', ({(48, 41, 48, 51): 'tf.float32', (48, 53, 48, 73): '(None, self.obs_dim)', (48, 75, 48, 88): '"""obs_valfunc"""'}, {}), "(tf.float32, (None, self.obs_dim), 'obs_valfunc')", True, 'import tensorflow as tf\n'), ((49, 26, 49, 76), 'tensorflow.placeholder', 'tf.placeholder', ({(49, 41, 49, 51): 'tf.float32', (49, 53, 49, 60): '(None,)', (49, 62, 49, 75): '"""val_valfunc"""'}, {}), "(tf.float32, (None,), 'val_valfunc')", True, 'import tensorflow as tf\n'), ((72, 23, 72, 38), 'tensorflow.squeeze', 'tf.squeeze', ({(72, 34, 72, 37): 'out'}, {}), '(out)', True, 'import tensorflow as tf\n'), ((74, 24, 74, 55), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ({(74, 47, 74, 54): 'self.lr'}, {}), '(self.lr)', True, 'import tensorflow as tf\n'), ((78, 35, 78, 110), 'tensorflow.get_collection', 'tf.get_collection', ({(78, 53, 78, 85): 'tf.GraphKeys.TRAINABLE_VARIABLES', (78, 87, 78, 109): "self._scope_name + '/h1'"}, {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name + '/h1')", True, 'import tensorflow as tf\n'), ((79, 35, 79, 110), 'tensorflow.get_collection', 'tf.get_collection', ({(79, 53, 79, 85): 'tf.GraphKeys.TRAINABLE_VARIABLES', (79, 87, 79, 109): "self._scope_name + '/h2'"}, {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name + '/h2')", True, 'import tensorflow as tf\n'), ((80, 35, 80, 110), 'tensorflow.get_collection', 'tf.get_collection', ({(80, 53, 80, 85): 'tf.GraphKeys.TRAINABLE_VARIABLES', (80, 87, 80, 109): "self._scope_name + '/h3'"}, {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name + '/h3')", True, 'import tensorflow as tf\n'), ((81, 42, 81, 121), 'tensorflow.get_collection', 'tf.get_collection', ({(81, 60, 81, 92): 'tf.GraphKeys.TRAINABLE_VARIABLES', (81, 94, 81, 120): "self._scope_name + '/output'"}, {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name +\n '/output')", True, 'import tensorflow as tf\n'), ((103, 22, 103, 63), 'numpy.concatenate', 'np.concatenate', ({(103, 37, 103, 62): '[x, self.replay_buffer_x]'}, {}), '([x, self.replay_buffer_x])', True, 'import numpy as np\n'), ((104, 22, 104, 63), 'numpy.concatenate', 'np.concatenate', ({(104, 37, 104, 62): '[y, self.replay_buffer_y]'}, {}), '([y, self.replay_buffer_y])', True, 'import numpy as np\n'), ((108, 31, 108, 56), 'sklearn.utils.shuffle', 'shuffle', ({(108, 39, 108, 46): 'x_train', (108, 48, 108, 55): 'y_train'}, {}), '(x_train, y_train)', False, 'from sklearn.utils import shuffle\n'), ((116, 23, 116, 43), 'numpy.square', 'np.square', ({(116, 33, 116, 42): 'y_hat - y'}, {}), '(y_hat - y)', True, 'import numpy as np\n'), ((138, 22, 138, 63), 'numpy.concatenate', 'np.concatenate', ({(138, 37, 138, 62): '[x, self.replay_buffer_x]'}, {}), '([x, self.replay_buffer_x])', True, 'import numpy as np\n'), ((139, 22, 139, 63), 'numpy.concatenate', 'np.concatenate', ({(139, 37, 139, 62): '[y, self.replay_buffer_y]'}, {}), '([y, self.replay_buffer_y])', True, 'import numpy as np\n'), ((143, 31, 143, 56), 'sklearn.utils.shuffle', 'shuffle', ({(143, 39, 143, 46): 'x_train', (143, 48, 143, 55): 'y_train'}, {}), '(x_train, y_train)', 
False, 'from sklearn.utils import shuffle\n'), ((151, 23, 151, 43), 'numpy.square', 'np.square', ({(151, 33, 151, 42): 'y_hat - y'}, {}), '(y_hat - y)', True, 'import numpy as np\n'), ((208, 19, 208, 49), 'tensorflow.group', 'tf.group', (), '', True, 'import tensorflow as tf\n'), ((53, 28, 53, 58), 'numpy.sqrt', 'np.sqrt', ({(53, 36, 53, 57): 'hid1_size * hid3_size'}, {}), '(hid1_size * hid3_size)', True, 'import numpy as np\n'), ((55, 29, 55, 47), 'numpy.sqrt', 'np.sqrt', ({(55, 37, 55, 46): 'hid2_size'}, {}), '(hid2_size)', True, 'import numpy as np\n'), ((73, 39, 73, 72), 'tensorflow.square', 'tf.square', ({(73, 49, 73, 71): 'self.out - self.val_ph'}, {}), '(self.out - self.val_ph)', True, 'import tensorflow as tf\n'), ((99, 26, 99, 43), 'numpy.var', 'np.var', ({(99, 33, 99, 42): '(y - y_hat)'}, {}), '(y - y_hat)', True, 'import numpy as np\n'), ((99, 44, 99, 53), 'numpy.var', 'np.var', ({(99, 51, 99, 52): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((117, 22, 117, 39), 'numpy.var', 'np.var', ({(117, 29, 117, 38): '(y - y_hat)'}, {}), '(y - y_hat)', True, 'import numpy as np\n'), ((117, 42, 117, 51), 'numpy.var', 'np.var', ({(117, 49, 117, 50): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((134, 26, 134, 43), 'numpy.var', 'np.var', ({(134, 33, 134, 42): '(y - y_hat)'}, {}), '(y - y_hat)', True, 'import numpy as np\n'), ((134, 44, 134, 53), 'numpy.var', 'np.var', ({(134, 51, 134, 52): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((152, 22, 152, 39), 'numpy.var', 'np.var', ({(152, 29, 152, 38): '(y - y_hat)'}, {}), '(y - y_hat)', True, 'import numpy as np\n'), ((152, 42, 152, 51), 'numpy.var', 'np.var', ({(152, 49, 152, 50): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((201, 17, 201, 58), 'tensorflow.name_scope', 'tf.name_scope', ({(201, 31, 201, 35): 'name', (201, 37, 201, 53): 'self._scope_name', (201, 55, 201, 57): '[]'}, {}), '(name, self._scope_name, [])', True, 'import tensorflow as tf\n'), ((205, 30, 205, 57), 'tensorflow.assign', 'tf.assign', ({(205, 40, 205, 47): 'dst_var', (205, 49, 205, 56): 'src_var'}, {}), '(dst_var, src_var)', True, 'import tensorflow as tf\n'), ((61, 45, 61, 70), 'numpy.sqrt', 'np.sqrt', ({(61, 53, 61, 69): '1 / self.obs_dim'}, {}), '(1 / self.obs_dim)', True, 'import numpy as np\n'), ((65, 45, 65, 67), 'numpy.sqrt', 'np.sqrt', ({(65, 53, 65, 66): '1 / hid1_size'}, {}), '(1 / hid1_size)', True, 'import numpy as np\n'), ((68, 45, 68, 67), 'numpy.sqrt', 'np.sqrt', ({(68, 53, 68, 66): '1 / hid2_size'}, {}), '(1 / hid2_size)', True, 'import numpy as np\n'), ((71, 45, 71, 67), 'numpy.sqrt', 'np.sqrt', ({(71, 53, 71, 66): '1 / hid3_size'}, {}), '(1 / hid3_size)', True, 'import numpy as np\n')] |
bkidwell/mdepub | mdepub/actions/__init__.py | af9e7d2065fb8251b6767e827ac2cff059ce7668 | """mdepub actions -- these modules do the actual work."""
import archive
import clean
import create
import epub
import extract
import html
import newid
import version
| [] |
bethlakshmi/gbe-divio-djangocms-python2.7 | gbe/views/make_bid_view.py | 6e9b2c894162524bbbaaf73dcbe927988707231d | from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import (
get_object_or_404,
render,
)
from gbe.models import (
Conference,
UserMessage,
)
from gbe_logging import log_func
from gbe.functions import (
validate_profile,
)
from gbe.email.functions import notify_reviewers_on_bid_change
from gbetext import (
no_login_msg,
fee_instructions,
full_login_msg,
payment_needed_msg,
payment_details_error,
)
from gbe_utils.text import no_profile_msg
from gbe.ticketing_idd_interface import (
get_payment_details,
get_ticket_form,
fee_paid,
)
class MakeBidView(View):
form = None
has_draft = True
instructions = ''
payment_form = None
coordinated = False
def groundwork(self, request, args, kwargs):
self.owner = validate_profile(request, require=False)
if not self.owner or not self.owner.complete:
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PROFILE_INCOMPLETE",
defaults={
'summary': "Profile Incomplete",
'description': no_profile_msg})
messages.warning(request, user_message[0].description)
return '%s?next=%s' % (
reverse('profile_update', urlconf='gbe.urls'),
reverse('%s_create' % self.bid_type.lower(),
urlconf='gbe.urls'))
self.bid_object = None
if "bid_id" in kwargs:
bid_id = kwargs.get("bid_id")
self.bid_object = get_object_or_404(self.bid_class, pk=bid_id)
self.conference = self.bid_object.b_conference
else:
self.conference = Conference.objects.filter(
accepting_bids=True).first()
def make_post_forms(self, request, the_form):
if self.bid_object:
self.form = the_form(
request.POST,
instance=self.bid_object,
initial=self.get_initial(),
prefix=self.prefix)
else:
self.form = the_form(
request.POST,
initial=self.get_initial(),
prefix=self.prefix)
self.set_up_form()
def set_up_post(self, request):
the_form = None
if 'submit' in list(request.POST.keys()) or not self.has_draft:
the_form = self.submit_form
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="SUBMIT_SUCCESS",
defaults={
'summary': "%s Submit Success" % self.bid_type,
'description': self.submit_msg})
else:
the_form = self.draft_form
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="DRAFT_SUCCESS",
defaults={
'summary': "%s Save Draft Success" % self.bid_type,
'description': self.draft_msg})
self.make_post_forms(request, the_form)
return user_message
def make_context(self, request):
paid = fee_paid(
self.bid_type,
self.owner.user_object.username,
self.conference)
instructions = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="BID_INSTRUCTIONS",
defaults={
'summary': "%s Bid Instructions" % self.bid_type,
'description': self.instructions})
context = {
'conference': self.conference,
'forms': [self.form],
'page_title': self.page_title,
'view_title': self.view_title,
'draft_fields': self.draft_fields,
'submit_fields': self.submit_fields,
'fee_paid': paid,
'view_header_text': instructions[0].description,
}
if not paid and not self.coordinated:
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="FEE_MESSAGE",
defaults={
'summary': "%s Pre-submit Message" % self.bid_type,
'description': fee_instructions})
messages.info(
request,
user_message[0].description)
if self.payment_form:
context['forms'] += [self.payment_form]
else:
context['forms'] += [get_ticket_form(self.bid_class.__name__,
self.conference)]
return context
def get_create_form(self, request):
if self.bid_object:
self.form = self.submit_form(
prefix=self.prefix,
instance=self.bid_object,
initial=self.get_initial())
else:
self.form = self.submit_form(
prefix=self.prefix,
initial=self.get_initial())
self.set_up_form()
return render(
request,
'gbe/bid.tmpl',
self.make_context(request)
)
def check_validity(self, request):
return self.form.is_valid()
def set_up_form(self):
pass
def get_invalid_response(self, request):
self.set_up_form()
context = self.make_context(request)
return render(
request,
'gbe/bid.tmpl',
context)
def submit_bid(self, request):
self.bid_object.submitted = True
self.bid_object.save()
notify_reviewers_on_bid_change(
self.owner,
self.bid_object,
self.bid_type,
"Submission",
self.conference,
'%s Reviewers' % self.bid_type,
reverse('%s_review' % self.bid_type.lower(),
urlconf='gbe.urls'))
@never_cache
@log_func
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
follow_on = '?next=%s' % reverse(
'%s_create' % self.bid_type.lower(),
urlconf='gbe.urls')
user_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="USER_NOT_LOGGED_IN",
defaults={
'summary': "Need Login - %s Bid",
'description': no_login_msg})
full_msg = full_login_msg % (
user_message[0].description,
reverse('login', urlconf='gbe.urls') + follow_on)
messages.warning(request, full_msg)
return HttpResponseRedirect(
reverse('register', urlconf='gbe.urls') + follow_on)
redirect = self.groundwork(request, args, kwargs)
if redirect:
return HttpResponseRedirect(redirect)
return self.get_create_form(request)
@never_cache
@log_func
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
cart_items = []
paypal_button = None
total = None
redirect = None
redirect = self.groundwork(request, args, kwargs)
if redirect:
return HttpResponseRedirect(redirect)
user_message = self.set_up_post(request)
# check bid validity
if not self.check_validity(request):
return self.get_invalid_response(request)
if not self.coordinated and not fee_paid(
self.bid_type,
self.owner.user_object.username,
self.conference) and "draft" not in list(request.POST.keys()):
self.payment_form = get_ticket_form(self.bid_class.__name__,
self.conference,
request.POST)
if not self.payment_form.is_valid():
error_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="PAYMENT_CHOICE_INVALID",
defaults={
'summary': "User Made Invalid Ticket Choice",
'description': payment_details_error})
messages.error(request, error_message[0].description)
return self.get_invalid_response(request)
# save bid
if not self.bid_object:
self.bid_object = self.form.save(commit=False)
self.set_valid_form(request)
# if this isn't a draft, move forward through process, setting up
# payment review if payment is needed
if "submit" in list(request.POST.keys()):
if self.payment_form:
cart_items, paypal_button, total = get_payment_details(
request,
self.payment_form,
self.bid_type,
self.bid_object.pk,
self.owner.user_object.pk)
dynamic_message = UserMessage.objects.get_or_create(
view=self.__class__.__name__,
code="NOT_PAID_INSTRUCTIONS",
defaults={
'summary': "%s Not Paid" % self.bid_type,
'description': payment_needed_msg})
page_title = '%s Payment' % self.bid_type
return render(
request,
'gbe/confirm_pay.tmpl',
{'dynamic_message': dynamic_message[0].description,
'page_title': page_title,
'cart_items': cart_items,
'total': total,
'paypal_button': paypal_button})
else:
redirect = self.submit_bid(request)
messages.success(request, user_message[0].description)
return HttpResponseRedirect(
redirect or reverse('home', urlconf='gbe.urls'))
def dispatch(self, *args, **kwargs):
return super(MakeBidView, self).dispatch(*args, **kwargs)
| [((215, 5, 215, 37), 'django.utils.decorators.method_decorator', 'method_decorator', ({(215, 22, 215, 36): 'login_required'}, {}), '(login_required)', False, 'from django.utils.decorators import method_decorator\n'), ((44, 21, 44, 61), 'gbe.functions.validate_profile', 'validate_profile', (), '', False, 'from gbe.functions import validate_profile\n'), ((103, 15, 106, 28), 'gbe.ticketing_idd_interface.fee_paid', 'fee_paid', ({(104, 12, 104, 25): 'self.bid_type', (105, 12, 105, 43): 'self.owner.user_object.username', (106, 12, 106, 27): 'self.conference'}, {}), '(self.bid_type, self.owner.user_object.username, self.conference)', False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((107, 23, 112, 50), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((168, 15, 171, 20), 'django.shortcuts.render', 'render', ({(169, 12, 169, 19): 'request', (170, 12, 170, 26): '"""gbe/bid.tmpl"""', (171, 12, 171, 19): 'context'}, {}), "(request, 'gbe/bid.tmpl', context)", False, 'from django.shortcuts import get_object_or_404, render\n'), ((283, 8, 283, 62), 'django.contrib.messages.success', 'messages.success', ({(283, 25, 283, 32): 'request', (283, 34, 283, 61): 'user_message[0].description'}, {}), '(request, user_message[0].description)', False, 'from django.contrib import messages\n'), ((46, 27, 51, 51), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((52, 12, 52, 66), 'django.contrib.messages.warning', 'messages.warning', ({(52, 29, 52, 36): 'request', (52, 38, 52, 65): 'user_message[0].description'}, {}), '(request, user_message[0].description)', False, 'from django.contrib import messages\n'), ((61, 30, 61, 74), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404, render\n'), ((85, 27, 90, 52), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((93, 27, 98, 51), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((124, 27, 129, 53), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((131, 12, 133, 44), 'django.contrib.messages.info', 'messages.info', ({(132, 16, 132, 23): 'request', (133, 16, 133, 43): 'user_message[0].description'}, {}), '(request, user_message[0].description)', False, 'from django.contrib import messages\n'), ((193, 27, 198, 49), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((202, 12, 202, 47), 'django.contrib.messages.warning', 'messages.warning', ({(202, 29, 202, 36): 'request', (202, 38, 202, 46): 'full_msg'}, {}), '(request, full_msg)', False, 'from django.contrib import messages\n'), ((209, 19, 209, 49), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', ({(209, 40, 209, 48): 'redirect'}, {}), '(redirect)', False, 'from django.http import HttpResponseRedirect\n'), ((223, 19, 223, 49), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', ({(223, 40, 223, 48): 'redirect'}, {}), '(redirect)', False, 'from django.http import 
HttpResponseRedirect\n'), ((235, 32, 237, 61), 'gbe.ticketing_idd_interface.get_ticket_form', 'get_ticket_form', ({(235, 48, 235, 71): 'self.bid_class.__name__', (236, 48, 236, 63): 'self.conference', (237, 48, 237, 60): 'request.POST'}, {}), '(self.bid_class.__name__, self.conference, request.POST)', False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((231, 40, 234, 32), 'gbe.ticketing_idd_interface.fee_paid', 'fee_paid', ({(232, 16, 232, 29): 'self.bid_type', (233, 16, 233, 47): 'self.owner.user_object.username', (234, 16, 234, 31): 'self.conference'}, {}), '(self.bid_type, self.owner.user_object.username, self.conference)', False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((239, 32, 244, 66), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((245, 16, 245, 69), 'django.contrib.messages.error', 'messages.error', ({(245, 31, 245, 38): 'request', (245, 40, 245, 68): 'error_message[0].description'}, {}), '(request, error_message[0].description)', False, 'from django.contrib import messages\n'), ((258, 51, 263, 46), 'gbe.ticketing_idd_interface.get_payment_details', 'get_payment_details', ({(259, 20, 259, 27): 'request', (260, 20, 260, 37): 'self.payment_form', (261, 20, 261, 33): 'self.bid_type', (262, 20, 262, 38): 'self.bid_object.pk', (263, 20, 263, 45): 'self.owner.user_object.pk'}, {}), '(request, self.payment_form, self.bid_type, self.\n bid_object.pk, self.owner.user_object.pk)', False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((265, 34, 270, 59), 'gbe.models.UserMessage.objects.get_or_create', 'UserMessage.objects.get_or_create', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((272, 23, 279, 53), 'django.shortcuts.render', 'render', ({(273, 20, 273, 27): 'request', (274, 20, 274, 42): '"""gbe/confirm_pay.tmpl"""', (275, 20, 279, 52): "{'dynamic_message': dynamic_message[0].description, 'page_title':\n page_title, 'cart_items': cart_items, 'total': total, 'paypal_button':\n paypal_button}"}, {}), "(request, 'gbe/confirm_pay.tmpl', {'dynamic_message': dynamic_message\n [0].description, 'page_title': page_title, 'cart_items': cart_items,\n 'total': total, 'paypal_button': paypal_button})", False, 'from django.shortcuts import get_object_or_404, render\n'), ((285, 24, 285, 59), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((54, 16, 54, 61), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((64, 30, 65, 40), 'gbe.models.Conference.objects.filter', 'Conference.objects.filter', (), '', False, 'from gbe.models import Conference, UserMessage\n'), ((137, 37, 138, 69), 'gbe.ticketing_idd_interface.get_ticket_form', 'get_ticket_form', ({(137, 53, 137, 76): 'self.bid_class.__name__', (138, 53, 138, 68): 'self.conference'}, {}), '(self.bid_class.__name__, self.conference)', False, 'from gbe.ticketing_idd_interface import get_payment_details, get_ticket_form, fee_paid\n'), ((205, 16, 205, 55), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n'), ((201, 16, 201, 52), 'django.urls.reverse', 'reverse', (), '', False, 'from django.urls import reverse\n')] |
KawashiroNitori/epicteller | epicteller/core/dao/character.py | 264b11e7e6eb58beb0f67ecbbb811d268a533f7a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from collections import defaultdict
from typing import List, Optional, Iterable, Dict
import base62
from sqlalchemy import select, and_
from sqlalchemy.dialects.mysql import insert as mysql_insert
from epicteller.core.model.character import Character
from epicteller.core.tables import table
from epicteller.core.util import ObjectDict
from epicteller.core.util.enum import ExternalType
from epicteller.core.util.seq import get_id
def _format_character(result) -> Optional[Character]:
if not result:
return
character = Character(
id=result.id,
url_token=result.url_token,
member_id=result.member_id,
name=result.name,
avatar=result.avatar,
description=result.description,
is_removed=bool(result.is_removed),
raw_data=result.data,
created=result.created,
updated=result.updated,
)
return character
class CharacterDAO:
t = table.character
select_clause = select([
t.c.id,
t.c.url_token,
t.c.name,
t.c.member_id,
t.c.avatar,
t.c.description,
t.c.is_removed,
t.c.data,
t.c.created,
t.c.updated,
])
@classmethod
async def batch_get_character_by_id(cls, character_ids: Iterable[int]) -> Dict[int, Character]:
query = cls.select_clause.where(cls.t.c.id.in_(character_ids))
result = await table.execute(query)
rows = await result.fetchall()
return {row.id: _format_character(row) for row in rows}
@classmethod
async def batch_get_character_by_url_token(cls, url_tokens: Iterable[str]) -> Dict[str, Character]:
query = cls.select_clause.where(cls.t.c.url_token.in_(url_tokens))
result = await table.execute(query)
rows = await result.fetchall()
return {row.url_token: _format_character(result) for row in rows}
@classmethod
async def get_characters_by_owner(cls, member_id: int) -> List[Character]:
query = cls.select_clause.where(cls.t.c.member_id == member_id)
results = await table.execute(query)
characters = [_format_character(room) for room in await results.fetchall()]
return characters
@classmethod
async def update_character(cls, character_id: int, **kwargs) -> None:
if 'updated' not in kwargs:
kwargs['updated'] = int(time.time())
query = cls.t.update().values(kwargs).where(cls.t.c.id == character_id)
await table.execute(query)
@classmethod
async def create_character(cls, member_id: int, name: str, avatar: str, description: str,
raw_data: dict) -> Character:
created = int(time.time())
url_token = base62.encode(get_id())
values = ObjectDict(
url_token=url_token,
member_id=member_id,
name=name,
avatar=avatar,
description=description,
is_removed=0,
data=raw_data,
created=created,
updated=created,
)
query = cls.t.insert().values(values)
result = await table.execute(query)
values.id = result.lastrowid
character = _format_character(values)
return character
class CharacterCampaignDAO:
t = table.character_campaign_index
@classmethod
async def get_character_id_by_campaign_name(cls, campaign_id: int, name: str) -> Optional[int]:
query = select([cls.t.c.character_id]).where(and_(cls.t.c.campaign_id == campaign_id,
cls.t.c.name == name))
result = await table.execute(query)
row = await result.fetchone()
if not row:
return
return int(row.character_id)
@classmethod
async def get_character_ids_by_campaign_id(cls, campaign_id: int) -> List[int]:
query = select([cls.t.c.character_id]).where(cls.t.c.campaign_id == campaign_id)
results = await table.execute(query)
character_ids = [int(row.character_id) for row in await results.fetchall()]
return character_ids
@classmethod
async def get_campaign_ids_by_character_ids(cls, character_ids: List[int]) -> Dict[int, List[int]]:
query = select([
cls.t.c.character_id,
cls.t.c.campaign_id,
]).where(cls.t.c.character_id.in_(character_ids))
results = await table.execute(query)
rows = await results.fetchall()
campaign_map = defaultdict(list)
for r in rows:
campaign_map[r.character_id].append(r.campaign_id)
return dict(campaign_map)
@classmethod
async def bind_character_to_campaign(cls, character_id: int, name: str, campaign_id: int):
query = mysql_insert(cls.t).values(
character_id=character_id,
name=name,
campaign_id=campaign_id,
).on_duplicate_key_update(
name=name,
)
await table.execute(query)
@classmethod
async def unbind_character_to_campaign(cls, character_id: int, campaign_id: int):
query = cls.t.delete().where(and_(cls.t.c.character_id == character_id, cls.t.c.campaign_id == campaign_id))
await table.execute(query)
class CharacterExternalDAO:
t = table.character_external_id
@classmethod
async def get_external_ids_by_character(cls, character_id: int) -> Dict[ExternalType, str]:
query = select([
cls.t.c.type,
cls.t.c.external_id,
]).where(cls.t.c.character_id == character_id)
result = await table.execute(query)
rows = await result.fetchall()
externals = {ExternalType(row.type): row.external_id for row in rows}
return externals
@classmethod
async def get_character_ids_by_external(cls, external_type: ExternalType, external_id: str) -> List[int]:
query = select([cls.t.c.character_id]).where(and_(cls.t.c.type == int(external_type),
cls.t.c.external_id == external_id))
result = await table.execute(query)
rows = await result.fetchall()
character_ids = [r.character_id for r in rows]
return character_ids
@classmethod
async def bind_character_external_id(cls, character_id: int, external_type: ExternalType, external_id: str):
query = mysql_insert(cls.t).values(
character_id=character_id,
type=int(external_type),
external_id=external_id,
).on_duplicate_key_update(
external_id=external_id,
)
await table.execute(query)
@classmethod
async def unbind_character_external_id(cls, character_id: int, external_type: ExternalType):
query = cls.t.delete().where(and_(cls.t.c.character_id == character_id, cls.t.c.type == int(external_type)))
await table.execute(query)
| [((39, 20, 50, 6), 'sqlalchemy.select', 'select', ({(39, 27, 50, 5): '[t.c.id, t.c.url_token, t.c.name, t.c.member_id, t.c.avatar, t.c.\n description, t.c.is_removed, t.c.data, t.c.created, t.c.updated]'}, {}), '([t.c.id, t.c.url_token, t.c.name, t.c.member_id, t.c.avatar, t.c.\n description, t.c.is_removed, t.c.data, t.c.created, t.c.updated])', False, 'from sqlalchemy import select, and_\n'), ((85, 17, 95, 9), 'epicteller.core.util.ObjectDict', 'ObjectDict', (), '', False, 'from epicteller.core.util import ObjectDict\n'), ((131, 23, 131, 40), 'collections.defaultdict', 'defaultdict', ({(131, 35, 131, 39): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((55, 23, 55, 43), 'epicteller.core.tables.table.execute', 'table.execute', ({(55, 37, 55, 42): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((62, 23, 62, 43), 'epicteller.core.tables.table.execute', 'table.execute', ({(62, 37, 62, 42): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((69, 24, 69, 44), 'epicteller.core.tables.table.execute', 'table.execute', ({(69, 38, 69, 43): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((78, 14, 78, 34), 'epicteller.core.tables.table.execute', 'table.execute', ({(78, 28, 78, 33): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((83, 22, 83, 33), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((84, 34, 84, 42), 'epicteller.core.util.seq.get_id', 'get_id', ({}, {}), '()', False, 'from epicteller.core.util.seq import get_id\n'), ((97, 23, 97, 43), 'epicteller.core.tables.table.execute', 'table.execute', ({(97, 37, 97, 42): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((108, 53, 109, 79), 'sqlalchemy.and_', 'and_', ({(108, 58, 108, 92): 'cls.t.c.campaign_id == campaign_id', (109, 58, 109, 78): 'cls.t.c.name == name'}, {}), '(cls.t.c.campaign_id == campaign_id, cls.t.c.name == name)', False, 'from sqlalchemy import select, and_\n'), ((110, 23, 110, 43), 'epicteller.core.tables.table.execute', 'table.execute', ({(110, 37, 110, 42): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((119, 24, 119, 44), 'epicteller.core.tables.table.execute', 'table.execute', ({(119, 38, 119, 43): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((129, 24, 129, 44), 'epicteller.core.tables.table.execute', 'table.execute', ({(129, 38, 129, 43): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((145, 14, 145, 34), 'epicteller.core.tables.table.execute', 'table.execute', ({(145, 28, 145, 33): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((149, 37, 149, 115), 'sqlalchemy.and_', 'and_', ({(149, 42, 149, 78): 'cls.t.c.character_id == character_id', (149, 80, 149, 114): 'cls.t.c.campaign_id == campaign_id'}, {}), '(cls.t.c.character_id == character_id, cls.t.c.campaign_id == campaign_id)', False, 'from sqlalchemy import select, and_\n'), ((150, 14, 150, 34), 'epicteller.core.tables.table.execute', 'table.execute', ({(150, 28, 150, 33): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((162, 23, 162, 43), 'epicteller.core.tables.table.execute', 'table.execute', ({(162, 37, 162, 42): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((164, 21, 164, 43), 'epicteller.core.util.enum.ExternalType', 'ExternalType', ({(164, 34, 164, 42): 
'row.type'}, {}), '(row.type)', False, 'from epicteller.core.util.enum import ExternalType\n'), ((171, 23, 171, 43), 'epicteller.core.tables.table.execute', 'table.execute', ({(171, 37, 171, 42): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((185, 14, 185, 34), 'epicteller.core.tables.table.execute', 'table.execute', ({(185, 28, 185, 33): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((190, 14, 190, 34), 'epicteller.core.tables.table.execute', 'table.execute', ({(190, 28, 190, 33): 'query'}, {}), '(query)', False, 'from epicteller.core.tables import table\n'), ((76, 36, 76, 47), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((108, 16, 108, 46), 'sqlalchemy.select', 'select', ({(108, 23, 108, 45): '[cls.t.c.character_id]'}, {}), '([cls.t.c.character_id])', False, 'from sqlalchemy import select, and_\n'), ((118, 16, 118, 46), 'sqlalchemy.select', 'select', ({(118, 23, 118, 45): '[cls.t.c.character_id]'}, {}), '([cls.t.c.character_id])', False, 'from sqlalchemy import select, and_\n'), ((125, 16, 128, 10), 'sqlalchemy.select', 'select', ({(125, 23, 128, 9): '[cls.t.c.character_id, cls.t.c.campaign_id]'}, {}), '([cls.t.c.character_id, cls.t.c.campaign_id])', False, 'from sqlalchemy import select, and_\n'), ((158, 16, 161, 10), 'sqlalchemy.select', 'select', ({(158, 23, 161, 9): '[cls.t.c.type, cls.t.c.external_id]'}, {}), '([cls.t.c.type, cls.t.c.external_id])', False, 'from sqlalchemy import select, and_\n'), ((169, 16, 169, 46), 'sqlalchemy.select', 'select', ({(169, 23, 169, 45): '[cls.t.c.character_id]'}, {}), '([cls.t.c.character_id])', False, 'from sqlalchemy import select, and_\n'), ((138, 16, 138, 35), 'sqlalchemy.dialects.mysql.insert', 'mysql_insert', ({(138, 29, 138, 34): 'cls.t'}, {}), '(cls.t)', True, 'from sqlalchemy.dialects.mysql import insert as mysql_insert\n'), ((178, 16, 178, 35), 'sqlalchemy.dialects.mysql.insert', 'mysql_insert', ({(178, 29, 178, 34): 'cls.t'}, {}), '(cls.t)', True, 'from sqlalchemy.dialects.mysql import insert as mysql_insert\n')] |
Tongjilibo/bert4torch | examples/sentence_classfication/task_sentiment_classification_roformer_v2.py | 71d5ffb3698730b16e5a252b06644a136787711e | #! -*- coding:utf-8 -*-
# Sentiment classification example, using RoPE relative position encoding
# Official project: https://github.com/ZhuiyiTechnology/roformer-v2
# PyTorch reference project: https://github.com/JunnYu/RoFormer_pytorch
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset
import torch.nn as nn
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
maxlen = 128
batch_size = 16
config_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# Load the dataset
class MyDataset(ListDataset):
@staticmethod
def load_data(filenames):
"""加载数据,并尽量划分为不超过maxlen的句子
"""
D = []
seps, strips = u'\n。!?!?;;,, ', u';;,, '
for filename in filenames:
with open(filename, encoding='utf-8') as f:
for l in f:
text, label = l.strip().split('\t')
for t in text_segmentate(text, maxlen - 2, seps, strips):
D.append((t, int(label)))
return D
def collate_fn(batch):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for text, label in batch:
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device)
return [batch_token_ids, batch_segment_ids], batch_labels.flatten()
# Load the datasets
train_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn)
test_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn)
# Define the model structure on top of BERT
class Model(BaseModel):
def __init__(self) -> None:
super().__init__()
        # Specify the model and its corresponding checkpoint path
self.bert, self.config = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, model='roformer_v2', return_model_config=True)
self.dropout = nn.Dropout(0.1)
self.dense = nn.Linear(self.config['hidden_size'], 2)
def forward(self, token_ids, segment_ids):
last_hidden_state = self.bert([token_ids, segment_ids])
output = self.dropout(last_hidden_state[:, 0, :])
output = self.dense(output)
return output
model = Model().to(device)
# Define the loss and optimizer; custom choices are supported here
model.compile(
loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=2e-5), # use a sufficiently small learning rate
metrics=['accuracy']
)
# Define the evaluation function
def evaluate(data):
total, right = 0., 0.
for x_true, y_true in data:
y_pred = model.predict(x_true).argmax(axis=1)
total += len(y_true)
right += (y_true == y_pred).sum().item()
return right / total
class Evaluator(Callback):
"""评估与保存
"""
def __init__(self):
self.best_val_acc = 0.
def on_epoch_end(self, global_step, epoch, logs=None):
val_acc = evaluate(valid_dataloader)
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
# model.save_weights('best_model.pt')
print(f'val_acc: {val_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n')
if __name__ == '__main__':
evaluator = Evaluator()
model.fit(train_dataloader, epochs=20, steps_per_epoch=500, callbacks=[evaluator])
else:
model.load_weights('best_model.pt')
| [((23, 12, 23, 52), 'bert4torch.tokenizers.Tokenizer', 'Tokenizer', (), '', False, 'from bert4torch.tokenizers import Tokenizer\n'), ((20, 19, 20, 44), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((51, 19, 51, 78), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((49, 35, 49, 68), 'bert4torch.snippets.sequence_padding', 'sequence_padding', ({(49, 52, 49, 67): 'batch_token_ids'}, {}), '(batch_token_ids)', False, 'from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset\n'), ((50, 37, 50, 72), 'bert4torch.snippets.sequence_padding', 'sequence_padding', ({(50, 54, 50, 71): 'batch_segment_ids'}, {}), '(batch_segment_ids)', False, 'from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset\n'), ((64, 33, 64, 161), 'bert4torch.models.build_transformer_model', 'build_transformer_model', (), '', False, 'from bert4torch.models import build_transformer_model, BaseModel\n'), ((65, 23, 65, 38), 'torch.nn.Dropout', 'nn.Dropout', ({(65, 34, 65, 37): '0.1'}, {}), '(0.1)', True, 'import torch.nn as nn\n'), ((66, 21, 66, 61), 'torch.nn.Linear', 'nn.Linear', ({(66, 31, 66, 57): "self.config['hidden_size']", (66, 59, 66, 60): '2'}, {}), "(self.config['hidden_size'], 2)", True, 'import torch.nn as nn\n'), ((77, 9, 77, 30), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((37, 29, 37, 76), 'bert4torch.snippets.text_segmentate', 'text_segmentate', ({(37, 45, 37, 49): 'text', (37, 51, 37, 61): '(maxlen - 2)', (37, 63, 37, 67): 'seps', (37, 69, 37, 75): 'strips'}, {}), '(text, maxlen - 2, seps, strips)', False, 'from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset\n')] |
fdmalone/pyscf | pyscf/nao/test/test_0037_aos.py | 021b17ac721e292b277d2b740e2ff8ab38bb6a4a | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
class KnowValues(unittest.TestCase):
def test_aos_libnao(self):
""" Computing of the atomic orbitals """
from pyscf.nao import system_vars_c
from pyscf.tools.cubegen import Cube
sv = system_vars_c().init_siesta_xml(label='water', cd=os.path.dirname(os.path.abspath(__file__)))
cc = Cube(sv, nx=20, ny=20, nz=20)
aos = sv.comp_aos_den(cc.get_coords())
self.assertEqual(aos.shape[0], cc.nx*cc.ny*cc.nz)
self.assertEqual(aos.shape[1], sv.norbs)
if __name__ == "__main__": unittest.main()
| [((31, 27, 31, 42), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import os, unittest, numpy as np\n'), ((26, 9, 26, 38), 'pyscf.tools.cubegen.Cube', 'Cube', (), '', False, 'from pyscf.tools.cubegen import Cube\n'), ((25, 9, 25, 24), 'pyscf.nao.system_vars_c', 'system_vars_c', ({}, {}), '()', False, 'from pyscf.nao import system_vars_c\n'), ((25, 75, 25, 100), 'os.path.abspath', 'os.path.abspath', ({(25, 91, 25, 99): '__file__'}, {}), '(__file__)', False, 'import os, unittest, numpy as np\n')] |