content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import cv2
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import math
from paddle import inference
def parse_args():
def str2bool(v):
return v.lower() in ("true", "t", "1")
parser = argparse.ArgumentParser()
# params for prediction engine
parser.add_argument("--use_gpu", type=str2bool, default=True)
parser.add_argument("--ir_optim", type=str2bool, default=True)
parser.add_argument("--use_tensorrt", type=str2bool, default=False)
parser.add_argument("--use_fp16", type=str2bool, default=False)
parser.add_argument("--gpu_mem", type=int, default=500)
# params for text detector
parser.add_argument("--image_dir", type=str)
parser.add_argument("--det_algorithm", type=str, default='DB')
parser.add_argument("--det_model_dir", type=str)
parser.add_argument("--det_limit_side_len", type=float, default=960)
parser.add_argument("--det_limit_type", type=str, default='max')
# DB parmas
parser.add_argument("--det_db_thresh", type=float, default=0.3)
parser.add_argument("--det_db_box_thresh", type=float, default=0.6)
parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5)
parser.add_argument("--max_batch_size", type=int, default=10)
parser.add_argument("--use_dilation", type=bool, default=False)
parser.add_argument("--det_db_score_mode", type=str, default="fast")
# EAST parmas
parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)
# SAST parmas
parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
parser.add_argument("--det_sast_polygon", type=bool, default=False)
# params for text recognizer
parser.add_argument("--rec_algorithm", type=str, default='CRNN')
parser.add_argument("--rec_model_dir", type=str)
parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
parser.add_argument("--rec_char_type", type=str, default='ch')
parser.add_argument("--rec_batch_num", type=int, default=6)
parser.add_argument("--max_text_length", type=int, default=25)
parser.add_argument(
"--rec_char_dict_path",
type=str,
default="./ppocr/utils/ppocr_keys_v1.txt")
parser.add_argument("--use_space_char", type=str2bool, default=True)
parser.add_argument(
"--vis_font_path", type=str, default="./doc/fonts/simfang.ttf")
parser.add_argument("--drop_score", type=float, default=0.5)
# params for e2e
parser.add_argument("--e2e_algorithm", type=str, default='PGNet')
parser.add_argument("--e2e_model_dir", type=str)
parser.add_argument("--e2e_limit_side_len", type=float, default=768)
parser.add_argument("--e2e_limit_type", type=str, default='max')
# PGNet parmas
parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5)
parser.add_argument(
"--e2e_char_dict_path", type=str, default="./ppocr/utils/ic15_dict.txt")
parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext')
parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True)
parser.add_argument("--e2e_pgnet_mode", type=str, default='fast')
# params for text classifier
parser.add_argument("--use_angle_cls", type=str2bool, default=False)
parser.add_argument("--cls_model_dir", type=str)
parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
parser.add_argument("--label_list", type=list, default=['0', '180'])
parser.add_argument("--cls_batch_num", type=int, default=6)
parser.add_argument("--cls_thresh", type=float, default=0.9)
parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
parser.add_argument("--cpu_threads", type=int, default=10)
parser.add_argument("--use_pdserving", type=str2bool, default=False)
parser.add_argument("--use_mp", type=str2bool, default=False)
parser.add_argument("--total_process_num", type=int, default=1)
parser.add_argument("--process_id", type=int, default=0)
return parser.parse_args()
def create_predictor(args, mode, logger):
if mode == "det":
model_dir = args.det_model_dir
elif mode == 'cls':
model_dir = args.cls_model_dir
elif mode == 'rec':
model_dir = args.rec_model_dir
else:
model_dir = args.e2e_model_dir
if model_dir is None:
logger.info("not find {} model file path {}".format(mode, model_dir))
sys.exit(0)
model_file_path = model_dir + "/inference.pdmodel"
params_file_path = model_dir + "/inference.pdiparams"
    if not os.path.exists(model_file_path):
        logger.info("could not find model file path {}".format(model_file_path))
        sys.exit(0)
    if not os.path.exists(params_file_path):
        logger.info("could not find params file path {}".format(params_file_path))
        sys.exit(0)
config = inference.Config(model_file_path, params_file_path)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
if args.use_tensorrt:
config.enable_tensorrt_engine(
precision_mode=inference.PrecisionType.Half
if args.use_fp16 else inference.PrecisionType.Float32,
max_batch_size=args.max_batch_size)
else:
config.disable_gpu()
cpu_threads = args.cpu_threads if hasattr(args, "cpu_threads") else 10
config.set_cpu_math_library_num_threads(cpu_threads)
if args.enable_mkldnn:
# cache 10 different shapes for mkldnn to avoid memory leak
config.set_mkldnn_cache_capacity(10)
config.enable_mkldnn()
# enable memory optim
config.enable_memory_optim()
config.disable_glog_info()
config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
config.switch_use_feed_fetch_ops(False)
# create predictor
predictor = inference.create_predictor(config)
input_names = predictor.get_input_names()
for name in input_names:
input_tensor = predictor.get_input_handle(name)
output_names = predictor.get_output_names()
output_tensors = []
for output_name in output_names:
output_tensor = predictor.get_output_handle(output_name)
output_tensors.append(output_tensor)
return predictor, input_tensor, output_tensors
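# Illustrative usage of create_predictor (names taken from this file; the logger is
# assumed to be supplied by the caller):
#     args = parse_args()
#     predictor, input_tensor, output_tensors = create_predictor(args, 'det', logger)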
def draw_e2e_res(dt_boxes, strs, img_path):
src_im = cv2.imread(img_path)
    for box, txt in zip(dt_boxes, strs):  # txt avoids shadowing the builtin str
box = box.astype(np.int32).reshape((-1, 1, 2))
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
cv2.putText(
src_im,
            txt,
org=(int(box[0, 0, 0]), int(box[0, 0, 1])),
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=0.7,
color=(0, 255, 0),
thickness=1)
return src_im
def draw_text_det_res(dt_boxes, img_path):
src_im = cv2.imread(img_path)
for box in dt_boxes:
box = np.array(box).astype(np.int32).reshape(-1, 2)
cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
return src_im
def resize_img(img, input_size=600):
"""
resize img and limit the longest side of the image to input_size
"""
img = np.array(img)
im_shape = img.shape
im_size_max = np.max(im_shape[0:2])
im_scale = float(input_size) / float(im_size_max)
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
return img
def draw_ocr(image,
boxes,
txts=None,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
"""
Visualize the results of OCR detection and recognition
args:
image(Image|array): RGB image
boxes(list): boxes with shape(N, 4, 2)
txts(list): the texts
        scores(list): the corresponding scores of the texts
        drop_score(float): only boxes whose scores are greater than drop_score will be visualized
font_path: the path of font which is used to draw text
return(array):
the visualized img
"""
if scores is None:
scores = [1] * len(boxes)
box_num = len(boxes)
for i in range(box_num):
if scores is not None and (scores[i] < drop_score or
math.isnan(scores[i])):
continue
box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
if txts is not None:
img = np.array(resize_img(image, input_size=600))
txt_img = text_visual(
txts,
scores,
img_h=img.shape[0],
img_w=600,
threshold=drop_score,
font_path=font_path)
img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)
return img
return image
def draw_ocr_box_txt(image,
boxes,
txts,
scores=None,
drop_score=0.5,
font_path="./doc/simfang.ttf"):
h, w = image.height, image.width
img_left = image.copy()
img_right = Image.new('RGB', (w, h), (255, 255, 255))
import random
random.seed(0)
draw_left = ImageDraw.Draw(img_left)
draw_right = ImageDraw.Draw(img_right)
for idx, (box, txt) in enumerate(zip(boxes, txts)):
if scores is not None and scores[idx] < drop_score:
continue
color = (random.randint(0, 255), random.randint(0, 255),
random.randint(0, 255))
draw_left.polygon(box, fill=color)
draw_right.polygon(
[
box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],
box[2][1], box[3][0], box[3][1]
],
outline=color)
        box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][1])**2)
        box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][1])**2)
if box_height > 2 * box_width:
font_size = max(int(box_width * 0.9), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
cur_y = box[0][1]
for c in txt:
char_size = font.getsize(c)
draw_right.text(
(box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
cur_y += char_size[1]
else:
font_size = max(int(box_height * 0.8), 10)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
draw_right.text(
[box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
img_left = Image.blend(image, img_left, 0.5)
img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
img_show.paste(img_left, (0, 0, w, h))
img_show.paste(img_right, (w, 0, w * 2, h))
return np.array(img_show)
def str_count(s):
"""
    Count the display width of a string in units of Chinese characters:
    a single English letter, digit or space counts as half of a Chinese character.
    args:
        s(string): the input string
    return(int):
        the width of the string measured in Chinese-character units
"""
import string
count_zh = count_pu = 0
s_len = len(s)
en_dg_count = 0
for c in s:
if c in string.ascii_letters or c.isdigit() or c.isspace():
en_dg_count += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
return s_len - math.ceil(en_dg_count / 2)
def text_visual(texts,
scores,
img_h=400,
img_w=600,
threshold=0.,
font_path="./doc/simfang.ttf"):
"""
create new blank img and draw txt on it
args:
        texts(list): the texts to be drawn
scores(list|None): corresponding score of each txt
img_h(int): the height of blank img
img_w(int): the width of blank img
font_path: the path of font which is used to draw text
    return(array):
        the drawn text image as a numpy array
"""
if scores is not None:
assert len(texts) == len(
scores), "The number of txts and corresponding scores must match"
def create_blank_img():
blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255
blank_img[:, img_w - 1:] = 0
blank_img = Image.fromarray(blank_img).convert("RGB")
draw_txt = ImageDraw.Draw(blank_img)
return blank_img, draw_txt
blank_img, draw_txt = create_blank_img()
font_size = 20
txt_color = (0, 0, 0)
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
gap = font_size + 5
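    # With the defaults above (img_w=600, font_size=20) each rendered line holds at
    # most 600 // 20 - 4 = 26 character widths, as measured by str_count().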
txt_img_list = []
count, index = 1, 0
for idx, txt in enumerate(texts):
index += 1
if scores[idx] < threshold or math.isnan(scores[idx]):
index -= 1
continue
first_line = True
while str_count(txt) >= img_w // font_size - 4:
tmp = txt
txt = tmp[:img_w // font_size - 4]
if first_line:
new_txt = str(index) + ': ' + txt
first_line = False
else:
new_txt = ' ' + txt
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
txt = tmp[img_w // font_size - 4:]
if count >= img_h // gap - 1:
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
if first_line:
new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])
else:
new_txt = " " + txt + " " + '%.3f' % (scores[idx])
draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
# whether add new blank img or not
if count >= img_h // gap - 1 and idx + 1 < len(texts):
txt_img_list.append(np.array(blank_img))
blank_img, draw_txt = create_blank_img()
count = 0
count += 1
txt_img_list.append(np.array(blank_img))
if len(txt_img_list) == 1:
blank_img = np.array(txt_img_list[0])
else:
blank_img = np.concatenate(txt_img_list, axis=1)
return np.array(blank_img)
def base64_to_cv2(b64str):
import base64
data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
def draw_boxes(image, boxes, scores=None, drop_score=0.5):
if scores is None:
scores = [1] * len(boxes)
for (box, score) in zip(boxes, scores):
if score < drop_score:
continue
box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
return image
if __name__ == '__main__':
test_img = "./doc/test_v2"
predict_txt = "./doc/predict.txt"
    with open(predict_txt, 'r') as f:
        data = f.readlines()
img_path, anno = data[0].strip().split('\t')
img_name = os.path.basename(img_path)
img_path = os.path.join(test_img, img_name)
image = Image.open(img_path)
data = json.loads(anno)
boxes, txts, scores = [], [], []
for dic in data:
boxes.append(dic['points'])
txts.append(dic['transcription'])
scores.append(round(dic['scores'], 3))
new_img = draw_ocr(image, boxes, txts, scores)
    cv2.imwrite(img_name, new_img[:, :, ::-1])  # drawn image is RGB; flip to BGR for cv2.imwrite
| 37.004556 | 85 | 0.620499 | ["Apache-2.0"] | OcrOrg/PaddleOCR | tools/infer/utility.py | 16,245 | Python |
from django.db import models
import string, random, datetime
from profiles.models import UserProfile, Location, Surcharges, User
from decimal import *
from menu.models import Product, Entree, Pizza, PizzaTopping, Side
from localflavor.us.models import PhoneNumberField, USStateField, USZipCodeField
#modify this to check against prior conf orders.
def make_conf(length=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(length))
class Order(models.Model):
customer = models.ForeignKey(User, blank=True, null=True)
created_date = models.DateTimeField(auto_now=False, auto_now_add=True)
stamped = models.BooleanField(default=False)
stamped_time = models.DateTimeField(auto_now=True, auto_now_add=False, blank=True, null=True)
complete = models.BooleanField(default=False)
delivery = models.BooleanField(default=False)
delivery_available = models.BooleanField(default=False)
location = models.ForeignKey(Location, blank=True, null=True)
total = models.DecimalField(max_digits=20, decimal_places=2, default=0)
subtotal = models.DecimalField(max_digits=20, decimal_places=2, default=0)
taxes = models.DecimalField(max_digits=20, decimal_places=2, default=0)
first_name = models.CharField(max_length=120, blank=True, null=True)
last_name = models.CharField(max_length=120, blank=True, null=True)
street_address = models.CharField(max_length=120, blank=True, null=True)
city = models.CharField(max_length=120, blank=True, null=True)
state = USStateField(blank=True, null=True)
zipcode = USZipCodeField(blank=True, null=True)
phone = PhoneNumberField(blank=True, null=True)
email = models.EmailField(max_length=120, blank=True, null=True)
note = models.TextField(max_length=1000,blank=True, null=True)
conf_number = models.CharField(max_length=20, blank=True, null=True)
#delivery charge needs to be separate from lines
def get_delivery_charge(self):
return Location.objects.get(id=str(self.location)).get_delivery_charge()
def compute_subtotal(self):
lineitems = list(OrderLineItem.objects.filter(order=self.id))
delivery_charge = Location.objects.get(id=str(self.location)).get_delivery_charge()
lines = []
for lineitem in lineitems:
lines.append(lineitem.line_price)
        if self.delivery:
            subtotal = sum(lines) + delivery_charge
return subtotal
else:
return sum(lines)
def compute_taxes(self):
subtotal = self.compute_subtotal()
loc = Surcharges.objects.get(location=self.location).location
tax_rate = Decimal(str(loc.get_tax_rate()))
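        # Illustrative: a subtotal of Decimal('20.00') at a tax rate of 0.0825
        # rounds half-up to Decimal('1.65').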
return Decimal(round(subtotal * tax_rate, 2)).quantize(Decimal('.01'), rounding=ROUND_HALF_UP)
def compute_total(self):
return Decimal(round(self.compute_subtotal() + self.compute_taxes(), 2)).quantize(Decimal('.01'), rounding=ROUND_HALF_UP)
class Meta:
ordering = ['-stamped_time']
def __unicode__(self):
return str(str(self.created_date) + ' ' + str(self.id)) + str(self.customer)
class OrderLineItem(models.Model):
order = models.ForeignKey(Order)
product = models.ForeignKey('menu.Product')
size = models.CharField(max_length=7, blank=True, null=True)
PIZZA = 'PIZZA'
SIDE = 'SIDE'
SOUP = 'SOUP'
SALAD = 'SALAD'
BREADSTICKS = 'BREADSTICKS'
PASTA = 'PASTA'
WINGS = 'WINGS'
SANDWICH = 'SANDWICH'
BEVERAGE = 'BEVERAGE'
ITEM_TYPES = (
(PIZZA, 'PIZZA'),
(SIDE,'SIDE'),
(SOUP,'SOUP'),
(SALAD,'SALAD'),
(BREADSTICKS,'BREADSTICKS'),
(PASTA, 'PASTA'),
(WINGS, 'WINGS'),
(SANDWICH,'SANDWICH'),
(BEVERAGE, 'BEVERAGE'),
)
product_type = models.CharField(max_length=50, choices=ITEM_TYPES, default=PIZZA)
qty = models.PositiveIntegerField(default=1)
line_price = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
toppings = models.ManyToManyField(PizzaTopping, blank=True, null=True, related_name='topping')
def get_price(self):
if self.product_type == 'PIZZA':
pizza_price = Pizza.objects.get(product_id=self.product, size=self.size).get_price()
pricing = []
pricing.append(pizza_price)
for topping in self.toppings.all():
pricing.append(topping.price)
return sum(pricing)
elif self.product_type == 'ENTREE':
return Entree.objects.get(product_id=self.product, size=self.size).get_price()
elif self.product_type == 'SIDE':
return Side.objects.get(product_id=self.product, size=self.size).price
def __unicode__(self):
return str(self.product)
| 38.437037 | 129 | 0.664097 | ["MIT"] | hellojerry/pizzatime | src/orders/models.py | 5,189 | Python |
import json
import os
from golem.core import utils
from golem.test_runner import test_runner
from golem.report.execution_report import create_execution_directory
from golem.report.execution_report import create_execution_dir_single_test
from golem.report import test_report
from golem.report.test_report import get_test_case_data
from golem.report.test_report import get_test_debug_log
from golem.report.test_report import create_report_directory
from golem.report.test_report import generate_report
class TestGetTestCaseData:
def test_get_test_case_data(self, project_class, test_utils):
_, project = project_class.activate()
exc = test_utils.execute_random_suite(project)
test_name = exc['exec_data']['tests'][0]['name']
test_set = exc['exec_data']['tests'][0]['test_set']
test_data = get_test_case_data(project, test_name, exc['suite_name'],
exc['timestamp'], test_set)
assert test_data['name'] == exc['tests'][0]
assert isinstance(test_data['debug_log'], list) and len(test_data['debug_log'])
assert isinstance(test_data['info_log'], list) and len(test_data['info_log'])
assert test_data['has_finished'] is True
class TestTestReportDirectory:
def test_test_report_directory(self, project_session):
testdir, project = project_session.activate()
suite = 'suite1'
timestamp = '1.2.3.4'
test = 'test1'
test_set = 'test_set1'
path = test_report.test_report_directory(project, suite, timestamp, test, test_set)
expected = os.path.join(testdir, 'projects', project, 'reports', suite, timestamp,
test, test_set)
assert path == expected
class TestTestReportDirectorySingleTest:
def test_test_report_directory_single_test(self, project_session):
testdir, project = project_session.activate()
timestamp = '1.2.3.4'
test = 'test1'
test_set = 'test_set1'
path = test_report.test_report_directory_single_test(project, test, timestamp, test_set)
expected = os.path.join(testdir, 'projects', project, 'reports', 'single_tests',
test, timestamp, test_set)
assert path == expected
class TestGetTestLog:
def test_get_test_x_log(self, project_class, test_utils):
_, project = project_class.activate()
exc = test_utils.execute_random_suite(project)
test_name = exc['exec_data']['tests'][0]['name']
test_set = exc['exec_data']['tests'][0]['test_set']
log = get_test_debug_log(project, exc['timestamp'], test_name, test_set,
suite=exc['suite_name'])
assert 'root DEBUG test does not have setup function' in log
# inexistent test set
log = get_test_debug_log(project, exc['timestamp'], test_name,
'inexistent_test_set', suite=exc['suite_name'])
assert log is None
# inexistent test
log = get_test_debug_log(project, exc['timestamp'], 'inexistent_test_name',
test_set, suite=exc['suite_name'])
assert log is None
class TestCreateReportDirectory:
def test_create_report_directory_test(self, project_session):
testdir, project = project_session.activate()
timestamp = utils.get_timestamp()
test_name = 'testing_report_001'
exec_dir = create_execution_dir_single_test(project, test_name, timestamp)
directory = create_report_directory(exec_dir, test_name, is_suite=False)
assert os.path.isdir(directory)
def test_create_report_directory_suite(self, project_session):
testdir, project = project_session.activate()
timestamp = utils.get_timestamp()
suite_name = 'suite_foo_002'
test_name = 'testing_report_002'
exec_dir = create_execution_directory(project, suite_name, timestamp)
directory = create_report_directory(exec_dir, test_name, is_suite=True)
assert os.path.isdir(directory)
class TestGenerateReport:
def test_generate_report_with_env(self, project_session):
_, project = project_session.activate()
timestamp = utils.get_timestamp()
test_name = 'testing_report_003'
suite_name = 'suite_foo_003'
exec_dir = create_execution_directory(project, suite_name, timestamp)
report_dir = create_report_directory(exec_dir, test_name, is_suite=True)
test_data = {
'env': {
'name': 'env01',
'url': '1.1.1.1'
},
'var2': 'value2'
}
test_data = test_runner.Data(test_data)
result = {
'result': 'success',
'errors': [],
'description': 'description of the test',
'steps': [
{'message': 'step1', 'screenshot': None, 'error': None},
{'message': 'step2', 'screenshot': None, 'error': None}
],
'test_elapsed_time': 22.22,
'test_timestamp': '2018.02.04.02.16.42.729',
'browser': 'chrome',
'browser_full_name': '',
'set_name': 'set_001',
}
generate_report(report_dir, test_name, test_data, result)
path = os.path.join(report_dir, 'report.json')
with open(path) as report_file:
actual = json.load(report_file)
assert len(actual.items()) == 11
assert actual['test_case'] == test_name
assert actual['result'] == 'success'
assert actual['steps'][0]['message'] == 'step1'
assert actual['steps'][1]['message'] == 'step2'
assert actual['description'] == 'description of the test'
assert actual['errors'] == []
assert actual['test_elapsed_time'] == 22.22
assert actual['test_timestamp'] == '2018.02.04.02.16.42.729'
assert actual['browser'] == 'chrome'
assert actual['environment'] == 'env01'
assert actual['set_name'] == 'set_001'
test_data_a = "{'url': '1.1.1.1', 'name': 'env01'}"
test_data_b = "{'name': 'env01', 'url': '1.1.1.1'}"
assert actual['test_data']['env'] in [test_data_a, test_data_b]
assert actual['test_data']['var2'] == "'value2'"
| 41.2 | 96 | 0.628406 | ["MIT"] | HybridAU/golem | tests/report/test_report_test.py | 6,386 | Python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
class Stage5(torch.nn.Module):
def __init__(self):
super(Stage5, self).__init__()
self.layer1 = torch.nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self._initialize_weights()
def forward(self, input0):
out0 = input0.clone()
out1 = self.layer1(out0)
return out1
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
| 34.966667 | 97 | 0.585319 | ["MIT"] | MonicaGu/pipedream | runtime/image_classification/models/vgg16/gpus=16_straight/stage5.py | 1,049 | Python |
import twl
wolf = twl.Wolf()
def split(wolf, end=7):
return [wolf.len(n)() for n in range(2, end+1)]
def spell(ltrs, wild=0):
return split(wolf.wild(ltrs, wild), len(ltrs)+wild)
def _munge(func, fix, ltrs, wild=0):
return split(func(fix).wild(fix+ltrs, wild), len(fix+ltrs)+wild)
def starts(fix, ltrs, wild=0):
return _munge(wolf.starts, fix, ltrs, wild)
def ends(fix, ltrs, wild=0):
return _munge(wolf.ends, fix, ltrs, wild)
def contains(fix, ltrs, wild=0):
return _munge(wolf.contains, fix, ltrs, wild)
if __name__ == "__main__":
# print(wolf.len(2).words)
# print(wolf.wild('aa')())
print(contains('a', 'ciodtji'))
| 19.676471 | 68 | 0.638266 | ["MIT"] | esoterik0/scrabble-comp | use.py | 669 | Python |
from functools import lru_cache
from injector import inject
from .config_parameter_base import ConfigParameterBase
from ...data.repository import RepositoryProvider
from ...dependency import IScoped
from ...exceptions import RequiredClassException
class ConfigService(IScoped):
@inject
def __init__(self,
repository_provider: RepositoryProvider
):
self.repository_provider = repository_provider
config_subclasses = ConfigParameterBase.__subclasses__()
if config_subclasses is None or len(config_subclasses) == 0:
raise RequiredClassException(f'Requires {ConfigParameterBase.__name__} derived class')
config_class = config_subclasses[0]
        self.config_repository = repository_provider.get(config_class)
@lru_cache()
def get_config_by_name(self, name):
        parameter = self.config_repository.first(Name=name)
if parameter is not None:
return parameter.Value
else:
return None
| 34.066667 | 98 | 0.715264 | ["MIT"] | PythonDataIntegrator/pdip | pdip/configuration/services/config_service.py | 1,022 | Python |
"""Notification."""
from models.metric_notification_data import MetricNotificationData
class Notification:
"""Handle notification contents and status."""
def __init__(self, report, metrics, destination_uuid, destination):
"""Initialise the Notification with the required info."""
self.report_title = report["title"]
self.url = report.get("url")
self.metrics: list[MetricNotificationData] = metrics
self.destination_uuid = destination_uuid
self.destination = destination
def __eq__(self, other):
"""Check if the notification itself is the same, regardless of its metric content."""
return (
self.report_title == other.report_title
and self.destination_uuid == other.destination_uuid
and self.destination == other.destination
)
def merge_notification(self, new_metrics):
"""Merge new metrics into this notification."""
self.metrics.extend(new_metrics)
| 35.571429 | 93 | 0.678715 | ["Apache-2.0"] | m-zakeri/quality-time | components/notifier/src/models/notification.py | 996 | Python |
import os
import imageio
import numpy as np
from PIL import Image
from torch.autograd import Variable
from torchvision.utils import save_image
def create_gif(image_path):
frames = []
gif_name = os.path.join("images", 'mnist1.gif')
image_list = os.listdir(image_path)
    image_list = sorted(image_list)  # sorted() returns a new list; keep the result
for image_name in image_list:
frames.append(imageio.imread(os.path.join(image_path, image_name)))
imageio.mimsave(gif_name, frames, 'GIF', duration=0.1)
def resize_img(path):
names = os.listdir(path)
for name in names:
img_path = os.path.join(path, name)
img = Image.open(img_path)
img = img.resize((172, 172))
img.save(img_path)
def sample_image(opt, n_row, batches_done, generator, FloatTensor, LongTensor):
z = Variable(FloatTensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))
labels = np.array([num for _ in range(n_row) for num in range(n_row)])
labels = Variable(LongTensor(labels))
gen_imgs = generator(z, labels)
save_image(gen_imgs.data, "images/%d.png" % batches_done, nrow=n_row, normalize=True)
if __name__ == "__main__":
image_path = "images/example1"
resize_img(image_path)
create_gif(image_path)
| 26.106383 | 90 | 0.691932 | ["Apache-2.0"] | GodWriter/GAN-Pytorch | cgan/utils.py | 1,227 | Python |
import RPi.GPIO as GPIO
import hx711
import matplotlib.pyplot as plt
# Read initial calibration and tare weight data then display the plot.
def main():
GPIO.setmode(GPIO.BCM)
hx = hx711.HX711(dout_pin=5, pd_sck_pin=6)
zero_the_scale(hx)
calibrate_scale(hx)
(tare_weight, total_weight) = get_tare_and_full_weight(hx)
plot_reading(hx, tare_weight, total_weight - tare_weight)
# Set scale position to zero. The scale should be empty when this is run.
def zero_the_scale(hx):
err = hx.zero()
if err:
raise ValueError('Tare is unsuccessful.')
zero_reading = hx.get_raw_data_mean()
if zero_reading:
print('Data subtracted by offset: ', zero_reading)
else:
        raise ValueError('Invalid zero reading')
# Calibrate the scale with prompts to the user.
def calibrate_scale (hx):
input('Put known weight on the scale and then press Enter')
reading = hx.get_data_mean()
if reading:
print('Mean value from HX711 subtracted by offset:', reading)
user_input = input('Write how many grams it was and press Enter: ')
try:
weight = float(user_input)
print(weight, 'grams')
except ValueError:
print('Expected integer or float and I have got:', user_input)
ratio = reading / weight
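        # Illustrative: a mean reading of 85000 counts for a 100 g reference weight
        # gives a ratio of 850 counts per gram.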
hx.set_scale_ratio(ratio)
print('Ratio is set.')
else:
raise ValueError('Cannot calculate mean value.')
# Prompt user and get readings for the tare weight and full pie.
def get_tare_and_full_weight (hx):
input('Put the pie tin on the scale for tare weight and press enter.')
tare_weight = hx.get_weight_mean(20)
print ("Tare weight is ", tare_weight, "g")
input('Put the pie on the scale for a full weight and press enter.')
total_weight = hx.get_weight_mean(20)
print ("Full weight is ", total_weight, "g")
return (tare_weight, total_weight)
# Continually read data from the sensor, update the pie chart, and display.
def plot_reading (hx, tare_weight, full_weight):
while True:
current_weight = hx.get_weight_mean(20)
remaining_weight = max(0,current_weight - tare_weight)
#print ("Current weight is ", current_weight, "g")
labels = ['Remaining', 'Eaten']
sizes = [remaining_weight, max(0,full_weight - remaining_weight)]
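        # Illustrative: with a 200 g tin (tare) and a 1000 g pie, a current scale
        # reading of 700 g gives sizes = [500, 500], i.e. half the pie remaining.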
colors = ['sandybrown', 'lightgrey']
explode = (0, 0.1)
title_font = { 'color': 'blue', 'weight': 'bold', 'size': 30 }
label_font = { 'color': 'black', 'weight': 'normal', 'size': 20 }
h = plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=180,
textprops=label_font)
plt.title("Pi Day Pie Pie Chart", title_font)
plt.plot()
plt.draw()
plt.pause(1)
plt.clf()
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
print('Happy Pi Day!')
finally:
GPIO.cleanup()
| 33.108696 | 75 | 0.645765 | ["MIT"] | mmachenry/pie-pie-chart | pie_pie_chart.py | 3,046 | Python |
import math
from enforce_typing import enforce_types
from engine import SimStrategyBase
from util.constants import S_PER_HOUR
@enforce_types
class SimStrategy(SimStrategyBase.SimStrategyBase):
def __init__(self, no_researchers=2):
#===initialize self.time_step, max_ticks====
super().__init__()
#===set base-class values we want for this netlist====
self.setTimeStep(S_PER_HOUR)
self.setMaxTime(30, 'years') #typical runs: 10 years, 20 years, 150 years
#===new attributes specific to this netlist===
self.TICKS_BETWEEN_PROPOSALS = 6480
self.PRICE_OF_ASSETS = 1000 # OCEAN
self.RATIO_FUNDS_TO_PUBLISH = 0.4 # 40% of grant funding will go towards "doing work" & publishing
self.TRANSACTION_FEES = 0.1
self.FEES_TO_STAKERS = 0.2
self.NUMBER_OF_RESEARCHERS = no_researchers
self.FUNDING_BOUNDARY = 10000
'''
Some additional parameters that will enable more experimentation (not currently in use)
'''
self.FUNDING_TIME_DEPENDENCE = True # meaning that TICKS_BETWEEN_PROPOSALS should be used
self.PROPOSALS_FUNDED_AT_A_TIME = 1 # this would be used if FUNDING_TIME_DEPENDENCE = False, <=> funding as projects finish
self.PROPOSAL_SETUP = {'grant_requested': 1000, # can be used as a parameter in ResearcherAgent in SimState
'assets_generated': 1,
'no_researchers': 10}
self.TREASURY = 'dao_treasury'
# DT parameters
self.DT_init = 100.0
# DATA TOKEN COMPATIBILITY WIP
# # pool
# self.DT_stake = 20.0
# self.pool_weight_DT = 3.0
# self.pool_weight_OCEAN = 7.0
        # assert (self.pool_weight_DT + self.pool_weight_OCEAN) == 10.0
| 42.209302 | 131 | 0.656749 | ["Apache-2.0"] | opscientia/darcspice | assets/netlists/opsci_profit_sharing/SimStrategy.py | 1,815 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetExpressRouteGatewayResult',
'AwaitableGetExpressRouteGatewayResult',
'get_express_route_gateway',
]
@pulumi.output_type
class GetExpressRouteGatewayResult:
"""
ExpressRoute gateway resource.
"""
def __init__(__self__, auto_scale_configuration=None, etag=None, express_route_connections=None, id=None, location=None, name=None, provisioning_state=None, tags=None, type=None, virtual_hub=None):
if auto_scale_configuration and not isinstance(auto_scale_configuration, dict):
raise TypeError("Expected argument 'auto_scale_configuration' to be a dict")
pulumi.set(__self__, "auto_scale_configuration", auto_scale_configuration)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if express_route_connections and not isinstance(express_route_connections, list):
raise TypeError("Expected argument 'express_route_connections' to be a list")
pulumi.set(__self__, "express_route_connections", express_route_connections)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_hub and not isinstance(virtual_hub, dict):
raise TypeError("Expected argument 'virtual_hub' to be a dict")
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="autoScaleConfiguration")
def auto_scale_configuration(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration']:
"""
Configuration for auto scaling.
"""
return pulumi.get(self, "auto_scale_configuration")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteConnections")
def express_route_connections(self) -> Sequence['outputs.ExpressRouteConnectionResponse']:
"""
List of ExpressRoute connections to the ExpressRoute gateway.
"""
return pulumi.get(self, "express_route_connections")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the express route gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> 'outputs.VirtualHubIdResponse':
"""
The Virtual Hub where the ExpressRoute gateway is or will be deployed.
"""
return pulumi.get(self, "virtual_hub")
class AwaitableGetExpressRouteGatewayResult(GetExpressRouteGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetExpressRouteGatewayResult(
auto_scale_configuration=self.auto_scale_configuration,
etag=self.etag,
express_route_connections=self.express_route_connections,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
virtual_hub=self.virtual_hub)
def get_express_route_gateway(express_route_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteGatewayResult:
"""
ExpressRoute gateway resource.
API Version: 2020-08-01.
:param str express_route_gateway_name: The name of the ExpressRoute gateway.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expressRouteGatewayName'] = express_route_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network:getExpressRouteGateway', __args__, opts=opts, typ=GetExpressRouteGatewayResult).value
return AwaitableGetExpressRouteGatewayResult(
auto_scale_configuration=__ret__.auto_scale_configuration,
etag=__ret__.etag,
express_route_connections=__ret__.express_route_connections,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
virtual_hub=__ret__.virtual_hub)
| 36.967568 | 201 | 0.663109 | ["Apache-2.0"] | pulumi/pulumi-azure-nextgen | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | 6,839 | Python |
"""
Support for Xiaomi Gateways.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/xiaomi_aqara/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_XIAOMI_GW
from homeassistant.const import (
ATTR_BATTERY_LEVEL, CONF_HOST, CONF_MAC, CONF_PORT,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.util import slugify
REQUIREMENTS = ['PyXiaomiGateway==0.10.0']
_LOGGER = logging.getLogger(__name__)
ATTR_GW_MAC = 'gw_mac'
ATTR_RINGTONE_ID = 'ringtone_id'
ATTR_RINGTONE_VOL = 'ringtone_vol'
ATTR_DEVICE_ID = 'device_id'
CONF_DISCOVERY_RETRY = 'discovery_retry'
CONF_GATEWAYS = 'gateways'
CONF_INTERFACE = 'interface'
CONF_KEY = 'key'
CONF_DISABLE = 'disable'
DOMAIN = 'xiaomi_aqara'
PY_XIAOMI_GATEWAY = "xiaomi_gw"
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)
SERVICE_PLAY_RINGTONE = 'play_ringtone'
SERVICE_STOP_RINGTONE = 'stop_ringtone'
SERVICE_ADD_DEVICE = 'add_device'
SERVICE_REMOVE_DEVICE = 'remove_device'
GW_MAC = vol.All(
cv.string,
lambda value: value.replace(':', '').lower(),
vol.Length(min=12, max=12)
)
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema({
vol.Required(ATTR_RINGTONE_ID):
vol.All(vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])),
vol.Optional(ATTR_RINGTONE_VOL):
vol.All(vol.Coerce(int), vol.Clamp(min=0, max=100))
})
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema({
vol.Required(ATTR_DEVICE_ID):
vol.All(cv.string, vol.Length(min=14, max=14))
})
GATEWAY_CONFIG = vol.Schema({
vol.Optional(CONF_MAC, default=None): vol.Any(GW_MAC, None),
vol.Optional(CONF_KEY):
vol.All(cv.string, vol.Length(min=16, max=16)),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=9898): cv.port,
vol.Optional(CONF_DISABLE, default=False): cv.boolean,
})
def _fix_conf_defaults(config):
"""Update some configuration defaults."""
config['sid'] = config.pop(CONF_MAC, None)
if config.get(CONF_KEY) is None:
_LOGGER.warning(
'Key is not provided for gateway %s. Controlling the gateway '
'will not be possible', config['sid'])
if config.get(CONF_HOST) is None:
config.pop(CONF_PORT)
return config
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_GATEWAYS, default={}):
vol.All(cv.ensure_list, [GATEWAY_CONFIG], [_fix_conf_defaults]),
vol.Optional(CONF_INTERFACE, default='any'): cv.string,
vol.Optional(CONF_DISCOVERY_RETRY, default=3): cv.positive_int
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the Xiaomi component."""
gateways = []
interface = 'any'
discovery_retry = 3
if DOMAIN in config:
gateways = config[DOMAIN][CONF_GATEWAYS]
interface = config[DOMAIN][CONF_INTERFACE]
discovery_retry = config[DOMAIN][CONF_DISCOVERY_RETRY]
async def xiaomi_gw_discovered(service, discovery_info):
"""Perform action when Xiaomi Gateway device(s) has been found."""
# We don't need to do anything here, the purpose of Home Assistant's
# discovery service is to just trigger loading of this
# component, and then its own discovery process kicks in.
discovery.listen(hass, SERVICE_XIAOMI_GW, xiaomi_gw_discovered)
from xiaomi_gateway import XiaomiGatewayDiscovery
xiaomi = hass.data[PY_XIAOMI_GATEWAY] = XiaomiGatewayDiscovery(
hass.add_job, gateways, interface)
_LOGGER.debug("Expecting %s gateways", len(gateways))
for k in range(discovery_retry):
_LOGGER.info("Discovering Xiaomi Gateways (Try %s)", k + 1)
xiaomi.discover_gateways()
if len(xiaomi.gateways) >= len(gateways):
break
if not xiaomi.gateways:
_LOGGER.error("No gateway discovered")
return False
xiaomi.listen()
_LOGGER.debug("Gateways discovered. Listening for broadcasts")
for component in ['binary_sensor', 'sensor', 'switch', 'light', 'cover',
'lock']:
discovery.load_platform(hass, component, DOMAIN, {}, config)
def stop_xiaomi(event):
"""Stop Xiaomi Socket."""
_LOGGER.info("Shutting down Xiaomi Hub")
xiaomi.stop_listen()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
def play_ringtone_service(call):
"""Service to play ringtone through Gateway."""
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if ring_vol is not None:
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
"""Service to stop playing ringtone on Gateway."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
"""Service to add a new sub-device within the next 30 seconds."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create(
'Join permission enabled for 30 seconds! '
'Please press the pairing button of the new device once.',
title='Xiaomi Aqara Gateway')
def remove_device_service(call):
"""Service to remove a sub-device from the gateway."""
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(xiaomi, vol.Schema({}))
hass.services.register(
DOMAIN, SERVICE_PLAY_RINGTONE, play_ringtone_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_PLAY_RINGTONE))
hass.services.register(
DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service,
schema=gateway_only_schema)
hass.services.register(
DOMAIN, SERVICE_ADD_DEVICE, add_device_service,
schema=gateway_only_schema)
hass.services.register(
DOMAIN, SERVICE_REMOVE_DEVICE, remove_device_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_REMOVE_DEVICE))
return True
class XiaomiDevice(Entity):
"""Representation a base Xiaomi device."""
def __init__(self, device, device_type, xiaomi_hub):
"""Initialize the Xiaomi device."""
self._state = None
self._is_available = True
self._sid = device['sid']
self._name = '{}_{}'.format(device_type, self._sid)
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._device_state_attributes = {}
self._remove_unavailability_tracker = None
self._xiaomi_hub = xiaomi_hub
self.parse_data(device['data'], device['raw_data'])
self.parse_voltage(device['data'])
if hasattr(self, '_data_key') \
and self._data_key: # pylint: disable=no-member
self._unique_id = slugify("{}-{}".format(
self._data_key, # pylint: disable=no-member
self._sid))
else:
self._unique_id = slugify("{}-{}".format(self._type, self._sid))
def _add_push_data_job(self, *args):
self.hass.add_job(self.push_data, *args)
async def async_added_to_hass(self):
"""Start unavailability tracking."""
self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self._async_track_unavailable()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@callback
def _async_set_unavailable(self, now):
"""Set state to UNAVAILABLE."""
self._remove_unavailability_tracker = None
self._is_available = False
self.async_schedule_update_ha_state()
@callback
def _async_track_unavailable(self):
if self._remove_unavailability_tracker:
self._remove_unavailability_tracker()
self._remove_unavailability_tracker = async_track_point_in_utc_time(
self.hass, self._async_set_unavailable,
utcnow() + TIME_TILL_UNAVAILABLE)
if not self._is_available:
self._is_available = True
return True
return False
@callback
def push_data(self, data, raw_data):
"""Push from Hub."""
_LOGGER.debug("PUSH >> %s: %s", self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if is_data or is_voltage or was_unavailable:
self.async_schedule_update_ha_state()
def parse_voltage(self, data):
"""Parse battery level data sent by gateway."""
if 'voltage' not in data:
return False
max_volt = 3300
min_volt = 2800
voltage = data['voltage']
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
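        # Illustrative: a reported voltage of 3000 mV maps to
        # ((3000 - 2800) / (3300 - 2800)) * 100 = 40.0% battery.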
self._device_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
raise NotImplementedError()
def _add_gateway_to_schema(xiaomi, schema):
"""Extend a voluptuous schema with a gateway validator."""
def gateway(sid):
"""Convert sid to a gateway."""
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if gateway.sid == sid:
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid))
gateways = list(xiaomi.gateways.values())
kwargs = {}
# If the user has only 1 gateway, make it the default for services.
if len(gateways) == 1:
kwargs['default'] = gateways[0]
return schema.extend({
vol.Required(ATTR_GW_MAC, **kwargs): gateway
})
| 33.006006 | 77 | 0.672186 | ["Apache-2.0"] | phispi/home-assistant | homeassistant/components/xiaomi_aqara.py | 10,991 | Python |
from abc import ABC, abstractmethod
import numpy as np
from .constants import EPSILON
import torch
class Loss(ABC):
def __init__(self, expected_output, predict_output):
self._expected_output = expected_output
self._predict_output = predict_output
@abstractmethod
def get_loss(self):
pass
def crossEntropy(expected_output, predict_output):
    return -(expected_output * torch.log(predict_output + EPSILON) +
             (1 - expected_output) * torch.log(1 - predict_output + EPSILON)).mean()
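# EPSILON (imported from .constants) keeps both log terms finite when a prediction
# reaches exactly 0 or 1.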
def l2(expected_output, predict_output):
return ((predict_output - expected_output) ** 2).mean()
| 25.875 | 78 | 0.724638 | ["MIT"] | exitudio/neural-network-pytorch | exit/losses.py | 621 | Python |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test.py. This file does not import from __future__.
"""
class FakeClass:
"""This is a fake docstring for valid syntax purposes."""
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, name):
"""This doesn't do anything.
Args:
name: str. Means nothing.
Yields:
tuple(str, str). The argument passed in but twice in a tuple.
"""
yield (name, name)
| 30.421053 | 74 | 0.693772 | ["Apache-2.0"] | Aarjav-Jain/oppia | scripts/linters/test_files/invalid_python_three.py | 1,156 | Python |
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as ip
from scipy.ndimage import gaussian_filter1d
from utils.helpers import find_index, peakdet, replace_nan
from params import fall_params
def calc_fall_flush_timings_durations(flow_matrix, summer_timings):
max_zero_allowed_per_year = fall_params['max_zero_allowed_per_year']
max_nan_allowed_per_year = fall_params['max_nan_allowed_per_year']
min_flow_rate = fall_params['min_flow_rate']
sigma = fall_params['sigma'] # Smaller filter to find fall flush peak
wet_sigma = fall_params['wet_sigma'] # Larger filter to find wet season peak
peak_sensitivity = fall_params['peak_sensitivity'] # smaller is more peak
max_flush_duration = fall_params['max_flush_duration'] # Maximum duration from start to end, for fall flush peak
wet_threshold_perc = fall_params['wet_threshold_perc'] # Return to wet season flow must be certain percentage of that year's max flow
flush_threshold_perc = fall_params['flush_threshold_perc'] # Size of flush peak, from rising limb to top of peak, has great enough change
min_flush_threshold = fall_params['min_flush_threshold']
date_cutoff = fall_params['date_cutoff'] # Latest accepted date for fall flush, in Julian Date counting from Oct 1st = 0. (i.e. Dec 15th = 75)
start_dates = []
wet_dates = []
durations = []
mags = []
for column_number, column_flow in enumerate(flow_matrix[0]):
start_dates.append(None)
wet_dates.append(None)
durations.append(None)
mags.append(None)
"""Check to see if water year has more than allowed nan or zeros"""
if np.isnan(flow_matrix[:, column_number]).sum() > max_nan_allowed_per_year or np.count_nonzero(flow_matrix[:, column_number]==0) > max_zero_allowed_per_year or max(flow_matrix[:, column_number]) < min_flow_rate:
            continue
"""Get flow data"""
flow_data = flow_matrix[:, column_number]
x_axis = list(range(len(flow_data)))
"""Interpolate between None values"""
flow_data = replace_nan(flow_data)
"""Return to Wet Season"""
wet_filter_data = gaussian_filter1d(flow_data, wet_sigma)
return_date = return_to_wet_date(wet_filter_data, wet_threshold_perc)
wet_dates[-1] = return_date + 10
"""Filter noise data with small sigma to find fall flush hump"""
filter_data = gaussian_filter1d(flow_data, sigma)
"""Fit spline"""
x_axis = list(range(len(filter_data)))
spl = ip.UnivariateSpline(x_axis, filter_data, k=3, s=3)
"""Find the peaks and valleys of the filtered data"""
mean_flow = np.nanmean(filter_data)
maxarray, minarray = peakdet(spl(x_axis), mean_flow * peak_sensitivity)
"""Find max and min of filtered flow data"""
max_flow = max(filter_data[20:])
max_flow_index = find_index(filter_data[20:], max_flow) + 20
min_flow = min(wet_filter_data[:max_flow_index])
"""If could not find any max and find"""
if not list(maxarray) or not list(minarray) or minarray[0][0] > max_flow_index:
            continue
"""Get flow magnitude threshold from previous summer's baseflow"""
baseflows = []
if column_number == 0:
wet_date = wet_dates[0]
baseflow = list(flow_matrix[:wet_date, column_number])
bs_mean = np.mean(baseflow)
bs_med = np.nanpercentile(baseflow, 50)
else:
summer_date = summer_timings[column_number -1]
if wet_dates[column_number] > 20:
wet_date = wet_dates[column_number] - 20
baseflow = list(flow_matrix[summer_date:,column_number -1]) + list(flow_matrix[:wet_date, column_number])
bs_mean = np.mean(baseflow)
bs_med = np.nanpercentile(baseflow, 50)
"""Get fall flush peak"""
counter = 0
half_duration = int(max_flush_duration/2) # Only test duration for first half of fall flush peak
if bs_med > 25:
min_flush_magnitude = bs_med * 1.5 # if median baseflow is large (>25), magnitude threshold is 50% above median baseflow of previous summer
else:
min_flush_magnitude = bs_med * 2 # otherwise magnitude threshold is 100% above median baseflow of previous summer
if min_flush_magnitude < min_flush_threshold:
min_flush_magnitude = min_flush_threshold
for flow_index in maxarray:
if counter == 0:
if flow_index[0] < half_duration and flow_index[0] != 0 and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude:
"""if index found is before the half duration allowed"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif bool((flow_index[1] - spl(maxarray[counter][0] - half_duration)) / flow_index[1] > flush_threshold_perc or minarray[counter][0] - maxarray[counter][0] < half_duration) and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude:
"""If peak and valley is separted by half duration, or half duration to the left is less than 30% of its value"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif counter == len(minarray):
start_dates[-1]=None
mags[-1]=None
                break
elif bool(minarray[counter][0] - maxarray[counter][0] < half_duration or maxarray[counter][0] - minarray[counter-1][0] < half_duration) and bool(flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude and flow_index[0] <= date_cutoff):
"""valley and peak are distanced by less than half dur from either side"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
elif (spl(flow_index[0] - half_duration) - min_flow) / (flow_index[1] - min_flow) < flush_threshold_perc and (spl(flow_index[0] + half_duration) - min_flow) / (flow_index[1] - min_flow) < flush_threshold_perc and flow_index[1] > wet_filter_data[int(flow_index[0])] and flow_index[1] > min_flush_magnitude and flow_index[0] <= date_cutoff:
"""both side of flow value at the peak + half duration index fall below flush_threshold_perc"""
start_dates[-1]=int(flow_index[0])
mags[-1]=flow_index[1]
break
counter = counter + 1
"""Check to see if last start_date falls behind the max_allowed_date"""
if bool(start_dates[-1] is None or start_dates[-1] > wet_dates[-1]) and wet_dates[-1]:
start_dates[-1] = None
mags[-1] = None
"""Get duration of each fall flush"""
current_duration, left, right = calc_fall_flush_durations_2(filter_data, start_dates[-1])
durations[-1] = current_duration
_plotter(x_axis, flow_data, filter_data, wet_filter_data, start_dates, wet_dates, column_number, left, right, maxarray, minarray, min_flush_magnitude)
return start_dates, mags, wet_dates, durations
def calc_fall_flush_durations(flow_data, wet_filter_data, date):
duration_left = None
duration_right = None
duration = None
if date:
date = int(date)
for index_left, flow_left in enumerate(reversed(flow_data[:date])):
if flow_left < wet_filter_data[date - index_left]:
duration_left = index_left
break
for index_right, flow_right in enumerate(flow_data[date:]):
if flow_right < wet_filter_data[date + index_right]:
duration_right = index_right
break
if duration_left and duration_right:
duration = duration_left + duration_right
else:
duration = None
return duration
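# Alternative duration estimate: bracket the flush date with the nearest local
# minima (peakdet), then tighten each side with a spline-derivative test - the
# rising limb on the left must be sharp, the falling limb on the right may be
# more gradual.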
def calc_fall_flush_durations_2(filter_data, date):
"""Left side sharp"""
der_percent_threshold_left = 50 # Slope of rising limb (i.e. derivative) must be "sharp"
flow_percent_threshold_left = 80
"""Right side mellow"""
der_percent_threshold_right = 30 # Slope of falling limb (i.e. derivative) has lower requirement to be part of flush duration
flow_percent_threshold_right = 80
duration = None
left = 0
right = 0
if date or date == 0:
date = int(date)
left_maxarray, left_minarray = peakdet(filter_data[:date], 0.01)
right_maxarray, right_minarray = peakdet(filter_data[date:], 0.01)
if not list(left_minarray):
left = 0
else:
left = int(left_minarray[-1][0])
if not list(right_minarray):
right = 0
else:
right = int(date - 2 + right_minarray[0][0])
if date - left > 10:
"""create spline, and find derivative"""
x_axis_left = list(range(len(filter_data[left:date])))
spl_left = ip.UnivariateSpline(x_axis_left, filter_data[left:date], k=3, s=3)
spl_first_left = spl_left.derivative(1)
"""check if derivative value falls below certain threshold"""
spl_first_left_median = np.nanpercentile(spl_first_left(x_axis_left), der_percent_threshold_left)
"""check if actual value falls below threshold, avoiding the rounded peak"""
median_left = np.nanpercentile(list(set(filter_data[left:date])), flow_percent_threshold_left)
for index_left, der in enumerate(reversed(spl_first_left(x_axis_left))):
# print(der < spl_first_left_median, filter_data[date - index_left] < median_left)
if der < spl_first_left_median and filter_data[date - index_left] < median_left:
left = date - index_left
break
if right - date > 10:
x_axis_right = list(range(len(filter_data[date:right])))
spl_right = ip.UnivariateSpline(x_axis_right, filter_data[date:right], k=3, s=3)
spl_first_right = spl_right.derivative(1)
spl_first_right_median = abs(np.nanpercentile(spl_first_right(x_axis_right), der_percent_threshold_right))
median_right = np.nanpercentile(list(set(filter_data[date:right])), flow_percent_threshold_right)
for index_right, der in enumerate(spl_first_right(x_axis_right)):
# print(date+index_right, der < spl_first_right_median, filter_data[date + index_right] < median_right)
if abs(der) < spl_first_right_median and filter_data[date + index_right] < median_right:
right = date + index_right
break
if left:
duration = int(date - left)
elif not left and right:
duration = int(right - date)
else:
duration = 0
return duration, left, right
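# Wet-season return date: scan backwards from the peak of the broad wet-season
# filter until the flow falls below wet_threshold_perc of the peak-to-minimum
# range.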
def return_to_wet_date(wet_filter_data, wet_threshold_perc):
max_wet_peak_mag = max(wet_filter_data[20:])
max_wet_peak_index = find_index(wet_filter_data, max_wet_peak_mag)
min_wet_peak_mag = min(wet_filter_data[:max_wet_peak_index])
"""Loop backwards from max flow index to beginning, to search for wet season"""
for index, value in enumerate(reversed(wet_filter_data[:max_wet_peak_index])):
if index == len(wet_filter_data[:max_wet_peak_index]) - 1:
return None
elif (value - min_wet_peak_mag) / (max_wet_peak_mag - min_wet_peak_mag) < wet_threshold_perc:
"""If value percentage falls below wet_threshold_perc"""
return_date = max_wet_peak_index - index
return return_date
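# Diagnostic plot per column (water year): filtered hydrograph with the detected
# flush start (blue), wet-season start (green) and magnitude threshold (red),
# saved under post_processedFiles/Boxplots/.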
def _plotter(x_axis, flow_data, filter_data, wet_filter_data, start_dates, wet_dates, column_number, left, right, maxarray, minarray, min_flush_magnitude):
plt.figure()
#plt.plot(x_axis, flow_data, '-')
plt.plot(x_axis, filter_data, '-', color='#5993E5') #greyish blue
#plt.plot(x_axis, wet_filter_data)
# for data in maxarray:
# plt.plot(data[0], data[1], '^')
# for data in minarray:
# plt.plot(data[0], data[1], 'v')
if start_dates[-1] is not None:
plt.axvline(start_dates[-1], color='blue', ls=':')
plt.axvline(wet_dates[-1], color="green", ls=':')
#plt.axvline(left, ls=":")
#plt.axvline(right, ls=":")
if min_flush_magnitude is not None:
plt.axhline(min_flush_magnitude, ls=':', color = 'red')
#plt.yscale('log')
plt.savefig('post_processedFiles/Boxplots/{}.png'.format(column_number))
| 48.59387 | 350 | 0.653552 | [
"MIT"
] | NoellePatterson/func-flow-plot | utils/calc_fall_flush.py | 12,683 | Python |
from prometheus_client import start_http_server, Gauge, Counter
all_users = Gauge('users_in_all_guilds', 'All users the bot is able to see.')
all_guilds = Gauge('guilds_bot_is_in', 'The amount of guilds the bot is in.')
ready_events = Counter('ready_events', 'Amount of READY events received during uptime.')
message_events = Counter('message_events', 'Amount of messages sent during uptime.')
reconnects = Counter('reconnects', "Amount of reconnects the bot has done to Discord's API.")
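# Expose the metrics defined above over HTTP so Prometheus can scrape them.
# Usage sketch (hypothetical call site, e.g. the bot's startup hook):
#     startup_prometheus()
#     ready_events.inc()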
def startup_prometheus():
start_http_server(9091)
| 45.333333 | 92 | 0.775735 | [
"MIT"
] | trilleplay/kanelbulle | bot/utils/prometheus_tools.py | 544 | Python |
"""projeto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from blog import views
"Nome do App blog + views"
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index),
]
| 33.72 | 79 | 0.698695 | [
"MIT"
] | godah/s2b-python | projeto/urls.py | 843 | Python |
# -*- coding: utf-8 -*-
import logging
from ocp_resources.constants import PROTOCOL_ERROR_EXCEPTION_DICT
from ocp_resources.resource import TIMEOUT, Resource
from ocp_resources.utils import TimeoutSampler
LOGGER = logging.getLogger(__name__)
class CDIConfig(Resource):
"""
CDIConfig object.
"""
api_group = Resource.ApiGroup.CDI_KUBEVIRT_IO
@property
def scratch_space_storage_class_from_spec(self):
return self.instance.spec.scratchSpaceStorageClass
@property
def scratch_space_storage_class_from_status(self):
return self.instance.status.scratchSpaceStorageClass
@property
def upload_proxy_url(self):
return self.instance.status.uploadProxyURL
def wait_until_upload_url_changed(self, uploadproxy_url, timeout=TIMEOUT):
"""
Wait until upload proxy url is changed
Args:
uploadproxy_url (str): The expected uploadProxyURL value.
timeout (int): Time to wait for CDI Config.
Returns:
None: the method returns once the current uploadProxyURL equals uploadproxy_url.
"""
LOGGER.info(
f"Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL"
)
samples = TimeoutSampler(
wait_timeout=timeout,
sleep=1,
exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT,
func=self.api.get,
field_selector=f"metadata.name=={self.name}",
)
for sample in samples:
if sample.items:
status = sample.items[0].status
current_url = status.uploadProxyURL
if current_url == uploadproxy_url:
return
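# Usage sketch (resource name assumed; the cluster-wide CDIConfig is usually
# named "config"):
#     cdi_config = CDIConfig(name="config")
#     cdi_config.wait_until_upload_url_changed(uploadproxy_url=expected_url)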
| 27.965517 | 87 | 0.646732 | [
"Apache-2.0"
] | amastbau/openshift-python-wrapper | ocp_resources/cdi_config.py | 1,622 | Python |
"""Database schemas, examples, and tools"""
import copy
from warnings import warn
from cerberus import Validator
from .sorters import POSITION_LEVELS
SORTED_POSITION = sorted(POSITION_LEVELS.keys(), key=POSITION_LEVELS.get)
ACTIVITIES_TYPE = ["teaching", "research"]
AGENCIES = ["nsf", "doe"]
APPOINTMENTS_TYPE = ["gra", "ss", "pd", "ug"]
COMMITTEES_TYPE = ["phdoral", "phddefense", "phdproposal", "promotion"]
COMMITTEES_LEVEL = ["department", "school", "university", "external"]
EXPENSES_TYPE = ["unsubmitted", "submitted", "reimbursed"]
FACILITIES_TYPE = ["teaching", "research", "shared", "other", "teaching_wish",
"research_wish"]
POSITION_STATUS = ["pi", "adjunct", "high-school", "undergrad","ms", "phd",
"postdoc","visitor-supported","visitor-unsupported"]
PRESENTATION_TYPE = ["award", "colloquium", "contributed_oral", "invited", "keynote",
"plenary", "poster", "seminar", "tutorial"]
PRESENTATION_STATUS = ["in-prep", "submitted", "accepted", "declined",
"cancelled", "postponed"]
PROJECT_TYPE = ["ossoftware", "funded"]
PROPOSAL_STATI = ["pending", "declined", "accepted", "inprep", "submitted"]
PUBLICITY_TYPE = ["online", "article"]
REVIEW_STATI = ["invited", "accepted", "declined", "downloaded", "inprogress",
"submitted", "cancelled"]
REVIEW_RECOMMENDATION = ["reject", "asis", "smalledits", "diffjournal", "majoredits"]
SERVICE_TYPE = ["profession", "university", "school", "department"]
EXEMPLARS = {
"abstracts": {
"_id": "Mouginot.Model",
"coauthors": "P.P.H. Wilson",
"email": "[email protected]",
"firstname": "Baptiste",
"institution": "University of Wisconsin-Madison",
"lastname": "Mouginot",
"references": "[1] B. MOUGINOT, “cyCLASS: CLASS "
"models for Cyclus,”, Figshare, "
"https://dx.doi.org/10.6084/"
"m9.figshare.3468671.v2 (2016).",
"text": "The CLASS team has developed high "
"quality predictors based on pre-trained "
"neural network...",
"timestamp": "5/5/2017 13:15:59",
"title": "Model Performance Analysis",
},
"assignments": {
"_id": "hw01-rx-power",
"category": "homework",
"courses": ["EMCH-558-2016-S", "EMCH-758-2016-S"],
"points": [1, 2, 3],
"questions": ["1-9", "1-10", "1-12"],
},
"beamplan": {
'_id': "test",
'beamtime': '2020-1-XPD',
'begin_date': '2020-01-01',
'end_date': '2020-01-02',
'devices': ['cryostream'],
'exp_plan': ['load samples on the holder',
'scan the holder to locate the samples',
'take room temperature measurement of sample and the subtrate',
'ramp down temperature to 100K',
'ramp up, measure PDF at temperature 100K ~ 300K, 10K stepsize, 1 min exposure'],
'holder': 'film holder (1 cm * 1 cm * 1 mm)',
'measurement': 'Tramp',
'objective': 'temperature ramping PDF of one WO3 film (100, 300K, 10K)',
'pipeline': 'usual',
'prep_plan': ['films will be made by kriti'],
'project': '20ks_wo3',
'project_lead': 'kseth',
'samples': ['WO3 film', 'glass subtrate'],
'scanplan': ['Scanplan(bt, Tramp, 30, 80, 500, 10)'],
'ship_plan': ['seal and ship to CU', 'carry to the beamline'],
'time': 190,
'todo': ["todo something"]},
"beamtime": {
"_id": "2020-1-XPD",
"begin_date": "2020-02-14",
"begin_time": "8:00 am",
"end_date": "2020-02-17",
"end_time": "8:00 am"
},
"blog": {
"_id": "my-vision",
"author": "Anthony Scopatz",
"day": 18,
"month": "September",
"original": "https://scopatz.com/my-vision/",
"post": "I would like see things move forward. Deep, I know!",
"title": "My Vision",
"year": 2015,
},
"citations": {
"_id": "meurer2016sympy",
"author": [
"Meurer, Aaron",
"Smith, Christopher P",
"Paprocki, Mateusz",
"{\\v{C}}ert{\\'\\i}k, Ond{\\v{r}}ej",
"Rocklin, Matthew",
"Kumar, AMiT",
"Ivanov, Sergiu",
"Moore, Jason K",
"Singh, Sartaj",
"Rathnayake, Thilina",
"Sean Vig",
"Brian E Granger",
"Richard P Muller",
"Francesco Bonazzi",
"Harsh Gupta",
"Shivam Vats",
"Fredrik Johansson",
"Fabian Pedregosa",
"Matthew J Curry",
"Ashutosh Saboo",
"Isuru Fernando",
"Sumith Kulal",
"Robert Cimrman",
"Anthony Scopatz",
],
"doi": "10.1021/nn501591g",
"entrytype": "article",
"journal": "PeerJ Computer Science",
"month": "Jan",
"pages": "e103",
"publisher": "PeerJ Inc. San Francisco, USA",
"synopsis": "The description of symbolic computing in Python",
"tags": "pdf",
"title": "SymPy: Symbolic computing in Python",
"volume": "4",
"year": "2017",
},
"contacts": {
"_id": "afriend",
"aka": [
"A. B. Friend",
"AB Friend",
"Tony Friend"
],
"department": "physics",
"email": "[email protected]",
"institution": "columbiau",
"name": "Anthony B Friend",
"notes": ["The guy I meet for coffee sometimes"],
"title": "Mr.",
"month": "January",
"year": 2020,
"day": 15,
"uuid": "76f2a4c7-aa63-4fa3-88b5-396b0c15d368",
},
"courses": {
"_id": "EMCH-552-2016-F",
"active": False,
"department": "EMCH",
"number": 552,
"scale": [
[0.875, "A"],
[0.8125, "B+"],
[0.75, "B"],
[0.6875, "C+"],
[0.625, "C"],
[0.5625, "D+"],
[0.5, "D"],
[-1.0, "F"],
],
"season": "F",
"students": ["Human A. Person", "Human B. Person"],
"syllabus": "emch552-2016-f-syllabus.pdf",
"weights": {
"class-notes": 0.15,
"final": 0.3,
"homework": 0.35,
"midterm": 0.2,
},
"year": 2016,
},
"expenses": {
"_id": "test",
"expense_type": "business",
"grant_percentages": ["50", "50"],
"grants": ["dmref15", "SymPy-1.1"],
"itemized_expenses": [
{
"day": i,
"month": "Jan",
"year": 2018,
"purpose": "test",
"unsegregated_expense": 10 * i,
"segregated_expense": 0,
}
for i in range(1, 11)
],
"payee": "scopatz",
"project": "Cyclus",
"overall_purpose": "testing the databallectionsse",
},
"grades": {
"_id": "Human A. Person-rx-power-hw02-EMCH-758-2017-S",
"student": "hap",
"assignment": "2017-rx-power-hw02",
"course": "EMCH-758-2017-S",
"scores": [1, 1.6, 3],
},
"grants": [
{
"_id": "SymPy-1.1",
"amount": 3000.0,
"alias": "sym",
"begin_day": 1,
"begin_month": "May",
"begin_year": 2030,
"call_for_proposals": "https://groups.google.com/d/msg"
"/numfocus/wPjhdm8NJiA/S8JL1_NZDQAJ",
"end_day": 31,
"end_month": "December",
"end_year": 2030,
"funder": "NumFOCUS",
"narrative": "https://docs.google.com/document/d/1nZxqoL"
"-Ucni_aXLWmXtRDd3IWqW0mZBO65CEvDrsXZM/edit?usp"
"=sharing",
"program": "Small Development Grants",
"team": [
{
"institution": "University of South Carolina",
"name": "Anthony Scopatz",
"position": "pi",
},
{
"institution": "University of South Carolina",
"name": "Aaron Meurer",
"position": "researcher",
},
],
"status": "pending",
"title": "SymPy 1.1 Release Support",
"budget": [
{"begin_date": "2030-05-01",
"end_date": "2030-06-30",
"student_months": 0.5,
"postdoc_months": 0.0,
"ss_months": 1.0,
"amount": 1000.0,
},
{"begin_date": "2030-07-01",
"end_date": "2030-09-30",
"student_months": 1.5,
"postdoc_months": 0.0,
"ss_months": 2.0,
"amount": 1000.0,
},
{"begin_date": "2030-10-01",
"end_date": "2030-12-31",
"student_months": 3.0,
"postdoc_months": 0.0,
"ss_months": 0.0,
"amount": 1000.0,
},
],
"proposal_id": "SymPy-1.1"
},
{
"_id": "SymPy-2.0",
"amount": 3000.0,
"alias": "sym2.0",
"begin_day": 1,
"begin_month": 6,
"begin_year": 2019,
"call_for_proposals": "https://groups.google.com/d/msg"
"/numfocus/wPjhdm8NJiA/S8JL1_NZDQAJ",
"end_day": 31,
"end_month": "December",
"end_year": 2030,
"funder": "NumFOCUS",
"narrative": "https://docs.google.com/document/d/1nZxqoL"
"-Ucni_aXLWmXtRDd3IWqW0mZBO65CEvDrsXZM/edit?usp"
"=sharing",
"program": "Small Development Grants",
"team": [
{
"institution": "University of South Carolina",
"name": "Anthony Scopatz",
"position": "pi",
},
{
"institution": "University of South Carolina",
"name": "Aaron Meurer",
"position": "researcher",
},
],
"status": "pending",
"title": "SymPy 1.1 Release Support",
"budget": [
{"begin_date": "2019-06-01",
"end_date": "2024-12-31",
"student_months": 12.0,
"postdoc_months": 24.0,
"ss_months": 14.0,
"amount": 1500.0,
},
{"begin_date": "2025-01-01",
"end_date": "2030-12-31",
"student_months": 12.0,
"postdoc_months": 24.0,
"ss_months": 0.0,
"amount": 1500.0,
},
],
"proposal_id": "SymPy-2.0",
},
{
"_id": "dmref15",
"alias": "dmref15",
"account": "GG012345",
"amount": 982785.0,
"funder": "NSF",
"grant_id": "DMREF-1534910",
"institution": "Columbia University",
"notes": "Designing Materials to Revolutionize and Engineer our "
"Future (DMREF)",
"person_months_academic": 0.0,
"person_months_summer": 0.25,
"program": "DMREF",
"scope": "This grant is to develop complex modeling methods for regularizing "
"ill-posed nanostructure inverse problems using data analytic and "
"machine learning based approaches. This does not overlap with any "
"other grant.",
"team": [
{
"institution": "Columbia University",
"name": "qdu",
"position": "co-pi",
},
{
"institution": "Columbia University",
"name": "dhsu",
"position": "co-pi",
},
{
"institution": "Columbia University",
"name": "Anthony Scopatz",
"position": "pi",
"subaward_amount": 330000.0,
},
],
"title": "DMREF: Novel, data validated, nanostructure determination "
"methods for accelerating materials discovery",
"budget": [
{"begin_date": "2018-05-01",
"end_date": "2018-09-30",
"student_months": 12.0,
"postdoc_months": 0.0,
"ss_months": 6.0,
"amount": 327595.0,
},
{"begin_date": "2018-10-01",
"end_date": "2019-01-30",
"student_months": 8.0,
"postdoc_months": 0.0,
"ss_months": 12.0,
"amount": 327595.0,
},
{"begin_date": "2019-02-01",
"end_date": "2019-05-01",
"student_months": 12.0,
"postdoc_months": 0.0,
"ss_months": 6.0,
"amount": 327595.0,
},
],
"proposal_id": "dmref15"
},
{"_id": "abc42",
"alias": "abc42",
"amount": 42000.0,
"begin_date": "2020-06-01",
"end_date": "2020-12-31",
"funder": "Life",
"program": "Metaphysical Grants",
"team": [
{"institution": "University of Pedagogy",
"name": "Chief Pedagogue",
"position": "pi"
},
{"institution": "University of Pedagogy",
"name": "Pedagogue Jr.",
"position": "co-pi"
},
],
"title": "The answer to life, the universe, and everything",
"budget": [
{"begin_date": "2020-06-01",
"end_date": "2020-12-31",
"student_months": 0.0,
"postdoc_months": 0.0,
"ss_months": 1.0,
"amount": 42000.0,
}
],
"proposal_id": "abc42",
},
{"_id": "ta",
"amount": 0.0,
"begin_date": "2020-06-01",
"end_date": "2020-12-31",
"funder": "Life",
"program": "Underground Grants",
"team": [
{"institution": "Ministry of Magic",
"name": "Chief Witch",
"position": "pi"
},
{"institution": "Ministry of Magic",
"name": "Chief Wizard",
"position": "co-pi"
},
],
"title": "Support for teaching assistants",
"budget": [
{"begin_date": "2020-06-01",
"end_date": "2020-08-30",
"student_months": 0.0,
"postdoc_months": 0.0,
"ss_months": 0.0,
"amount": 0.0,
}
]
},
],
"groups": {
"_id": "ergs",
"pi_name": "Anthony Scopatz",
"department": "Mechanical Engineering",
"institution": "University of South Carolina",
"name": "ERGS",
"aka": ["Energy Research Group Something", "Scopatz Group"],
"website": "www.ergs.sc.edu",
"mission_statement": """<b>ERGS</b>, or <i>Energy Research Group:
Scopatz</i>, is the Computational
<a href="http://www.me.sc.edu/nuclear/">Nuclear Engineering</a>
research group at the
<a href="http://sc.edu/">University of South Carolina</a>.
Our focus is on uncertainty quantification & predictive modeling, nuclear
fuel cycle simulation, and improving nuclear engineering techniques through
automation.
We are committed to open & accessible research tools and methods.""",
"projects": """ERGS is involved in a large number of computational
projects. Please visit the <a href="projects.html">projects page</a> for
more information!
""",
"email": "<b>scopatz</b> <i>(AT)</i> <b>cec.sc.edu</b>",
},
"institutions": [{
"_id": "columbiau",
"aka": ["Columbia University", "Columbia"],
"city": "New York",
"country": "USA",
"day": 30,
"departments": {
"physics": {
"name": "Department of Physics",
"aka": ["Dept. of Physics", "Physics"],
},
"chemistry": {
"name": "Department of Chemistry",
"aka": ["Chemistry", "Dept. of Chemistry"],
},
"apam": {
"name": "Department of Applied Physics " "and Applied Mathematics",
"aka": ["APAM"],
},
},
"month": "May",
"name": "Columbia University",
"schools": {
"seas": {
"name": "School of Engineering and " "Applied Science",
"aka": [
"SEAS",
"Columbia Engineering",
"Fu Foundation School of Engineering " "and Applied Science",
],
}
},
"state": "NY",
"street": "500 W 120th St",
"updated": "2020-05-30",
"uuid": "avacazdraca345rfsvwre",
"year": 2020,
"zip": "10027",
},
{
"_id": "usouthcarolina",
"aka": ["The University of South Carolina"],
"city": "Columbia",
"country": "USA",
"day": 30,
"departments": {
"physics": {
"name": "Department of Physics",
"aka": ["Dept. of Physics", "Physics"],
},
"chemistry": {
"name": "Department of Chemistry",
"aka": ["Chemistry", "Dept. of Chemistry"],
},
"apam": {
"name": "Department of Applied Physics" "and Applied Mathematics",
"aka": ["APAM"],
},
"mechanical engineering": {
"name": "Department of Mechanical Engineering",
"aka": ["Mechanical", "Dept. of Mechanical"],
}
},
"month": "May",
"name": "The University of South Carolina",
"schools": {
"cec": {
"name": "College of Engineering and" "Computing",
"aka": [
"CEC",
"College of Engineering and Computing",
],
}
},
"state": "SC",
"street": "1716 College Street",
"updated": "2020-06-30",
"uuid": "4E89A0DD-19AE-45CC-BCB4-83A2D84545E3",
"year": 2020,
"zip": "29208",
},
],
"jobs": {
"_id": "0004",
"background_fields": [
"Data Science",
"Data Engineering",
"Computer Engineering",
"Computer Science",
"Applied Mathematics",
"Physics",
"Nuclear Engineering",
"Mechanical Engineering",
"Or similar",
],
"compensation": [
"Salary and compensation will be based on prior work " "experience."
],
"contact": "Please send CV or resume to Prof. Scopatz at "
"scopatzATcec.sc.edu.",
"day": 1,
"description": "<p>We are seeking a dedicated individual to "
"help to aid in ...",
"month": "July",
"open": False,
"positions": ["Scientific Software Developer", "Programmer"],
"start_date": "ASAP",
"title": "Open Source Scientific Software Maintainer",
"year": 2015,
},
"meetings": [{
"_id": "grp1000-01-01",
"actions": [
"(Everyone) Update overdue milestones",
"(Professor Billinge) Explore, and plan a machine learning project for DSI"
"(Professor Billinge, Emil, Yevgeny, Songsheng) Come up with a Kaggle competition for this DSI project"
"(Emil) Set up the slack channel for the DSI project"
],
"agenda": ["Review actions", "Fargo is not free on any streaming platforms",
"Review Airtable for deliverables and celebrate",
"Mention diversity action initiative", "Songsheng's journal club presentation",
"(Vivian and Zicheng) Finish rest of crystallography presentation next week",
"Emil's 7th inning Yoga Stretch", "Crystallography talk", "Presentation"],
"buddies": [
" Jaylyn C. Umana, "
" Simon J. L. Billinge",
" Long Yang, "
" Emil Kjaer",
" Sani Harouna-Mayer,"
" Akshay Choudhry",
" Vivian Lin, "
" Songsheng Tao",
" Ran Gu, "
" Adiba Ejaz",
" Zach Thatcher, "
" Yevgeny Rakita",
" Zicheng 'Taylor' Liu, "
" Eric Shen ",
" Hung Vuong, "
" Daniela Hikari Yano",
" Ahmed Shaaban, "
" Jiawei Zang",
" Berrak Ozer, "
" Michael Winitch",
" Shomik Ghose",
],
"day": 1,
"journal_club": {
"doi": "10.1107/S2053273319005606",
"presenter": "sbillinge",
},
"lead": "sbillinge",
"minutes": [
"Talked about eyesight and prescription lenses",
"Professor Billinge tells everyone a Logician/Mathematician joke",
"Mentioned pyjokes, a package in Python that lists bad jokes",
"Jaylyn greets everyone",
"Reviewed action items from last time",
"Talked about fargo, and the merits (or lack thereof) of the Dakotas",
"Celebrated finished prums",
"Songhsheng holds journal club presentation on Machine Learning techniques",
"Discussed Linear Classification, Gradient Descent, Perceptrons, Convolution and other ML topics",
"Discussed how we can derive scientific meaning from ML algorithms",
"Discussed real space versus reciprocal space",
"Finished journal club, had to postpone Akshay's presentation, and the Yoga session to next week",
],
"month": 1,
"place": "Mudd 1106",
"presentation": {
"title": "PDF Distance Extraction",
"link": "2007ac_grpmtg",
"presenter": "sbillinge",
},
"scribe": "sbillinge",
"time": '0',
"updated": "2020-07-31 23:27:50.764475",
"uuid": "3fbee8d9-e283-48e7-948f-eecfc2a123b7",
"year": 1000
},
{
"_id": "grp2020-07-31",
"actions": [
"(Everyone) Update overdue milestones",
"(Professor Billinge) Explore, and plan a machine learning project for DSI"
"(Professor Billinge, Emil, Yevgeny, Songsheng) Come up with a Kaggle competition for this DSI project"
"(Emil) Set up the slack channel for the DSI project"
],
"agenda": ["Review actions", "Fargo is not free on any streaming platforms",
"Review Airtable for deliverables and celebrate",
"Mention diversity action initiative", "Songsheng's journal club presentation",
"(Vivian and Zicheng) Finish rest of crystallography presentation next week",
"Emil's 7th inning Yoga Stretch", "Crystallography talk", "Presentation"],
"buddies": [
" Jaylyn C. Umana, "
" Simon J. L. Billinge",
" Long Yang, "
" Emil Kjaer",
" Sani Harouna-Mayer,"
" Akshay Choudhry",
" Vivian Lin, "
" Songsheng Tao",
" Ran Gu, "
" Adiba Ejaz",
" Zach Thatcher, "
" Yevgeny Rakita",
" Zicheng 'Taylor' Liu, "
" Eric Shen ",
" Hung Vuong, "
" Daniela Hikari Yano",
" Ahmed Shaaban, "
" Jiawei Zang",
" Berrak Ozer, "
" Michael Winitch",
" Shomik Ghose",
],
"day": 1,
"journal_club": {
"doi": "10.1107/S2053273319005606",
"presenter": "sbillinge",
},
"lead": "sbillinge",
"minutes": [
"Talked about eyesight and prescription lenses",
"Professor Billinge tells everyone a Logician/Mathematician joke",
"Mentioned pyjokes, a package in Python that lists bad jokes",
"Jaylyn greets everyone",
"Reviewed action items from last time",
"Talked about fargo, and the merits (or lack thereof) of the Dakotas",
"Celebrated finished prums",
"Songhsheng holds journal club presentation on Machine Learning techniques",
"Discussed Linear Classification, Gradient Descent, Perceptrons, Convolution and other ML topics",
"Discussed how we can derive scientific meaning from ML algorithms",
"Discussed real space versus reciprocal space",
"Finished journal club, had to postpone Akshay's presentation, and the Yoga session to next week",
],
"month": 1,
"place": "Mudd 1106",
"presentation": {
"title": "PDF Distance Extraction",
"link": "2007ac_grpmtg",
"presenter": "sbillinge",
},
"scribe": "sbillinge",
"time": '0',
"updated": "2020-07-31 23:27:50.764475",
"uuid": "3fbee8d9-e283-48e7-948f-eecfc2a123b7",
"year": 7000
}
],
"news": {
"_id": "56b4eb6d421aa921504ef2a9",
"author": "Anthony Scopatz",
"body": "Dr. Robert Flanagan joined ERGS as a post-doctoral " "scholar.",
"day": 1,
"month": "February",
"year": 2016,
},
"people": [{
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"avatar": "https://avatars1.githubusercontent.com/u/320553?v" "=3&s=200",
"appointments": {
"f19": {
"begin_year": 2019,
"begin_month": 9,
"begin_day": 1,
"end_year": 2019,
"end_month": 10,
"end_day": 31,
"grant": "dmref15",
"type": "pd",
"loading": 0.75,
"status": "finalized",
"notes": ["forgetmenot"]
},
"s20": {
"begin_date": "2020-01-01",
"end_date": "2020-05-15",
"grant": "sym",
"type": "pd",
"loading": 1.0,
"status": "finalized",
"notes": ["fully appointed", "outdated grant"]
},
"ss20": {
"begin_date": "2020-06-01",
"end_date": "2020-08-31",
"grant": "abc42",
"type": "ss",
"loading": 0.8,
"status": "proposed",
"notes": []
}
},
"bio": "Anthony Scopatz is currently an Assistant Professor",
"bios": ["Anthony Scopatz is currently an Assistant Professor but will go on to do great things"],
"committees": [{
"name": "Heather Stanford",
"type": "phdoral",
"year": 2020,
"month": 3,
"day": 1,
"level": "department",
"unit": "apam"
},
{"name": "Heather Stanford",
"type": "promotion",
"year": 2020,
"month": 3,
"day": 1,
"level": "school",
"unit": "seas"
},
{"name": "Heather Stanford",
"type": "phddefense",
"year": 2020,
"month": 3,
"day": 1,
"notes": "something else to remember about it, not published",
"level": "external",
"unit": "U Denmark"
},
{"name": "Heather Stanford",
"type": "promotion",
"year": 2020,
"month": 3,
"day": 1,
"unit": "columbiau",
"level": "university",
}],
"education": [
{
"advisor": "ascopatz",
"begin_year": 2008,
"degree": "Ph.D. Mechanical Engineering, "
"Nuclear and Radiation Engineering "
"Program",
"end_year": 2011,
"group": "ergs",
"institution": "The University of Texas at Austin",
"department": "apam",
"location": "Austin, TX",
"other": [
"Adviser: Erich A. Schneider",
"Dissertation: Essential Physics for Fuel Cycle "
"Modeling & Analysis",
],
},
{
"begin_year": 2006,
"degree": "M.S.E. Mechanical Engineering, Nuclear and "
"Radiation Engineering Program",
"end_year": 2007,
"institution": "The University of Texas at Austin",
"location": "Austin, TX",
"other": [
"Adviser: Erich A. Schneider",
"Thesis: Recyclable Uranium Options under the Global "
"Nuclear Energy Partnership",
],
},
{
"begin_year": 2002,
"begin_month": "Sep",
"begin_day": 1,
"degree": "B.S. Physics",
"end_year": 2006,
"end_month": 5,
"end_day": 20,
"institution": "University of California, Santa Barbara",
"location": "Santa Barbara, CA",
"other": [
"Graduated with a Major in Physics and a Minor in " "Mathematics"
],
},
{
"begin_year": 2008,
"degree": "ongoing",
"group": "life",
"institution": "solar system",
"department": "earth",
"location": "land, mostly",
},
],
"email": "[email protected]",
"employment": [
{
"advisor": "ascopatz",
"begin_year": 2015,
"coworkers": ["afriend"],
"group": "ergs",
"location": "Columbia, SC",
"organization": "The University of South Carolina",
"other": [
"Cyclus: An agent-based, discrete time nuclear fuel "
"cycle simulator.",
"PyNE: The Nuclear Engineering Toolkit.",
"Website: http://www.ergs.sc.edu/",
],
"permanent": True,
"position": "assistant professor",
"position_full": "Assistant Professor, Mechanical Engineering " "Department",
},
{
"begin_year": 2013,
"begin_month": "Jun",
"begin_day": 1,
"end_year": 2015,
"end_month": 3,
"end_day": 15,
"location": "Madison, WI",
"organization": "CNERG, The University of " "Wisconsin-Madison",
"department": "Physics",
"other": [
"Cyclus: An agent-based, discrete time nuclear fuel "
"cycle simulator.",
"PyNE: The Nuclear Engineering Toolkit.",
"Website: https://cnerg.github.io/",
],
"position": "associate scientist",
"position_full": "Associate Scientist, Engineering Physics " "Department",
},
{
"begin_day": 1,
"begin_month": "Nov",
"begin_year": 2011,
"end_month": "May",
"end_year": 2013,
"location": "Chicago, IL",
"organization": "The FLASH Center, The University of " "Chicago",
"other": [
"NIF: Simulation of magnetic field generation from "
"neutral plasmas using FLASH.",
"CosmoB: Simulation of magnetic field generation "
"from neutral plasmas using FLASH.",
"FLASH4: High-energy density physics capabilities "
"and utilities.",
"Simulated Diagnostics: Schlieren, shadowgraphy, "
"Langmuir probes, etc. from FLASH.",
"OpacPlot: HDF5-based equation of state and opacity "
"file format.",
"Website: http://flash.uchicago.edu/site/",
],
"position": "post-doctoral scholar",
"position_full": "Research Scientist, Postdoctoral Scholar",
"status": "pi"
},
],
"funding": [
{
"name": "Omega Laser User's Group Travel Award",
"value": 1100,
"year": 2013,
},
{"name": "NIF User's Group Travel Award", "value": 1150,
"year": 2013},
],
"google_scholar_url": "https://scholar.google.com/citations?user=dRm8f",
"github_id": "ascopatz",
"hindex": [{
"h": 25,
"h_last_five": 46,
"citations": 19837,
"citations_last_five": 9419,
"origin": "Google Scholar",
"since": 1991,
"year": 2020,
"month": 2,
"day": 19
}],
"home_address": {
"street": "123 Wallabe Ln",
"city": "The big apple",
"state": "plasma",
"zip": "007",
},
"initials": "AMS",
"membership": [
{
"begin_year": 2006,
"organization": "American Nuclear Society",
"position": "Member",
},
{
"begin_year": 2013,
"organization": "Python Software Foundation",
"position": "Fellow",
},
],
"name": "Anthony Scopatz",
"orcid_id": "0000-0002-9432-4248",
"position": "professor",
"research_focus_areas": [
{"begin_year": 2010, "description": "software applied to nuclear "
"engineering and life"}
],
"service": [{
"name": "International Steering Committee",
"role": "chair",
"type": "profession",
"year": 2020,
"month": 3,
"notes": ["something"],
}, {
"name": "National Steering Committee",
"type": "profession",
"begin_year": 2018,
"end_year": 2021,
"notes": "something",
},
],
"skills": [
{"category": "Programming Languages", "level": "expert",
"name": "Python"},
{"category": "Programming Languages", "level": "expert",
"name": "Cython"},
],
"teaching": [
{
"course": "EMCH 552: Intro to Nuclear Engineering",
"courseid": "EMCH 552",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": "tbd",
"month": "August",
"organization": "University of South Carolina",
"position": "professor",
"semester": "Spring",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2017,
},
{
"course": "EMCH 558/758: Reactor Power Systems",
"courseid": "EMCH 558",
"description": "This course covers conventional " "reactors.",
"enrollment": 28,
"evaluation": {
"response_rate": 66.76,
"amount_learned": 3.5,
"appropriateness_workload": 3.15,
"course_overall": 3.67,
"fairness_grading": 3.54,
"organization": 3.25,
"classroom_delivery": 4,
"approachability": 4.3,
"instructor_overall": 3.5,
"comments": ["super duper", "dandy"]
},
"month": "January",
"organization": "University of South Carolina",
"position": "professor",
"syllabus": "https://docs.google.com/document/d"
"/1uMAx_KFZK9ugYyF6wWtLLWgITVhaTBkAf8"
"-PxiboYdM/edit?usp=sharing",
"year": 2017,
},
],
"title": "Dr.",
},
{
"_id": "sbillinge",
"active": True,
"activities": [{
"type": "teaching",
"name": "course development",
"year": 2018,
"other": "Developed a new course for Materials Science"
}],
"aka": [
"Billinge",
],
"avatar": "https://avatars1.githubusercontent.com/u/320553?v" "=3&s=200",
"bio": "Simon teaches and does research",
"committees": [{
"name": "Same Old",
"type": "phddefense",
"year": 2018,
"unit": "Materials Science",
"level": "department",
"notes": "something"
}],
"education": [
{
"begin_year": 2008,
"degree": "Ph.D. Mechanical Engineering, "
"Nuclear and Radiation Engineering "
"Program",
"end_year": 2011,
"group": "ergs",
"institution": "The University of Texas at Austin",
"department": "apam",
"location": "Austin, TX",
"other": [
"Adviser: Erich A. Schneider",
"Dissertation: Essential Physics for Fuel Cycle "
"Modeling & Analysis",
],
},
],
"email": "[email protected]",
"employment": [
{
"begin_year": 2015,
"group": "ergs",
"location": "Columbia, SC",
"organization": "The University of South Carolina",
"other": [
"Cyclus: An agent-based, discrete time nuclear fuel "
"cycle simulator.",
"PyNE: The Nuclear Engineering Toolkit.",
"Website: http://www.ergs.sc.edu/",
],
"position": "assistant professor",
},
],
"facilities": [{
"type": "other",
"name": "Shared {Habanero} compute cluster",
"begin_year": 2015
},
{
"type": "research_wish",
"name": "Shared access to wet lab",
"begin_year": 2015
},
{
"type": "teaching",
"name": "Courseworks2",
"begin_year": 2017
},
{
"type": "teaching_wish",
"name": "nothing right now",
"begin_year": 2019
},
{
"type": "research",
"name": "I don't have one",
"begin_year": 2008
},
],
"funding": [
{
"name": "Omega Laser User's Group Travel Award",
"value": 1100,
"year": 2013,
},
{"name": "NIF User's Group Travel Award", "value": 1150,
"year": 2013},
],
"google_scholar_url": "https://scholar.google.com/citations?user=dRm8f",
"grp_mtg_active": True,
"hindex": [{
"h": 65,
"h_last_five": 43,
"citations": 17890,
"citations_last_five": 8817,
"origin": "Google Scholar",
"since": 1991,
"year": 2019,
"month": "May",
"day": 12,
}],
"office": "1105 Seely W. Mudd Building (inner office)",
"home_address": {
"street": "123 Wallabe Ln",
"city": "The big apple",
"state": "plasma",
"zip": "007",
},
"initials": "SJLB",
"membership": [
{
"begin_year": 2006,
"organization": "American Nuclear Society",
"position": "Member",
},
],
"miscellaneous": {
"metrics_for_success": [
"publications(quality, quantity)",
"invite talks",
"funding",
"citations",
],
},
"name": "Simon J. L. Billinge",
"orcid_id": "0000-0002-9432-4248",
"position": "professor",
"publicity": [{
"type": "online",
"publication": "Brookhaven National Laboratory Web Story",
"topic": "LDRD Provenance project",
"title": "An awesome project and well worth the money",
"day": 24,
"month": "Jul",
"year": 2019,
"grant": "bnlldrd18",
"url": "http://www.google.com"
},
],
"research_focus_areas": [
{"begin_year": 2010, "description": "software applied to materials "
"engineering and life"}
],
"service": [
{
"type": "profession",
"name": "Master of Ceremonies and Organizer Brown University "
'"Chemistry: Believe it or Not" public chemistry '
"demonstration",
"year": 2017,
"month": "August"
},
{
"type": "department",
"name": "Applied Physics program committee",
"year": 2018,
"month": 1
},
{
"type": "school",
"name": "Ad hoc tenure committee",
"year": 2017,
"month": 6,
"notes": "Albert Einstein"
},
{
"type": "profession",
"name": "Co-organizer JUAMI",
"year": 2017,
"month": 12,
"role": "co-organizer",
"other": "great way to meet people",
},
],
"skills": [
{"category": "Programming Languages", "level": "expert",
"name": "Python"},
],
"teaching": [
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "f17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "August",
"organization": "Columbia University",
"position": "professor",
"semester": "Fall",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2016,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "f17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "August",
"organization": "Columbia University",
"position": "professor",
"semester": "Fall",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2017,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "s17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "Jan",
"organization": "Columbia University",
"position": "professor",
"semester": "Spring",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2018,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "s17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "Jan",
"organization": "Columbia University",
"position": "professor",
"semester": "Spring",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2017,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "s17-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"month": "Jan",
"organization": "Columbia University",
"position": "professor",
"semester": "Spring",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2019,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "f18-3010",
"description": "This course is an introduction to nuclear " "physics.",
"enrollment": 18,
"evaluation": {
"response_rate": 58.33,
"amount_learned": 4.57,
"appropriateness_workload": 4.29,
"fairness_grading": 4.57,
"course_overall": 4.43,
"organization": 4.0,
"classroom_delivery": 4.29,
"approachability": 4.86,
"instructor_overall": 4.43,
"comments": [
"Great teacher but disorganized",
"Wears pink pants. Why?",
]},
"month": "August",
"organization": "Columbia University",
"position": "professor",
"semester": "Fall",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2018,
},
{
"course": 'MSAE-3010: Introduction to Materials Science',
"courseid": "f19-3010",
"description": "This course is an introduction to nuclear " "physics.",
"month": "August",
"organization": "Columbia University",
"position": "professor",
"semester": "Fall",
"syllabus": "https://drive.google.com/open?id"
"=0BxUpd34yizZreDBCMEJNY2FUbnc",
"year": 2019,
},
],
"title": "Dr.",
"todos": [
{"description": "read paper",
"due_date": "2020-07-19",
"begin_date": "2020-06-15",
"duration": 60.0,
"importance": 2,
"status": "started",
"assigned_by": "scopatz",
"running_index": 1
},
{"description": "prepare the presentation",
"due_date": "2020-07-29",
"begin_date": "2020-06-22",
"duration": 30.0,
"importance": 0,
"status": "started",
"notes": ["about 10 minutes", "don't forget to upload to the website"],
"assigned_by": "sbillinge",
"running_index": 2
}
],
},
{"_id": "abeing",
"active": False,
"aka": ["being", "human", "person"],
"avatar": "https://xkcd.com/1221/",
"bio": "Abstract Being is an exemplar human existence",
"education": [
{"degree": "bachelors", "institution": "University of Laughs", "begin_year": 2010},
],
"employment": [
{"group": "bg", "begin_date": "2015-06-01", "end_date": "2015-08-31", "organization": "columbiau",
"position": "intern"},
{"group": "agroup", "begin_date": "2020-01-01", "end_date": "2030-12-31", "organization": "usouthcarolina",
"position": "intern"},
{"group": "bg", "begin_date": "2010-06-01", "end_date": "2012-08-31", "organization": "columbiau",
"position": "intern"},
{"group": "bg", "begin_date": "2017-06-01", "end_date": "2019-08-31", "organization": "columbiau",
"position": "intern"},
],
"position": "intern",
"name": "Abstract Being",
}
],
"presentations": [
{
"_id": "18sb_this_and_that",
"abstract": "We pulled apart graphite with tape",
"authors": ["scopatz", "afriend"],
"begin_year": 2018,
"begin_month": 5,
"begin_day": 22,
"department": "apam",
"institution": "columbiau",
"location": "Upton NY",
"meeting_name": "Meeting to check flexibility on dates",
"notes": [
"We hope the weather will be sunny",
"if the weather is nice we will go to the " "beach",
],
"project": "18sob_clustermining",
"status": "accepted",
"title": "Graphitic Dephenestration",
"type": "award",
"webinar": False,
},
{
"_id": "18sb_nslsii",
"abstract": "We pulled apart graphite with tape",
"authors": ["scopatz"],
"begin_year": 2018,
"begin_month": 5,
"begin_day": 22,
"department": "apam",
"end_year": 2018,
"end_month": 5,
"end_day": 22,
"institution": "columbiau",
"location": "Upton NY",
"meeting_name": "2018 NSLS-II and CFN Users Meeting",
"notes": [
"We hope the weather will be sunny",
"if the weather is nice we will go to the " "beach",
],
"project": "18sob_clustermining",
"status": "accepted",
"title": "ClusterMining: extracting core structures of "
"metallic nanoparticles from the atomic pair "
"distribution function",
"type": "poster",
},
{
"_id": "18sb04_kentstate",
"abstract": "We made the case for local structure",
"authors": ["scopatz"],
"begin_year": 2018,
"begin_month": "May",
"begin_day": 22,
"department": "physics",
"end_year": 2018,
"end_month": 5,
"end_day": 22,
"institution": "columbiau",
"notes": ["what a week!"],
"project": "18kj_conservation",
"status": "accepted",
"title": "Nanostructure challenges and successes from "
"16th Century warships to 21st Century energy",
"type": "colloquium",
"webinar": True,
},
],
"projecta": {
"_id": "sb_firstprojectum",
"begin_date": "2020-04-28",
"collaborators": ["aeinstein", "pdirac"],
"deliverable": {
"audience": ["beginning grad in chemistry"],
"due_date": "2021-05-05",
"success_def": "audience is happy",
"scope": ["UCs that are supported or some other scope description "
"if it is software", "sketch of science story if it is paper"
],
"platform": "description of how and where the audience will access "
"the deliverable. Journal if it is a paper",
"roll_out": [
"steps that the audience will take to access and interact with "
"the deliverable", "not needed for paper submissions"],
"notes": ["deliverable note"],
"status": "proposed"
},
"description": "My first projectum",
"end_date": "2020-06-05",
"grants": "SymPy-1.1",
"group_members": ["ascopatz"],
"kickoff": {
"date": "2020-05-05",
"due_date": "2020-05-06",
"name": "Kick off meeting",
"objective": "introduce project to the lead",
"audience": ["lead", "pi", "group_members"],
"notes": ["kickoff note"],
"status": "finished"
},
"lead": "ascopatz",
"log_url": "https://docs.google.com/document/d/1YC_wtW5Q",
"milestones": [{
'due_date': '2020-05-20',
'name': 'Project lead presentation',
'notes': ["do background reading", "understand math"],
'objective': 'lead presents background reading and '
'initial project plan',
'audience': ['lead', 'pi', 'group_members'],
'status': 'proposed',
'type': 'meeting'
},
{'due_date': '2020-05-27',
'name': 'planning meeting',
'objective': 'develop a detailed plan with dates',
'audience': ['lead', 'pi', 'group_members'],
'status': 'proposed',
'type': 'pr',
}],
"name": "First Projectum",
"pi_id": "scopatz",
"status": "started"
},
"projects": {
"_id": "Cyclus",
"name": "Cyclus",
"description": "Agent-Based Nuclear Fuel Cycle Simulator",
"group": "ergs",
"highlights": [
{"year": 2020, "month": 5,
"description": "high profile pub in Nature"}
],
"logo": "http://fuelcycle.org/_static/big_c.png",
"other": [
"Discrete facilities with discrete material transactions",
"Low barrier to entry, rapid payback to adoption",
],
"repo": "https://github.com/cyclus/cyclus/",
"team": [
{
"begin_month": "June",
"begin_year": 2013,
"end_month": "July",
"end_year": 2015,
"name": "Anthony Scopatz",
"position": "Project Lead",
}
],
"type": "funded",
"website": "http://fuelcycle.org/",
"grant": "dmref15",
},
"proposalReviews": [
{
"_id": "1906doeExample",
"adequacy_of_resources": [
"The resources available to the PI seem adequate"
],
"agency": "doe",
"competency_of_team": ["super competent!"],
"doe_appropriateness_of_approach": [
"The proposed approach is highly innovative"
],
"doe_reasonableness_of_budget": [
"They could do it with half the money"],
"doe_relevance_to_program_mission": ["super relevant"],
"does_how": [
"they will find the cause of Malaria",
"when they find it they will determine a cure",
],
"due_date": "2020-04-10",
"does_what": "Find a cure for Malaria",
"freewrite": [
"I can put extra things here, such as special instructions from the",
"program officer",
],
"goals": [
"The goals of the proposal are to put together a team to find a cure"
"for Malaria, and then to find it"
],
"importance": ["save lives", "lift people from poverty"],
"institutions": "columbiau",
"month": "May",
"names": ["B. Cause", "A.N. Effect"],
"nsf_broader_impacts": [],
"nsf_create_original_transformative": [],
"nsf_plan_good": [],
"nsf_pot_to_advance_knowledge": [],
"nsf_pot_to_benefit_society": [],
"requester": "Lane Wilson",
"reviewer": "sbillinge",
"status": "submitted",
"summary": "dynamite proposal",
"title": "A stunning new way to cure Malaria",
"year": 2019,
},
{
"_id": "1906nsfExample",
"adequacy_of_resources": [
"The resources available to the PI seem adequate"
],
"agency": "nsf",
"competency_of_team": ["super competent!"],
"doe_appropriateness_of_approach": [],
"doe_reasonableness_of_budget": [],
"doe_relevance_to_program_mission": [],
"does_how": [
"they will find the cause of Poverty",
"when they find it they will determine a cure",
],
"does_what": "Find a cure for Poverty",
"due_date": "2020-04-10",
"freewrite": [
"I can put extra things here, such as special instructions from the",
"program officer",
],
"goals": [
"The goals of the proposal are to put together a team to find a cure"
"for Poverty, and then to find it"
],
"importance": ["save lives", "lift people from poverty"],
"institutions": "upenn",
"month": "May",
"names": ["A Genius"],
"nsf_broader_impacts": ["Poor people will be made unpoor"],
"nsf_create_original_transformative": [
"transformative because lives will be transformed"
],
"nsf_plan_good": [
"I don't see any issues with the plan",
"it should be very straightforward",
],
"nsf_pot_to_advance_knowledge": [
"This won't advance knowledge at all"],
"nsf_pot_to_benefit_society": [
"Society will benefit by poor people being made unpoor if they want "
"to be"
],
"requester": "Tessemer Guebre",
"reviewer": "sbillinge",
"status": "submitted",
"summary": "dynamite proposal",
"title": "A stunning new way to cure Poverty",
"year": 2019,
},
],
"proposals": [
{
"_id": "mypropsal",
"amount": 1000000.0,
"authors": ["Anthony Scopatz", "Robert Flanagan"],
"begin_day": 1,
"begin_month": "May",
"begin_year": 2030,
"currency": "USD",
"submitted_day": 18,
"duration": 3,
"end_day": 31,
"end_month": "December",
"end_year": 2030,
"full": {
"benefit_of_collaboration": "http://pdf.com"
"/benefit_of_collaboration",
"cv": ["http://pdf.com/scopatz-cv",
"http://pdf.com/flanagan-cv"],
"narrative": "http://some.com/pdf",
},
"submitted_month": "Aug",
"notes": "Quite an idea",
"pi": "Anthony Scopatz",
"pre": {
"benefit_of_collaboration": "http://pdf.com"
"/benefit_of_collaboration",
"cv": ["http://pdf.com/scopatz-cv",
"http://pdf.com/flanagan-cv"],
"day": 2,
"month": "Aug",
"narrative": "http://some.com/pdf",
"year": 1998,
},
"status": "submitted",
"title": "A very fine proposal indeed",
"submitted_year": 1999,
},
{
"_id": "dmref15",
"amount": 982785.0,
"authors": ["qdu", "dhsu", "sbillinge"],
"call_for_proposals": "http://www.nsf.gov/pubs/2014/nsf14591/"
"nsf14591.htm",
"begin_day": 1,
"begin_month": "May",
"begin_year": 2018,
"cpp_info": {
"cppflag": True,
"other_agencies_submitted": "None",
"institution": "Columbia University",
"person_months_academic": 0,
"person_months_summer": 1,
"project_scope": "lots to do but it doesn't overlap with any "
"other of my grants",
"single_pi": True
},
"currency": "USD",
"submitted_date": "2015-02-02",
"duration": 3,
"end_day": 1,
"end_month": "May",
"end_year": 2019,
"funder": "NSF",
"notes": "Quite an idea",
"pi": "Simon Billinge",
"status": "accepted",
"team": [
{
"institution": "Columbia University",
"name": "qdu",
"position": "co-pi",
},
{
"institution": "Columbia University",
"name": "dhsu",
"position": "co-pi",
},
{
"institution": "Columbia University",
"name": "sbillinge",
"position": "pi",
"subaward_amount": 330000.0,
},
],
"title": "DMREF: Novel, data validated, nanostructure determination "
"methods for accelerating materials discovery",
"title_short": "DMREF nanostructure",
},
{
"_id": "SymPy-1.1",
"amount": 3000.0,
"begin_date": "2030-05-01",
"end_date": "2030-12-31",
"cpp_info": {
"cppflag": True,
"other_agencies_submitted": "None",
"institution": "Columbia University",
"person_months_academic": 0,
"person_months_summer": 1,
"project_scope": ""
},
"currency": "USD",
"pi": "sbillinge",
"status": "submitted",
"title": "SymPy 1.1 Release Support",
},
{
"_id": "SymPy-2.0",
"amount": 3000.0,
"begin_date": "2019-06-01",
"end_date": "2030-12-31",
"cpp_info": {
"cppflag": True,
"other_agencies_submitted": "None",
"institution": "Columbia University",
"person_months_academic": 0,
"person_months_summer": 1,
"project_scope": ""
},
"currency": "USD",
"pi": "sbillinge",
"status": "submitted",
"title": "SymPy 1.1 Release Support",
},
{
"_id": "abc42",
"amount": 42000.0,
"begin_date": "2020-06-01",
"end_date": "2020-12-31",
"cpp_info": {
"cppflag": True,
"other_agencies_submitted": "None",
"institution": "Columbia University",
"person_months_academic": 0,
"person_months_summer": 1,
"project_scope": ""
},
"currency": "USD",
"pi": "sbillinge",
"status": "submitted",
"title": "The answer to life, the universe, and everything",
}
],
"reading_lists": {
"_id": "getting_started_with_pdf",
"day": "15",
"month": "12",
"papers": [{"doi": "10.1107/97809553602060000935",
"text": "Very basic, but brief, intro to powder diffraction in general"},
{"doi": "10.1039/9781847558237-00464",
"text": "Lightest weight overview of PDF analysis around. Good starting point"
},
{"url": "http://www.diffpy.org",
"text": "Download and install PDFgui software and run through the step by step tutorial under the help tab"}
],
"purpose": "Beginning reading about PDF",
"title": "A step-by-step pathway towards PDF understanding. It is recommended to read the papers in the order they are listed here.",
"year": 2019,
},
"refereeReports": {
"_id": "1902nature",
"claimed_found_what": ["gravity waves"],
"claimed_why_important": ["more money for ice cream"],
"did_how": ["measured with a ruler"],
"did_what": ["found a much cheaper way to measure gravity waves"],
"due_date": '2020-04-11',
"editor_eyes_only": "to be honest, I don't believe a word of it",
"final_assessment": ["The authors should really start over"],
"first_author_last_name": "Wingit",
"freewrite": "this comment didn't fit anywhere above",
"journal": "Nature",
"recommendation": "reject",
"requester": "Max Planck",
"reviewer": "sbillinge",
"status": "submitted",
"submitted_date": "2019-01-01",
"title": "a ruler approach to measuring gravity waves",
"validity_assessment": ["complete rubbish"],
"year": 2019,
},
"students": {
"_id": "Human A. Person",
"aka": ["H. A. Person"],
"email": "[email protected]",
"university_id": "HAP42",
},
}
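# Validation sketch (illustrative only): the dicts below are cerberus-style
# schemas, but the extra bookkeeping rules ("_description", "description") are
# not standard cerberus rules, so a plain Validator needs a subclass or a
# preprocessing step that tolerates them before EXEMPLARS can be checked:
#     v = TolerantValidator(SCHEMAS["abstracts"])  # hypothetical subclass
#     v.validate(EXEMPLARS["abstracts"])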
SCHEMAS = {
"abstracts": {
"_description": {
"description": "Abstracts for a conference or workshop. This is "
"generally public information"
},
"_id": {
"description": "Unique identifier for submission. This generally "
"includes the author name and part of the title.",
"required": True,
"type": "string",
},
"coauthors": {
"description": "names of coauthors",
"required": False,
"type": "string",
},
"email": {
"description": "contact email for the author.",
"required": True,
"type": "string",
},
"firstname": {
"description": "first name of the author.",
"required": True,
"type": "string",
},
"institution": {
"description": "name of the institution",
"required": True,
"type": "string",
},
"lastname": {
"description": "last name of the author.",
"required": True,
"type": "string",
},
"references": {
"description": "HTML string of reference for the abstract itself",
"required": False,
"type": "string",
},
"text": {
"description": "HTML string of the abstract.",
"required": True,
"type": "string",
},
"timestamp": {
"description": "The time when the abstract was submitted.",
"required": True,
"type": "string",
},
"title": {
"description": "title of the presentation/paper.",
"required": True,
"type": "string",
},
},
"assignments": {
"_description": {
"description": "Information about assignments for classes."},
"_id": {
"description": "A unique id for the assignment, such as "
"HW01-EMCH-558-2016-S",
"required": True,
"type": "string",
},
"category": {
"description": "such as 'homework' or 'final'",
"required": True,
"type": "string",
},
"courses": {
"description": "ids of the courses that have this assignment",
"required": True,
"anyof_type": ["string", "list"],
},
"file": {
"description": "path to assignment file in store",
"required": False,
"type": "string",
},
"points": {
"description": "list of number of points possible for each "
"question. Length is the number of questions",
"required": True,
"type": "list",
"schema": {"anyof_type": ["integer", "float"]},
},
"questions": {
"description": "titles for the questions on this assignment",
"required": False,
"type": "list",
},
"solution": {
"description": "path to solution file in store",
"required": False,
"type": "string",
},
},
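# "anyof_type" above is cerberus' *of-rule typesaver: the field validates when
# its value matches any one of the listed types.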
"beamplan": {
"_id": {
"description": "Unique identifier for the experiment plan. It should have a format '{year:2d}{month:2d}{people_id:s}_{plan_name:s}'",
"required": True,
"type": "string"
},
"_description": {
"description": "Information about the experiment plan for the beamtime."},
"project_lead": {
"description": "The id for person who put out this plan. It should be inside the people.yml.",
"required": True,
"type": "string"
},
"project": {
"description": "The id for the project which the plan belongs to. It should be on airtable.",
"required": True,
"type": "string"
},
"begin_date": {
"description": "The begin date of the beam time.",
"required": True,
"anyof_type": ["string", "datetime", "date"]
},
"end_date": {
"description": "The end date of the beam time.",
"required": True,
"anyof_type": ["string", "datetime", "date"]
},
"beamtime": {
"description": "The id for the beamtime. Check the Airtable.",
"required": True,
"type": "string"
},
"holder": {
"description": "Sample holder used during the measurement, e. g. 3 mm OD tubes holder.",
"required": True,
"type": "string"
},
"devices": {
"description": "The dictionary of devices used in the measurement e. g. ",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"measurement": {
"description": "What data to be measured, e. g. PDF, XRD, SAXS. This will determine the setup.",
"required": True,
"type": "string"
},
"samples": {
"description": "The list of samples to be measured.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"time": {
"description": "The total time of executing the exp_plan. Unit: min.",
"required": True,
"type": "integer"
},
"objective": {
"description": "What to study in the experiments. What goal to achieve.",
"required": True,
"type": "string"
},
"prep_plan": {
"description": "Steps to prepare the samples. Do NOT need details.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"ship_plan": {
"description": "Steps to carry the samples from the producer to the BNL. Do NOT need details.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"exp_plan": {
"description": "Steps to carry out the experiments at BNL. Need details",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"scanplan": {
"description": "The scanplan for the experiment, e. g. tseries, Tramp, ct.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"pipeline": {
"description": "The analysis pipeline for the experiment. If no new pipeline is needed, use 'usual'.",
"required": True,
"type": "string",
"default": "usual"
},
"todo": {
"description": "The TODO list before the beamtime.",
"required": True,
"type": "list",
"schema": {
"type": "string"
}
},
"notes": {
"description": "Notes of the plan, e. g. the preferred time.",
"required": False,
"anyof_type": [
"list",
"string"
],
"schema": {
"type": "string"
}
}
},
"blog": {
"_description": {
"description": "This collection represents blog posts written by "
"the members of the research group."
},
"_id": {
"description": "short representation, such as this-is-my-title",
"required": True,
"type": "string",
},
"author": {
"description": "name or AKA of author",
"required": True,
"type": "string",
},
"day": {"description": "Publication day", "required": True,
"type": "integer"},
"month": {
"description": "Publication month",
"required": True,
"anyof_type": ["string", "integer"],
},
"original": {
"description": "URL of original post, if this is a repost",
"required": False,
"type": "string",
},
"post": {
"description": "actual contents of the post",
"required": True,
"type": "string",
},
"title": {
"description": "full human readable title",
"required": True,
"type": "string",
},
"year": {
"description": "Publication year",
"required": True,
"type": "integer",
},
},
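    # Illustrative example of a "blog" record (hypothetical values):
    # {"_id": "this-is-my-title", "author": "Jane Doe", "day": 1,
    #  "month": "May", "year": 2020, "title": "This Is My Title",
    #  "post": "Full text of the post ..."}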
"contacts": {
"_description": {"description": "a lighter version of people. Fewer required fields"
"for capturing people who are less tightly coupled"
},
"_id": {
"description": "id of the person, e.g., first letter first name "
"plus last name, but unique",
"required": True,
},
"aka": {
"required": False,
"type": "list",
"description": "other names for the person",
},
"date": {
"description": "date when the entry was created in ISO format",
"required": False,
"anyof_type": ["string", "date"],
},
'day': {
"description": "day when the entry was created",
"required": False,
"type": "integer",
},
"department": {
"description": "Department at the institution",
"type": "string",
"required": False,
},
"email": {
"description": "Contact email for the contact",
"type": "string",
"required": False,
},
"institution": {
"description": "the institution where they are located. This is"
"required for building a COI list of coauthors, but"
"not in general. It can be institute id or anything"
"in the aka or name",
"required": False,
"type": "string"
},
'month': {
"description": "month when the entry was created",
"required": False,
"anyof_type": ["string", "integer"],
},
"name": {
"description": "the person's canonical name",
"required": True,
"type": "string",
},
"notes": {
"description": "notes about the person",
"required": False,
"anyof_type": ["list", "string"]
},
"title": {
"description": "how the person is addressed",
"required": False,
"type": "string",
},
'updated': {
"description": "most recently updated",
"required": False,
"anyof_type": ["string", "datetime", "date"],
},
'year': {
"description": "year when the entry was created",
"required": False,
"type": "integer",
},
'uuid': {
"description": "universally unique identifier",
"required": False,
"type": "string",
},
},
"expenses": {
"_description": {
"description": "This collection records expenses for the "
"group. It should most likely be private"
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": "string",
},
"begin_date": {
"description": "begin date in YYYY-MM-DD",
"anyof_type": ["string", "date"],
},
"end_date": {
"description": "end date in YYYY-MM-DD",
"anyof_type": ["string", "date"],
},
"grant_percentages": {
"description": "the percentage of the reimbursement amount to put "
"on each grant. This list must be the same length as"
"the grants list and the percentages placed in the "
"order that the grants appear in that list",
"required": False,
"type": "list",
},
"grants": {
"description": "the grants in a list, or a string if only one grant",
"required": True,
"anyof_type": ["string", "list"],
},
"project": {
"description": "project or list of projects that this "
"presentation is associated with. Should "
"be discoverable in projects collection",
"anyof_type": ["string", "list"],
},
"payee": {
"description": "The name or id of the payee filing the expense",
"required": True,
"type": "string",
},
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": False,
"type": "integer",
},
"date": {
"description": "Expense date",
"required": False,
"anyof_type": ["string", "date"],
},
"month": {
"description": "Expense month",
"required": False,
"anyof_type": ["string", "integer"],
},
"year": {
"description": "Expense year",
"required": False,
"type": "integer",
},
"purpose": {
"description": "reason for expense",
"type": "string",
"required": True,
},
"unsegregated_expense": {
"description": "The allowed expenses",
"type": "float",
},
"segregated_expense": {
"description": "The unallowed expenses",
"type": "float",
},
"original_currency": {
"description": "The currency the payment was made in",
"type": "float",
},
},
},
},
"overall_purpose": {
"description": "The reason for the expenses",
"type": "string",
"required": True,
},
"notes": {
"description": "Notes about the expense",
"type": "list",
},
"status": {
"description": "The status of the expense",
"eallowed": EXPENSES_TYPE,
"type": "string"
},
"reimbursements": {
"description": "Reimbursements for the expense",
"schema": {
"schema": {
'amount': {"description": 'amount for reimbursements',
"type": "float",
},
'date': {"description": "date of reimbursement",
"anyof_type": ["string", "date"],
},
'submission_date': {"description": "date of submission",
"anyof_type": ["string", "date"],
},
'submission_day': {"description": "day of submission. deprecated but here for "
"backwards compatibility",
"type": "integer",
},
'submission_month': {"description": "month of submission. deprecated but here for "
"backwards compatibility",
"anyof_type": ["integer", "string"],
},
'submission_year': {"description": "year of submission. deprecated but here for "
"backwards compatibility",
"type": "integer",
},
'day': {"description": "day of reimbursement. deprecated but here for "
"backwards compatibility",
"type": "integer",
},
'month': {"description": "month of reimbursement. deprecated but here for "
"backwards compatibility",
"anyof_type": ["string", "integer"],
},
'year': {"description": "year of reimbursement. deprecated but here for "
"backwards compatibility",
"type": "integer",
},
'where': {"description": 'where the reimbursement has been sent',
"type": 'string',
},
},
"type": "dict"
},
"type": "list"
},
"expense_type": {
"description": "The type of expense",
"allowed": ["travel", "business"],
"required": True,
},
},
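    # Illustrative example of an "expenses" record (hypothetical values; the
    # optional "status" field is omitted here because its allowed values come
    # from EXPENSES_TYPE, which is defined elsewhere in this module):
    # {"_id": "test-travel-2020", "payee": "jdoe", "grants": ["dmref15"],
    #  "expense_type": "travel", "overall_purpose": "attend a conference",
    #  "itemized_expenses": [{"purpose": "airfare", "day": 1, "month": "Jun",
    #                         "year": 2020, "unsegregated_expense": 400.0}]}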
"grades": {
"_description": {
"description": "The grade for a student on an assignment. This "
"information should be private."
},
"_id": {
"description": "unique id, typically the " "student-assignment-course",
"required": True,
"type": "string",
},
"assignment": {
"description": "assignment id",
"required": True,
"type": "string",
},
"course": {"description": "course id", "required": True,
"type": "string"},
"filename": {
"description": "path to file in store",
"required": False,
"type": "string",
},
"scores": {
"description": "the number of points earned on each question",
"required": True,
"type": "list",
"schema": {"anyof_type": ["integer", "float"]},
},
"student": {"description": "student id", "required": True,
"type": "string"},
},
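    # Illustrative example of a "grades" record (hypothetical values):
    # {"_id": "jdoe-HW01-EMCH-558-2016-S", "student": "jdoe",
    #  "assignment": "HW01-EMCH-558-2016-S", "course": "EMCH-558-2016-S",
    #  "scores": [9, 10, 18.5]}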
"grants": {
"_description": {
"description": "This collection represents grants that have been "
"awarded to the group."
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": ("string", "integer", "float"),
},
"account": {
"description": "the account number which holds the funds",
"required": False,
"type": "string",
},
"admin": {
"description": "the group administering the grant",
"type": "string",
"required": False,
},
"alias": {
"description": "the alias of the grant",
"type": "string",
"required": False,
},
"amount": {
"description": "value of award",
"required": True,
"type": ("integer", "float"),
},
"begin_date": {
"description": "start date of the grant (if string, in format YYYY-MM-DD)",
"required": False,
"anyof_type": ["string", "date"]
},
"begin_day": {
"description": "start day of the grant",
"required": False,
"type": "integer",
},
"begin_month": {
"description": "start month of the grant",
"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {
"description": "start year of the grant",
"required": False,
"type": "integer",
},
"benefit_of_collaboration": {
"description": "",
"required": False,
"type": "string",
},
# TODO: maybe this should move to proposals?
"call_for_proposals": {"description": "", "required": False,
"type": "string"},
"currency": {
"description": "typically '$' or 'USD'",
"required": False,
"type": "string",
},
"end_date": {
"description": "start date of the grant (if string, in format YYYY-MM-DD)",
"required": False,
"anyof_type": ["string", "date"]
},
"end_day": {
"description": "end day of the grant",
"required": False,
"type": ("string", "integer"),
},
"end_month": {
"description": "end month of the grant",
"required": False,
"anyof_type": ["string", "integer"],
},
"end_year": {
"description": "end year of the grant",
"required": False,
"type": "integer",
},
"funder": {
"description": "the agency funding the work",
"required": True,
"type": "string",
},
"grant_id": {
"description": "the identifier for this work",
"required": False,
"type": "string",
},
"institution": {
"description": "the host institution for the grant",
"type": "string",
"required": False,
},
"narrative": {"description": "", "required": False, "type": "string"},
"notes": {
"description": "notes about the grant",
"required": False,
"type": "string",
},
"person_months_academic": {
"description": "Number of months of funding during the academic" "year",
"required": False,
"anyof_type": ["integer", "float"],
},
"person_months_summer": {
"description": "Number of months of funding during the summer",
"required": False,
"anyof_type": ["integer", "float"],
},
"program": {
"description": "the program the work was funded under",
"required": True,
"type": "string",
},
# TODO: maybe this should be moved to proposals?
"status": {
"allowed": ["pending", "declined", "accepted", "in-prep"],
"description": "status of the grant",
"required": False,
"type": "string",
},
"scope": {
"description": "The scope of the grant, answers the prompt: "
'"Describe Research Including Synergies and '
'Delineation with Respect to this Proposal/Award:"',
"required": False,
"type": "string",
},
# TODO: maybe this should be duplicated in proposals?
"team": {
"description": "information about the team members participating "
"in the grant.",
"required": True,
"schema": {
"schema": {
"cv": {"required": False, "type": "string"},
"institution": {"required": True, "type": "string"},
"name": {"required": True, "type": "string"},
"position": {"required": True, "type": "string"},
"subaward_amount": {
"required": False,
"type": ("integer", "float"),
},
},
"type": "dict",
},
"type": "list",
},
"title": {
"description": "actual title of proposal / grant",
"required": True,
"type": "string",
},
"budget": {
"description": "budget periods of grant",
"required": False,
"schema": {
"schema": {
"begin_date": {
"description": "start date of the budget period in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"],
},
"end_date": {
"description": "end date of the budget period in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"],
},
"student_months": {
"description": "number of months of funding for student members during the academic year",
"required": False,
"anyof_type": ["float", "integer"]
},
"postdoc_months": {
"description": "number of months of funding for postdoc members during the academic year",
"required": False,
"anyof_type": ["float", "integer"]
},
"ss_months": {
"description": "number of months of funding for the summer",
"required": False,
"anyof_type": ["float", "integer"]
},
"amount": {
"description": "subaward for this budget period",
"required": False,
"anyof_type": ["float", "integer"]
}
},
"type": "dict",
},
"type": "list",
},
"proposal_id": {
"description": "initial proposal made for grant",
"required": False,
"type": "string",
}
},
"groups": {
"_description": {
"description": "Information about the research group"
"this is generally public information"
},
"_id": {
"description": "Unique identifier for submission. This generally "
"includes the author name and part of the title.",
"required": True,
"type": "string",
},
"aka": {
"required": True,
"type": "list",
"description": "other names for the group",
},
"banner": {
"required": False,
"type": "string",
"description": "name of image file with the group banner",
},
"pi_name": {
"description": "The name of the Principle Investigator",
"required": True,
"type": "string",
},
"department": {
"description": "Name of host department",
"required": True,
"type": "string",
},
"institution": {
"description": "Name of the host institution",
"required": True,
"type": "string",
},
"name": {
"description": "Name of the group",
"required": True,
"type": "string",
},
"website": {"description": "URL to group webpage", "type": "string"},
"mission_statement": {
"description": "Mission statement of the group",
"type": "string",
},
"projects": {
"description": "About line for projects",
"type": "string",
"required": True,
},
"email": {
"description": "Contact email for the group",
"type": "string",
"required": True,
},
},
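    # Illustrative example of a "groups" record (hypothetical values):
    # {"_id": "example-group", "name": "Example Research Group", "aka": ["ERG"],
    #  "pi_name": "Jane Doe", "department": "Chemistry",
    #  "institution": "Example University", "projects": "materials design",
    #  "email": "erg@example.edu"}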
"institutions": {
"_description": {
"description": "This collection will contain all the institutions"
"in the world and their departments and addresses"
},
"_id": {
"description": "unique identifier for the institution.",
"required": True,
"type": "string",
},
"aka": {
"description": "list of all the different names this "
"the institution is known by",
"required": False,
"type": "list",
},
"city": {
"description": "the city where the institution is",
"required": True,
"type": "string",
},
"country": {
"description": "The country where the institution is",
"required": True,
"type": "string",
},
"date": {
"description": "Expense date",
"required": False,
"anyof_type": ["string", "date"],
},
"day": {
"description": "the day the entry was created",
"required": False,
"type": "integer",
},
"departments": {
"description": "all the departments and centers and"
"various units in the institution",
"required": False,
"type": "dict",
            # Allow unknown department names, but check their content
"valuesrules": {
"type": "dict",
"schema": {
"name": {
"description": "The canonical name",
"required": True,
"type": "string",
},
"aka": {"required": False, "type": "list"},
},
},
},
"month": {
"description": "the month the entry was created",
"required": False,
"anyof_type": ["string", "integer"]
},
"name": {
"description": "the canonical name of the institutions",
"required": True,
"type": "string",
},
"schools": {
"description": "this is more for universities, but it "
"be used for larger divisions in big "
"organizations",
"required": False,
"type": "dict",
"valuesrules": {
"type": "dict",
"schema": {
"name": {
"description": "The canonical name",
"required": True,
"type": "string",
},
"aka": {"required": False, "type": "list"},
},
},
},
"state": {
"description": "the state where the institution is",
"required": False,
"type": "string",
},
"street": {
"description": "the street where the institution is",
"required": False,
"type": "string",
},
"updated": {
"description": "a datetime when the entry was updated",
"required": False,
"anyof_type": ["string", "datetime", "date"]
},
"uuid": {
"description": "a uuid for the entry",
"required": False,
"type": "string",
},
"year": {
"description": "the year the entry was created",
"required": False,
"type": "integer",
},
"zip": {
"description": "the zip or postal code of the institution",
"required": False,
"anyof_type": ["integer", "string"],
},
},
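    # Illustrative example of an "institutions" record (hypothetical values):
    # {"_id": "exampleu", "name": "Example University", "city": "Example City",
    #  "country": "USA", "state": "NY", "zip": "10001",
    #  "departments": {"chemistry": {"name": "Department of Chemistry",
    #                                "aka": ["Chem"]}}}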
"meetings": {
"_id": {
"description": "unique identifier for the date of the group meeting",
"required": True,
"type": "string",
},
"_description": {
"description": "the group meeting."
},
"actions": {
"description": "action items expected from the group members for that particular meeting week",
"required": False,
"type": "list",
},
"agenda": {
"description": "schedule of the current meeting",
"required": False,
"type": "list",
},
"buddies": {
"description": "list of pairs of group members that are selected for the buddy round robin",
"required": False,
"type": "list",
},
"day": {
"description": "day of the group meeting",
"required": False,
"type": "integer",
},
"journal_club": {
"description": "indicating the doi of the journal and the presenting group member as the presenter",
"required": False,
"type": "dict",
},
"lead": {
"description": "person who will be leading the meeting of the current week",
"required": False,
"type": "string",
},
"minutes": {
"description": "meeting notes in a chronological order according to comments made by the group members",
"required": False,
"type": "list",
},
"month": {
"description": "month in which the meeting is taking place",
"required": False,
"anyof_type": ["string", "integer"]
},
"place": {
"description": "location where the meeting is taking place on campus",
"required": False,
"type": "string",
},
"presentation": {
"description": "indicating the title of the presentation along with the link and the presenter ",
"required": False,
"type": "dict",
},
"scribe": {
"description": "person who will be taking notes and updating minutes accordingly",
"required": False,
"type": "string",
},
"time": {
"description": "person who will be taking notes and updating minutes accordingly"
"If an integer is minutes past midnight, so 13:30 is 810 for"
"example.",
"required": False,
"anyof_type": ["string", "integer"]
},
"updated": {
"description": "person who will be taking notes and updating minutes accordingly",
"required": False,
"anyof_type": ["string", "datetime", "date"],
},
"uuid": {
"description": "person who will be taking notes and updating minutes accordingly",
"required": False,
"type": "string",
},
"year": {
"description": "person who will be taking notes and updating minutes accordingly",
"required": False,
"type": "integer",
},
},
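    # Illustrative example of a "meetings" record (hypothetical values):
    # {"_id": "grp2020-06-15", "day": 15, "month": "June", "year": 2020,
    #  "lead": "jdoe", "scribe": "rsmith", "place": "Room 123",
    #  "agenda": ["project updates", "journal club"]}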
"people": {
"_description": {
"description": "This collection describes the members of the "
"research group. This is normally public data."
},
"_id": {
"description": "unique identifier for the group member",
"required": True,
"type": "string",
},
"active": {
"description": "If the person is an active member, default True.",
"required": False,
"type": "boolean",
},
"aka": {
"description": "list of aliases (also-known-as), useful for "
"identifying the group member in citations or "
"elsewhere.",
"required": True,
"type": ["string", "list"],
},
"appointments": {
"type": "dict",
"required": False,
"description": "begin and end date, grant loading status and notes about appointments"
},
"activities": {
"type": "list",
"required": False,
"description": "activities may be teaching or research things",
"schema": {
"type": "dict",
"schema": {
"day": {
"required": False,
"description": "the day the activity took place",
"type": "integer",
},
"type": {
"required": True,
"description": "the type of the acitivity",
"type": "string",
"eallowed": ACTIVITIES_TYPE
},
"month": {
"required": False,
"description": "the month the activity took place",
"anyof_type": ["integer", "string"],
},
"name": {
"required": True,
"description": "brief statement of the activity",
"type": "string",
},
"other": {
"required": False,
"description": "longer statement of the activity",
"type": "string",
},
"year": {
"required": True,
"description": "the year the activity took place",
"type": "integer",
},
}
}
},
"avatar": {"description": "URL to avatar", "required": True,
"type": "string"},
"bio": {
"description": "short biographical text",
"required": True,
"type": "string",
},
"bios": {
"description": "longer biographical text if needed",
"required": False,
"anyof_type": ["string", "list"]
},
"collab": {
"description": "If the person is a collaborator, default False.",
"required": False,
"type": "boolean",
},
"committees": {
"description": "Committees that are served on",
"required": False,
"schema": {
"type": "dict",
"schema": {
"name": {"required": True, "type": "string",
"description": "name of committee, or person if it "
"is a phd committee"},
"day": {"required": False, "type": "integer"},
"month": {"required": False,
"anyof_type": ["string", "integer"],
},
"notes": {"required": False,
"description": "extra things you want to record about the thing",
"anyof_type": ["string", "list"],
},
"year": {"required": True, "type": "integer"},
"unit": {"required": False, "type": "string",
"description": "name of department or school etc."},
"type": {"required": False, "type": "string",
"description": "type of committee, department, school, university, external",
"eallowed": COMMITTEES_TYPE},
"level": {
"required": True,
"type": "string",
"description": "department or school or university, or external",
"eallowed": COMMITTEES_LEVEL
},
"group": {
"required": False,
"type": "string",
"description": "this employment is/was in"
"a group in groups coll",
},
},
},
"type": "list",
},
"education": {
"description": "This contains the educational information for "
"the group member.",
"required": True,
"schema": {
"type": "dict",
"schema": {
"advisor": {"required": False, "type": "string",
"description": "name or id of advisor for this degree"},
"begin_day": {"required": False,
"type": "integer"},
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {"required": True, "type": "integer"},
"degree": {"required": True, "type": "string"},
"department": {
"required": False,
"type": "string",
"description": "department within" "the institution",
},
"group": {
"required": False,
"type": "string",
"description": "this employment is/was in"
"a group in groups coll",
},
"end_day": {"required": False,
"type": "integer"},
"end_month": {"required": False,
"anyof_type": ["string", "integer"],
},
# Could be ongoing with undefined end
"end_year": {"required": False, "type": "integer"},
"gpa": {"required": False, "type": ("float", "string")},
"institution": {"required": True, "type": "string"},
"location": {"required": False, "type": "string"},
"other": {"required": False, "type": "list"},
},
},
"type": "list",
},
"email": {
"description": "email address of the group member",
"required": False,
"type": "string",
},
"employment": {
"description": "Employment information, similar to educational "
"information.",
"required": False,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"advisor": {"required": False, "type": "string",
"description": "name or id of "
"advisor/mentor/manager"},
"begin_day": {"required": False, "type": "integer"},
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {"required": False, "type": "integer"},
"begin_date": {"required": False, "anyof_type": ["string", "date", "datetime"],
"description": "begin date of employment in format YYYY-MM-DD"},
"coworkers": {"required": False, "type": "list",
"description": "list of coworkers. If"
"position is editor, these are "
"assumed to be coeditors in"
"conflict of interest builder"},
"department": {"required": False, "type": "string"},
"end_day": {"required": False, "type": "integer"},
"end_month": {"required": False,
},
"end_year": {"required": False, "type": "integer"},
"end_date": {"required": False, "anyof_type": ["string", "date", "datetime"],
"description": "end date of employment in format YYYY-MM-DD"},
"group": {
"required": False,
"type": "string",
"description": "this employment is/was in"
"a group in groups coll",
},
"location": {"required": False, "type": "string"},
"organization": {"required": True, "type": "string"},
"other": {"required": False, "type": "list"},
"permanent": {"required": False, "type": "boolean",
"description": "true if the position is open " \
"ended and has no fixed end-date"},
"position": {"required": True, "type": "string",
"eallowed": list(SORTED_POSITION)},
"position_full": {
"description": "The full on title of the position. This will be "
"typeset if it is here, or if not Position will be "
"used. Position will be used for sorting and must "
"come from a fixed list of positions",
"required": False,
"type": "string",
},
"status": {"required": False, "type": "string", "eallowed": POSITION_STATUS,
},
},
},
},
"facilities": {
"type": "list",
"required": False,
"description": "facilities may be teaching or research things",
"schema": {
"type": "dict",
"schema": {
"begin_day": {
"required": False,
"description": "the day facility, or the wish for the "
"facility, started",
"type": "integer",
},
"end_day": {
"required": False,
"description": "the day facility started",
"type": "integer",
},
"type": {
"required": True,
"description": "the type of the facility. Columbia asks"
"for wished-for facilities, so there are "
"teaching-wish and research-wish fields.",
"type": "string",
"eallowed": FACILITIES_TYPE
},
"begin_month": {
"required": False,
"description": "the month the facility (or wish) started",
"anyof_type": ["integer", "string"],
},
"end_month": {
"required": False,
"description": "the month the faclity went away",
"anyof_type": ["integer", "string"],
},
"name": {
"required": True,
"description": "description of the facility",
"type": "string",
},
"notes": {
"required": False,
"description": "anything else you want to jot down",
"anyof_type": ["string", "list"]
},
"begin_year": {
"required": True,
"description": "the year the facility (or wish) started",
"type": "integer",
},
"end_year": {
"required": False,
"description": "the year the facility (or wish) went away",
"type": "integer",
},
}
}
},
"funding": {
"description": "Funding and scholarship that the group member "
"has individually obtained in the past. "
"**WARNING:** this is not to be confused with the "
"**grants** collection",
"required": False,
"schema": {
"type": "dict",
"schema": {
"currency": {"required": False, "type": "string"},
"duration": {"required": False, "type": "string"},
"month": {"required": False,
"anyof_type": ["string", "integer"],
},
"name": {"required": True, "type": "string"},
"value": {"required": True, "type": ("float", "integer")},
"year": {"required": True, "type": "integer"},
},
},
"type": "list",
},
"github_id": {"required": False, "type": "string",
"description": "Your GitHub ID"},
"google_scholar_url": {"required": False, "type": "string",
"description": "URL of your Google Scholar "
"rofile"},
"grp_mtg_active": {"required": False, "type": "boolean",
"description": "Whether to schedule tasks at group meeting "
"or not"},
"hindex": {
"description": "details of hindex pulled on a certain date",
"required": False,
"schema": {
"type": "dict",
"schema": {
"h": {"description": "the value of the h index",
"required": True, "type": "integer"},
"h_last_five": {"description": "h index over past 5 years",
"required": False, "type": "integer"},
"citations": {"description": "total number of citations",
"required": False, "type": "integer"},
"citations_last_five": {"description": "number of citations"
"in the past 5 years",
"required": False, "type": "integer"},
"origin": {"description": "where the numbers came from",
"required": False, "type": "string"},
"since": {"description": "year of first citation",
"required": False, "type": "integer"},
"year": {"description": "year when the data were pulled",
"required": False, "type": "integer"},
"month": {"description": "month when the data were pulled",
"required": False, "anyof_type": ["string", "integer"]},
"day": {"description": "day when the data were pulled",
"required": False, "type": "integer"},
}
},
"type": "list",
},
"home_address": {
"description": "The person's home address",
"type": "dict",
"schema": {
"street": {"type": "string", "description": "street address"},
"city": {"type": "string", "description": "name of home city"},
"state": {"type": "string", "description": "name o home state"},
"zip": {"type": "string", "description": "zip code"},
},
},
"honors": {
"description": "Honors that have been awarded to this " "group member",
"required": False,
"schema": {
"type": "dict",
"schema": {
"description": {"required": False, "type": "string"},
"month": {"required": False,
"anyof_type": ["string", "integer"],
},
"name": {"required": True, "type": "string"},
"year": {"required": True, "type": "integer"},
},
},
"type": "list",
},
"initials": {
"description": "The canonical initials for this group member",
"required": False,
"type": "string",
},
# TODO: include `link`
"membership": {
"description": "Professional organizations this member is " "a part of",
"required": False,
"schema": {
"type": "dict",
"schema": {
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {"required": True, "type": "integer"},
"description": {"required": False, "type": "string"},
"end_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"end_year": {"required": False, "type": "integer"},
"organization": {"required": True, "type": "string"},
"position": {"required": True, "type": "string"},
"website": {"required": False, "type": "string"},
},
},
"type": "list",
},
"miscellaneous": {
"description": "Place to put weird things needed for special reporta",
"required": False,
"type": "dict",
"schema": {
"metrics_for_success": {
"description": "How do I want to be judged",
"required": False,
"type": "list",
},
},
},
"name": {
"description": "Full, canonical name for the person",
"required": True,
"type": "string",
},
"office": {
"description": "The person's office",
"type": "string",
"required": False
},
"orcid_id": {
"description": "The ORCID ID of the person",
"required": False,
"type": "string",
},
"position": {
"description": "such as professor, graduate student, or scientist",
"required": False,
"type": "string",
"eallowed": list(SORTED_POSITION),
},
"position_full": {
"description": "The full on title of the position. This will be "
"typeset if it is here, or if not Position will be "
"used. Position will be used for sorting and must "
"come from a fixed list of positions",
"required": False,
"type": "string",
},
"publicity": {
"description": "summary of publicity that person has received",
"required": False,
"schema": {
"type": "dict",
"schema": {
"type": {"required": True, "type": "string",
"eallowed": PUBLICITY_TYPE},
"topic": {"required": False, "type": "string",
"description": "The short sentence of what the "
"publicity was about",
},
"title": {"required": True, "type": "string",
"description": "The title of the piece",
},
"day": {"required": False, "type": "integer",
"description": "The day the piece appeared"
},
"month": {"required": False, "anyof_type": ["string",
"integer"],
"description": "The month the piece appeared"
},
"publication": {"required": False, "type": "string",
"description": "The place where the "
"publicity was placed"
},
"text": {"required": False, "type": "string",
"description": "The text of the publicity",
},
"url": {"required": False, "type": "string",
"description": "The URL where the piece may be found"
},
"year": {"required": True, "type": "integer",
"description": "The year the piece appeared"
},
"grant": {"required": True, "type": "string",
"description": "The identifier of the grant "
"associated with the piece"
},
},
},
"type": "list"
},
"research_focus_areas": {
"description": "summary of research projects that are ongoing. Used"
"in Annual appraisal for example",
"required": False,
"schema": {
"type": "dict",
"schema": {
"begin_year": {"required": False, "type": "integer"},
"end_year": {"required": False, "type": "integer"},
"description": {"required": False, "type": "string"}
},
},
"type": "list"
},
"research_summary": {
"description": "Brief summary of overarching research goals",
"required": False,
"type": "string",
},
"service": {
"description": "Service that this group member has provided",
"required": False,
"schema": {
"type": "dict",
"schema": {
"description": {"required": False, "type": "string"},
"duration": {"required": False, "type": "string"},
"month": {"description": "Use month and year if the service"
"doesn't extend more than one year."
"Otherwise use begin_year and end_year",
"required": False,
"anyof_type": ["string", "integer"]
},
"name": {"required": True, "type": "string"},
"role": {"required": False, "type": "string",
"description": "the role played in the activity, e.g., co-chair"},
"notes": {"required": False, "anyof_type": ["string", "list"]},
"year": {"required": False, "type": "integer"},
"begin_year": {"required": False, "type": "integer"},
"begin_day": {"required": False, "type": "integer"},
"begin_month": {"description": "Use month and year if the service"
"doesn't extend more than one year."
"Otherwise use begin_year/month and end_year/month",
"required": False,
"anyof_type": ["string", "integer"]
},
"end_year": {"required": False, "type": "integer"},
"end_month": {"description": "Use month and year if the service"
"doesn't extend more than one year."
"Otherwise use begin_year and end_year",
"required": False,
"anyof_type": ["string", "integer"]
},
"end_day": {"required": False, "type": "integer"},
"other": {"required": False,
"anyof_type": ["string", "list"]},
"type": {"required": True, "type": "string",
"description": "profession, department, school, university",
"eallowed": SERVICE_TYPE},
},
},
"type": "list",
},
"skills": {
"description": "Skill the group member has",
"required": False,
"schema": {
"type": "dict",
"schema": {
"category": {"required": True, "type": "string"},
"level": {"required": True, "type": "string"},
"name": {"required": True, "type": "string"},
},
},
"type": "list",
},
"teaching": {
"description": "Courses that this group member has taught, if any",
"required": False,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"course": {"required": True, "type": "string"},
"courseid": {"required": True, "type": "string"},
"description": {"required": False, "type": "string"},
"end_month": {"required": False,
"anyof_type": ["string", "integer"]},
"end_year": {"required": False, "type": "integer"},
"enrollment": {"required": False, "anyof_type": ["integer", "string"]},
"evaluation": {
"type": "dict",
"required": False,
"schema": {
"response_rate": {"type": "number", "required": True},
"amount_learned": {"type": "number", "required": True},
"appropriateness_workload": {"type": "number", "required": True},
"course_overall": {"type": "number", "required": True},
"fairness_grading": {"type": "number", "required": True},
"organization": {"type": "number", "required": True},
"classroom_delivery": {"type": "number", "required": True},
"approachability": {"type": "number", "required": True},
"instructor_overall": {"type": "number", "required": True},
"comments": {"type": "list", "required": False,
"description": "student comments"},
},
},
"materials": {"required": False, "type": "string"},
"month": {"required": False,
"anyof_type": ["string", "integer"],
},
"organization": {"required": True, "type": "string"},
"position": {"required": True, "type": "string"},
"semester": {"required": False, "type": "string"},
"syllabus": {"required": False, "type": "string"},
"video": {"required": False, "type": "string"},
"website": {"required": False, "type": "string"},
"year": {"required": True, "type": "integer"},
},
},
},
"title": {
"description": "for example, Dr., etc.",
"required": False,
"type": "string",
},
"todos": {
"description": "a list of the todo tasks",
"required": False,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"description": {"description": "the description of the to-do task",
"required": True,
"type": "string"},
"due_date": {"description": "the due date",
"required": False,
"anyof_type": ["string", "date"]},
"begin_date": {"description": "the begin date",
"required": False,
"anyof_type": ["string", "date"]},
"end_date": {"description": "the end date",
"required": False,
"anyof_type": ["string", "date"]},
"duration": {
"description": "the size of the task/ the estimated duration it will take to finish the task. Unit: miniutes.",
"required": False,
"type": "float"},
"importance": {
"description": "the importance, from 0 to 2",
"required": False,
"type": "integer"},
"status": {"description": "the status: started/finished/cancelled",
"required": True,
"type": "string"},
"notes": {"description": "additional notes for this task",
"required": False,
"type": "list",
"schema": {"type": "string"}
},
"running_index": {
"description": "Index of a certain task used to update that task in the enumerated todo list.",
"required": False,
"type": "integer"},
"assigned_by": {
"description": "ID of the member that assigns the task",
"required": False,
"type": "string"},
}
}
},
},
"presentations": {
"_description": {
"description": "This collection describes presentations that group"
"members make at conferences, symposia, seminars and"
"so on."
},
"_id": {
"description": "unique id for the presentation",
"required": True,
"type": "string",
},
"abstract": {
"description": "abstract of the presentation",
"required": False,
"type": "string",
},
"authors": {
"description": "Author list.",
"required": True,
"anyof_type": ["string", "list"],
},
"begin_date": {
"description": "begin date in YYYY-MM-DD",
"anyof_type": ["date", "string"],
},
"end_date": {
"description": "end_date in YYYY-MM-DD",
"anyof_type": ["date", "string"],
},
"begin_year": {
"description": "year the conference or trip begins.",
"required": False,
"type": "integer",
},
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_day": {"required": False, "type": "integer"},
"department": {
"description": "department of the institution where the"
"presentation will be made, if "
"applicable. should be discoverable in "
"institutions.",
"required": False,
"type": "string",
},
"end_year": {
"description": "year the conference or trip ends",
"required": False,
"type": "integer",
},
"end_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"end_day": {"required": False, "type": "integer"},
"institution": {
"description": "institution where the"
"presentation will be made, if "
"applicable.",
"required": False,
"type": "string",
},
"meeting_name": {
"description": "full name of the conference or "
"meeting. If it is a departmental "
"seminar or colloquium, write Seminar"
"or Colloquium and fill in department "
"and institution fields",
"required": False,
"type": "string",
},
# TODO: conditional validation. If type=colloq or seminar, required is
# institution and department, otherwise location
"location": {
"description": "city and {state or country} of meeting",
"required": False,
"type": "string",
},
"notes": {
"description": "any reminder or memory aid about anything",
"required": False,
"anyof_type": ["list", "string"],
},
"project": {
"description": "project or list of projects that this "
"presentation is associated with. Should "
"be discoverable in projects collection",
"required": False,
"anyof_type": ["string", "list"],
},
"status": {
"description": "Is the application in prep or submitted, "
"was the invitation accepted or declined, was "
"the trip cancelled?",
"required": True,
"type": "string",
"eallowed": PRESENTATION_STATUS,
},
"title": {
"description": "title of the presentation",
"required": True,
"type": "string",
},
"type": {
"description": "type of presentation",
"eallowed": PRESENTATION_TYPE,
"required": True,
"type": "string",
},
"webinar": {
"description": "true if a webinar. Default to False",
"required": False,
"type": "boolean",
},
},
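    # Illustrative example of a "presentations" record (hypothetical values;
    # "status" and "type" must come from PRESENTATION_STATUS and
    # PRESENTATION_TYPE, which are defined elsewhere in this module):
    # {"_id": "jdoe-acs-2020", "title": "New Materials", "authors": ["jdoe"],
    #  "meeting_name": "ACS Spring Meeting", "location": "Example City, NY",
    #  "begin_date": "2020-03-22", "end_date": "2020-03-26",
    #  "status": "accepted", "type": "talk"}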
"projects": {
"_description": {
"description": "This collection describes the research group "
"projects. This is normally public data."
},
"_id": {
"description": "Unique project identifier.",
"required": True,
"type": "string",
},
"active": {
"description": "true if the project is active",
"required": False,
"anyof_type": ["string", "boolean"],
},
"description": {
"description": "brief project description.",
"required": True,
"type": "string",
},
"grant": {
"description": "Grant id if there is a grant supporting this " "project",
"required": False,
"type": "string",
},
"group": {
"description": "id for the group in the groups collection whose project this is",
"required": False,
"type": "string",
},
"highlights": {
"description": "list of things to highlight in a report or website, such as releases for for software or high profile publications",
"required": False,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"year": {"description": "the year of the highlight",
"required": True,
"type": "integer"},
"month": {"description": "the month of the highlight",
"required": True,
"anyof_type": ["string", "integer"]},
"description": {"description": "the highlight",
"required": True,
"type": "string"},
}
}
},
"logo": {
"description": "URL to the project logo",
"required": False,
"type": "string",
},
"name": {
"description": "name of the project.",
"required": True,
"type": "string",
},
"other": {
"description": "other information about the project",
"required": False,
"type": ["list", "string"],
},
"repo": {
"description": "URL of the source code repo, if available",
"required": False,
"type": "string",
},
"team": {
"description": "People who are/have been working on this project.",
"required": True,
"schema": {
"type": "dict",
"schema": {
"begin_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"begin_year": {"required": True, "type": "integer"},
"end_month": {"required": False,
"anyof_type": ["string", "integer"],
},
"end_year": {"required": False, "type": "integer"},
"name": {"required": True, "type": "string"},
"position": {"required": True, "type": "string"},
},
},
"type": "list",
},
"type": {
"description": "The type of project",
"required": False,
"anyof_type": ["string"],
"eallowed": PROJECT_TYPE
},
"website": {
"description": "URL of the website.",
"required": False,
"type": "string",
},
},
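    # Illustrative example of a "projects" record (hypothetical values):
    # {"_id": "example-project", "name": "Example Project",
    #  "description": "a brief description of the project",
    #  "team": [{"name": "Jane Doe", "position": "PI", "begin_year": 2019}],
    #  "website": "https://example.org"}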
"proposalReviews": {
"_description": {
"description": "This collection contains reviews of funding proposals"
},
"_id": {
"description": "ID, e.g. 1906_doe_example",
"required": True,
"type": ("string", "integer", "float"),
},
"adequacy_of_resources": {
"description": "Are the resources of the PI adequate",
"required": True,
"type": "list",
},
"agency": {
"description": "currently nsf or doe",
"type": "string",
"eallowed": AGENCIES,
},
"competency_of_team": {
"description": "Is the team competent",
"required": True,
"type": "list",
},
"doe_appropriateness_of_approach": {
"description": "Appropriateness of Research. only used if agency is doe.",
"required": False,
"type": "list",
},
"doe_reasonableness_of_budget": {
"description": "Reasonableness of budget. only used if agency is doe.",
"required": False,
"type": "list",
},
"doe_relevance_to_program_mission": {
"description": "Relevance to program mission. only used if agency is doe.",
"required": False,
"type": "list",
},
"does_how": {
"description": "How will the research be done",
"required": True,
"type": "list",
},
"does_what": {
"description": "What will the team do",
"required": True,
"type": "string",
},
"due_date": {
"description": "date the review is due in ISO format",
"required": True,
"anyof_type": ["string", "date"],
},
"freewrite": {
"description": "Anything and this will appear in the built document"
"right before the summary. This section often used "
"for extra review criteria for the particular proposal",
"required": False,
"type": "list",
},
"goals": {
"description": "What are the main goals of the proposed research",
"required": True,
"type": "list",
},
"importance": {
"description": "The importance of the Research",
"required": True,
"type": "list",
},
"institutions": {
"description": "The institutions of the authors in the same order",
"required": True,
"anyof_type": ["string", "list"]
},
"month": {
"description": "The month the review was submitted",
"required": True,
"anyof_type": ["string", "integer"],
},
"names": {
"description": "The names of the PIs",
"required": True,
"anyof_type": ["list", "string"],
},
"nsf_broader_impacts": {
"description": "The broader impacts of the research. Only used if "
"agency is nsf",
"required": False,
"type": "list",
},
"nsf_create_original_transformative": {
"description": "Answer to the question how the work is creative, "
"original or transformative. Only used if agency is "
"nsf",
"required": False,
"type": "list",
},
"nsf_plan_good": {
"description": "Is the plan good? Only used if agency is nsf",
"required": False,
"type": "list",
},
"nsf_pot_to_advance_knowledge": {
"description": "Answer to the question how the work will advance"
"knowledge. Only used if agency is nsf",
"required": False,
"type": "list",
},
"nsf_pot_to_benefit_society": {
"description": "Answer to the question how the work has the potential"
"to benefit society. Only used if agency is nsf",
"required": False,
"type": "list",
},
"requester": {
"description": "Name of the program officer who requested the review",
"required": True,
"type": "string",
},
"reviewer": {
"description": "short name of the reviewer. Will be used in the "
"filename of the resulting text file",
"required": True,
"type": "string",
},
"status": {
"description": "the status of the review",
"type": "string",
"eallowed": REVIEW_STATI,
},
"summary": {
"description": "Summary statement",
"required": True,
"type": "string",
},
"title": {
"description": "The title of the proposal",
"required": True,
"type": "string",
},
"year": {
"description": "The year the review was submitted",
"required": True,
"type": "integer",
},
},
"proposals": {
"_description": {
"description": "This collection represents proposals that have "
"been submitted by the group."
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": ("string", "integer", "float"),
},
"amount": {
"description": "value of award",
"required": True,
"type": ("integer", "float"),
},
"authors": {
"description": "other investigator names",
"required": False,
"anyof_type": ["list", "string"],
},
"begin_date": {
"description": "start date of the proposed grant in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"]
},
"begin_day": {
"description": "start day of the proposed grant",
"required": False,
"type": "integer",
},
"begin_month": {
"description": "start month of the proposed grant",
"required": False,
"anyof_type": ["string", "integer"]
},
"begin_year": {
"description": "start year of the proposed grant",
"required": False,
"type": "integer",
},
"call_for_proposals": {
"description": "",
"required": False,
"type": "string",
},
"cpp_info": {
"description": "extra information needed for building current and "
"pending form ",
"required": False,
"schema": {
"cppflag": {"required": False, "type": "boolean"},
"other_agencies_submitted": {"required": False,
"anyof_type": ["string", "boolean"]},
"institution": {"required": False, "type": "string",
"description": "place where the proposed grant will be located"},
"person_months_academic": {"required": False,
"anyof_type": ["float", "integer"]},
"person_months_summer": {"required": False,
"anyof_type": ["float", "integer"]},
"project_scope": {"required": False, "type": "string"},
"single_pi": {"required": False, "type": "boolean",
"description": "set to true if there are no co-pi's"},
},
"type": "dict",
},
"currency": {
"description": "typically '$' or 'USD'",
"required": True,
"type": "string",
},
"due_date": {
"description": "day that the proposal is due",
"required": False,
"anyof_type": ["string", "date"],
},
"duration": {
"description": "number of years",
"required": False,
"type": ("integer", "float"),
},
"end_date": {
"description": "end date of the proposed grant in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"]
},
"end_day": {
"description": "end day of the proposed grant",
"required": False,
"type": ("string", "integer"),
},
"end_month": {
"description": "end month of the proposed grant",
"required": False,
"anyof_type": ["string", "integer"]
},
"end_year": {
"description": "end year of the proposed grant",
"required": False,
"type": "integer",
},
"funder": {
"description": "who will fund the proposal"
"as funder in grants",
"required": False,
"type": "string",
},
"full": {
"description": "full body of the proposal",
"required": False,
"type": "dict",
},
"notes": {
"description": "anything you want to note",
"required": False,
"anyof_type": ["string", "list"],
},
"pi": {
"description": "principal investigator name",
"required": True,
"type": "string",
},
"pre": {
"description": "Information about the pre-proposal",
"required": False,
"type": "dict",
},
"status": {
"description": "e.g. 'pending', 'accepted', 'declined'",
"required": True,
"type": "string",
"eallowed": PROPOSAL_STATI,
},
"submitted_date": {
"description": "date that the proposal was submitted",
"required": False,
"anyof_type": ["string", "date"],
},
"submitted_day": {
"description": "day that the proposal was submitted",
"required": False,
"type": "integer",
},
"submitted_month": {
"description": "month that the proposal was submitted",
"required": False,
"anyof_type": ["string", "integer"]
},
"submitted_year": {
"description": "Year that the proposal was submitted",
"required": False,
"type": "integer",
},
"team": {
"description": "information about the team members participating "
"in the grant.",
"required": False,
"schema": {
"schema": {
"cv": {"required": False, "type": "string"},
"email": {"required": False, "type": "string"},
"institution": {"required": False, "type": "string"},
"name": {"required": False, "type": "string"},
"position": {"required": False, "type": "string"},
"subaward_amount": {
"required": False,
"type": ("integer", "float"),
},
},
"type": "dict",
},
"type": "list",
},
"title": {
"description": "actual title of proposal",
"required": True,
"type": "string",
},
"title_short": {
"description": "short title of proposal",
"required": False,
"type": "string",
},
},
"refereeReports": {
"_description": {
"description": "This is a collection of information that will be "
"be used to build a referee report. This should probably be private."
},
"_id": {"description": "the ID", "required": True, "type": "string"},
"claimed_found_what": {
"description": "What the authors claim to have found",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"claimed_why_important": {
"description": "What importance the authors claim",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"did_how": {
"description": "How the study was done",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"did_what": {
"description": "What the study was",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"due_date": {
"description": "date the review is due in ISO format",
"required": True,
"anyof_type": ["string", "date"],
},
"editor_eyes_only": {
"description": "Comments you don't want passed to the author",
"required": False,
"type": "string",
},
"final_assessment": {
"description": "Summary of impressions of the study",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"first_author_last_name": {
"description": "Last name of first author will be referred to "
"with et al.",
"required": True,
"type": "string",
},
"freewrite": {
"description": "Things that you want to add that don't fit into "
"any category above",
"required": False,
"type": "string",
},
"journal": {
"description": "name of the journal",
"required": True,
"type": "string",
},
"month": {
"description": "the month the entry was created",
"required": False,
"anyof_type": ["string", "integer"]
},
"recommendation": {
"description": "Your publication recommendation",
"required": True,
"type": "string",
"eallowed": REVIEW_RECOMMENDATION,
},
"requester": {
"description": "Name of the program officer who requested the review",
"required": True,
"type": "string",
},
"reviewer": {
"description": "name of person reviewing the paper",
"required": True,
"type": "string",
},
"status": {
"description": "Where you are with the review",
"required": True,
"type": "string",
"eallowed": REVIEW_STATI,
},
"submitted_date": {
"description": "submitted date in ISO YYYY-MM-DD format",
"required": True,
"anyof_type": ["string", "date"],
},
"title": {
"description": "title of the paper under review",
"required": True,
"type": "string",
},
"validity_assessment": {
"description": "List of impressions of the validity of the claims",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"year": {
"description": "year when the review is being done",
"required": True,
"anyof_type": ["string", "integer"],
},
},
"students": {
"_description": {
"description": "This is a collection of student names and "
"metadata. This should probably be private."
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": "string",
},
"aka": {
"description": "list of aliases",
"required": False,
"schema": {"type": "string"},
"type": ("list", "string"),
},
"email": {"description": "email address", "required": False,
"type": "string"},
"university_id": {
"description": "The university identifier for the student",
"required": False,
"type": "string",
},
},
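    # Illustrative example of a "students" record (hypothetical values):
    # {"_id": "Doe, Jane", "aka": ["J. Doe"], "email": "jdoe@example.edu",
    #  "university_id": "JD12345"}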
}
for s in SCHEMAS:
SCHEMAS[s]["files"] = {
"description": "Files associated with the document",
        # TODO: fix this since this is currently coming out as a CommentedMap
# "type": "list",
# "schema": {"type": "string"},
"required": False,
}
class NoDescriptionValidator(Validator):
def _validate_description(self, description, field, value):
"""Don't validate descriptions
The rule's arguments are validated against this schema:
{'type': 'string'}"""
if False:
pass
def _validate_eallowed(self, eallowed, field, value):
"""Test if value is in list
The rule's arguments are validated against this schema:
{'type': 'list'}
"""
if value not in eallowed:
warn(
'"{}" is not in the preferred entries for "{}", please '
"consider changing this entry to conform or add this to the "
"``eallowed`` field in the schema.".format(value, field)
)
def validate(coll, record, schemas):
"""Validate a record for a given db
Parameters
----------
coll : str
The name of the db in question
record : dict
The record to be validated
schemas : dict
The schema to validate against
Returns
-------
rtn : bool
        True if the record is valid
errors: dict
The errors encountered (if any)
"""
if coll in schemas:
schema = copy.deepcopy(schemas[coll])
v = NoDescriptionValidator(schema)
return v.validate(record), v.errors
else:
return True, ()
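# Minimal usage sketch for validate() with a hypothetical record (not part of
# the original module):
#
#     record = {"_id": "Doe, Jane", "email": "jdoe@example.edu"}
#     valid, errors = validate("students", record, SCHEMAS)
#     if not valid:
#         print(errors)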
| 39.058422 | 145 | 0.424287 | ["CC0-1.0"] | priyankaanehra/regolith | regolith/schemas.py | 165,807 | Python |
from .services import * # noqa
| 16 | 31 | 0.6875 | ["BSD-3-Clause"] | sot/mica | mica/archive/cda/__init__.py | 32 | Python |
import pysftp
import os.path
def upload_sprog_to_namecheap(tmpdir, passwords):
with pysftp.Connection(passwords["NAMECHEAP_SERVER"], username=passwords["NAMECHEAP_USERNAME"],
password=passwords["NAMECHEAP_PASSWORD"], port=passwords["NAMECHEAP_PORT"]) as sftp:
with sftp.cd('public_html'):
print("uploading sprog.pdf")
sftp.put("sprog.pdf")
print("uploading sprog_small.pdf")
sftp.put("small_sprog.pdf", "sprog_small.pdf")
print("uploading sprog.html")
sftp.put(os.path.join(tmpdir, "sprog.html"), "sprog.html")
print("uploading sprog.json")
sftp.put("poems.json", "poems.json")
print("uploading sprog.json.gz")
sftp.put("poems.json.gz", "poems.json.gz")
print("uploading sprog_60days.json.gz")
sftp.put("poems_60days.json.gz", "poems_60days.json.gz")
print("uploading sprog.rss")
sftp.put("sprog.rss", "sprog.rss")
print("uploading sprog_no_context.rss")
sftp.put("sprog_no_context.rss", "sprog_no_context.rss")
| 45.8 | 111 | 0.611354 | ["MIT", "BSD-3-Clause"] | PaulKlinger/Sprog-Backend | sprog/namecheap_ftp_upload.py | 1,145 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from handlers.Home_Core import Home_Core
class ITM_Core(Home_Core):
def __init__(self, request, response, itm_db):
super(ITM_Core, self).__init__(request, response)
self.itmDB = itm_db
self.db = itm_db.db
| 23.636364 | 51 | 0.723077 | ["BSD-3-Clause"] | Jai-Chaudhary/termite-data-server | server_src/modules/handlers/ITM_Core.py | 260 | Python |
from typing import Dict, List, TypeVar, Generic
import warnings
import torch
import numpy
from allennlp.common import Registrable
from allennlp.data.tokenizers.token import Token
from allennlp.data.vocabulary import Vocabulary
TokenType = TypeVar("TokenType", int, List[int], numpy.ndarray)
class TokenIndexer(Generic[TokenType], Registrable):
"""
A ``TokenIndexer`` determines how string tokens get represented as arrays of indices in a model.
This class both converts strings into numerical values, with the help of a
:class:`~allennlp.data.vocabulary.Vocabulary`, and it produces actual arrays.
Tokens can be represented as single IDs (e.g., the word "cat" gets represented by the number
34), or as lists of character IDs (e.g., "cat" gets represented by the numbers [23, 10, 18]),
or in some other way that you can come up with (e.g., if you have some structured input you
want to represent in a special way in your data arrays, you can do that here).
# Parameters
token_min_padding_length : ``int``, optional (default=``0``)
The minimum padding length required for the :class:`TokenIndexer`. For example,
the minimum padding length of :class:`SingleIdTokenIndexer` is the largest size of
filter when using :class:`CnnEncoder`.
Note that if you set this for one TokenIndexer, you likely have to set it for all
:class:`TokenIndexer` for the same field, otherwise you'll get mismatched tensor sizes.
"""
default_implementation = "single_id"
has_warned_for_as_padded_tensor = False
def __init__(self, token_min_padding_length: int = 0) -> None:
self._token_min_padding_length: int = token_min_padding_length
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
"""
The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training
data (possibly doing some frequency filtering and using an OOV, or out of vocabulary,
token). This method takes a token and a dictionary of counts and increments counts for
whatever vocabulary items are present in the token. If this is a single token ID
representation, the vocabulary item is likely the token itself. If this is a token
characters representation, the vocabulary items are all of the characters in the token.
"""
raise NotImplementedError
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary, index_name: str
) -> Dict[str, List[TokenType]]:
"""
Takes a list of tokens and converts them to one or more sets of indices.
This could be just an ID for each token from the vocabulary.
Or it could split each token into characters and return one ID per character.
Or (for instance, in the case of byte-pair encoding) there might not be a clean
mapping from individual tokens to indices.
"""
raise NotImplementedError
def get_padding_token(self) -> TokenType:
"""
Deprecated. Please just implement the padding token in `as_padded_tensor` instead.
TODO(Mark): remove in 1.0 release. This is only a concrete implementation to preserve
        backward compatibility, otherwise it would be abstract.
When we need to add padding tokens, what should they look like? This method returns a
"blank" token of whatever type is returned by :func:`tokens_to_indices`.
"""
warnings.warn(
"Using a Field with get_padding_token as an inherited method,"
" which will be depreciated in 1.0.0."
"Please implement as_padded_tensor instead.",
FutureWarning,
)
return 0 # type: ignore
def get_padding_lengths(self, token: TokenType) -> Dict[str, int]:
"""
This method returns a padding dictionary for the given token that specifies lengths for
all arrays that need padding. For example, for single ID tokens the returned dictionary
will be empty, but for a token characters representation, this will return the number
of characters in the token.
"""
raise NotImplementedError
def get_token_min_padding_length(self) -> int:
"""
This method returns the minimum padding length required for this TokenIndexer.
For example, the minimum padding length of `SingleIdTokenIndexer` is the largest
size of filter when using `CnnEncoder`.
"""
return self._token_min_padding_length
def as_padded_tensor(
self,
tokens: Dict[str, List[TokenType]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, torch.Tensor]:
"""
This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list
of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens``
then it will be truncated.
``padding_lengths`` is used to provide supplemental padding parameters which are needed
in some cases. For example, it contains the widths to pad characters to when doing
character-level padding.
        Note that this method should be abstract, but it is implemented to allow backward compatibility.
"""
if not self.has_warned_for_as_padded_tensor:
warnings.warn(
"Using a Field with pad_token_sequence, which will be depreciated in 1.0.0."
"Please implement as_padded_tensor instead.",
FutureWarning,
)
self.has_warned_for_as_padded_tensor = True
padded = self.pad_token_sequence(tokens, desired_num_tokens, padding_lengths)
return {key: torch.LongTensor(array) for key, array in padded.items()}
def pad_token_sequence(
self,
tokens: Dict[str, List[TokenType]],
desired_num_tokens: Dict[str, int],
padding_lengths: Dict[str, int],
) -> Dict[str, TokenType]:
"""
Deprecated. Please use `as_padded_tensor` instead.
TODO(Mark): remove in 1.0 release.
"""
raise NotImplementedError
def get_keys(self, index_name: str) -> List[str]:
"""
        Return a list of the keys this indexer returns from ``tokens_to_indices``.
"""
return [index_name]
def __eq__(self, other) -> bool:
if isinstance(self, other.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
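# Illustrative sketch: a minimal concrete indexer that maps each token to a
# single id, mirroring what the docstrings above describe. The class name is
# arbitrary; real code should prefer the registered "single_id" implementation
# (SingleIdTokenIndexer).
class SketchSingleIdTokenIndexer(TokenIndexer[int]):
    def __init__(self, namespace: str = "tokens", token_min_padding_length: int = 0) -> None:
        super().__init__(token_min_padding_length)
        self._namespace = namespace
    def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
        # Assumes `counter` behaves like a nested defaultdict of ints, as the
        # vocabulary-building code passes it.
        counter[self._namespace][token.text] += 1
    def tokens_to_indices(
        self, tokens: List[Token], vocabulary: Vocabulary, index_name: str
    ) -> Dict[str, List[int]]:
        return {
            index_name: [vocabulary.get_token_index(t.text, self._namespace) for t in tokens]
        }
    def get_padding_lengths(self, token: int) -> Dict[str, int]:
        # Single-id tokens need no per-token padding information.
        return {}
    def as_padded_tensor(
        self,
        tokens: Dict[str, List[int]],
        desired_num_tokens: Dict[str, int],
        padding_lengths: Dict[str, int],
    ) -> Dict[str, torch.Tensor]:
        # Truncate or zero-pad each id list to the desired length.
        padded = {
            key: ids[:desired_num_tokens[key]] + [0] * (desired_num_tokens[key] - len(ids))
            for key, ids in tokens.items()
        }
        return {key: torch.LongTensor(ids) for key, ids in padded.items()}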
| 44.234899 | 104 | 0.678046 | ["Apache-2.0"] | loopylangur/allennlp | allennlp/data/token_indexers/token_indexer.py | 6,591 | Python |
from pyowm import OWM
owm = OWM('21ff51d901692fd3e2f5ecc04d3617f1')
place = input('Input Place: ')
mgr = owm.weather_manager()
observation = mgr.weather_at_place(place)
w = observation.weather
wind = w.detailed_status  # short text summary of current conditions, e.g. "clear sky"
t = w.temperature('celsius')  # dict of temperature values in Celsius
print(wind)
print(t)
exit_ = input('')  # wait for Enter so the console window stays open
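# Illustrative sketch: the same pyowm calls as above wrapped into a reusable
# helper. The function name and parameters are arbitrary, and a valid OWM API
# key must be supplied by the caller.
def fetch_weather(api_key, place_name):
    manager = OWM(api_key).weather_manager()
    weather = manager.weather_at_place(place_name).weather
    # detailed_status is a short text summary; temperature('celsius') is a dict.
    return weather.detailed_status, weather.temperature('celsius')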
| 24.75 | 46 | 0.727273 | ["MIT"] | Geimers228/PyDev | weather.py | 297 | Python |
from __future__ import print_function
import os
import datetime
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_method
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import Query, scoped_session, sessionmaker
from sqlalchemy_mixins import SmartQueryMixin, ReprMixin, JOINED, smart_query
def log(msg):
print('\n{}\n'.format(msg))
#################### setup ######################
Base = declarative_base()
# we also use ReprMixin which is optional
class BaseModel(Base, SmartQueryMixin, ReprMixin):
__abstract__ = True
__repr__ = ReprMixin.__repr__
pass
class User(BaseModel):
__tablename__ = 'user'
__repr_attrs__ = ['name']
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
# to smart query relationship, it should be explicitly set,
# not to be a backref
posts = sa.orm.relationship('Post')
comments = sa.orm.relationship('Comment')
# below relationship will just return query (without executing)
# this query can be customized
# see http://docs.sqlalchemy.org/en/latest/orm/collections.html#dynamic-relationship
#
# we will use this relationship for demonstrating real-life example
# of how smart_query() function works (see 3.2.2)
comments_ = sa.orm.relationship('Comment', lazy="dynamic") # this will return query
class Post(BaseModel):
__tablename__ = 'post'
id = sa.Column(sa.Integer, primary_key=True)
body = sa.Column(sa.String)
user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
archived = sa.Column(sa.Boolean, default=False)
# to smart query relationship, it should be explicitly set,
# not to be a backref
user = sa.orm.relationship('User')
comments = sa.orm.relationship('Comment')
@hybrid_property
def public(self):
return not self.archived
@public.expression
def public(cls):
return ~cls.archived
@hybrid_method
def is_commented_by_user(cls, user, mapper=None):
# in real apps, Comment class can be obtained from relation
# to avoid cyclic imports like so:
# Comment = cls.comments.property.argument()
mapper = mapper or cls
# from sqlalchemy import exists
# return exists().where((Comment.post_id == mapper.id) & \
# (Comment.user_id == user.id))
return mapper.comments.any(Comment.user_id == user.id)
@hybrid_method
def is_public(cls, value, mapper=None):
# in real apps, Comment class can be obtained from relation
# to avoid cyclic imports like so:
# Comment = cls.comments.property.argument()
mapper = mapper or cls
return mapper.public == value
class Comment(BaseModel):
__tablename__ = 'comment'
__repr_attrs__ = ['body']
id = sa.Column(sa.Integer, primary_key=True)
body = sa.Column(sa.String)
user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
post_id = sa.Column(sa.Integer, sa.ForeignKey('post.id'))
rating = sa.Column(sa.Integer)
created_at = sa.Column(sa.DateTime)
# to smart query relationship, it should be explicitly set,
# not to be a backref
user = sa.orm.relationship('User')
post = sa.orm.relationship('Post')
#################### setup ORM ######################
db_file = os.path.join(os.path.dirname(__file__), 'test.sqlite')
engine = create_engine('sqlite:///{}'.format(db_file), echo=True)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
session = scoped_session(sessionmaker(bind=engine))
BaseModel.set_session(session)
#################### setup some data ######################
u1 = User(name='Bill u1')
session.add(u1)
session.commit()
u2 = User(name='Alex u2')
session.add(u2)
session.commit()
u3 = User(name='Bishop u3')
session.add(u3)
session.commit()
session.commit()
p11 = Post(
id=11,
body='1234567890123',
archived=True,
user=u1
)
session.add(p11)
session.commit()
p12 = Post(
id=12,
body='1234567890',
user=u1
)
session.add(p12)
session.commit()
p21 = Post(
id=21,
body='p21',
user=u2
)
session.add(p21)
session.commit()
p22 = Post(
id=22,
body='p22',
user=u2
)
session.add(p22)
session.commit()
cm11 = Comment(
id=11,
body='cm11',
user=u1,
post=p11,
rating=1,
created_at=datetime.datetime(2014, 1, 1)
)
session.add(cm11)
session.commit()
cm12 = Comment(
id=12,
body='cm12',
user=u2,
post=p12,
rating=2,
created_at=datetime.datetime(2015, 10, 20)
)
session.add(cm12)
session.commit()
cm21 = Comment(
id=21,
body='cm21',
user=u1,
post=p21,
rating=1,
created_at=datetime.datetime(2015, 11, 21)
)
session.add(cm21)
session.commit()
cm22 = Comment(
id=22,
body='cm22',
user=u3,
post=p22,
rating=3,
created_at=datetime.datetime(2016, 11, 20)
)
session.add(cm22)
session.commit()
cm_empty = Comment(
id=29,
# no body
# no user
# no post
# no rating
)
session.add(cm_empty)
session.commit()
#################### Demo ######################
# ['id', 'body', 'user_id', 'archived', # normal columns
# 'user', 'comments', # relations
# 'public', # hybrid attributes
# 'is_public', 'is_commented_by_user' # hybrid methods
# ]
log(Post.filterable_attributes)
#### 1. Filters ####
##### 1.1 filter by hybrid_property 'public' #####
# low-level filter_expr()
log(session.query(Post).filter(*Post.filter_expr(user=u1, public=True)).all())
# high-level SmartQueryMixin.where() method
log(Post.where(user=u1, public=True).all())
# you can unpack a dict (in a real-world app you will do this)
filters = {'user': u1, 'public': True}
log(Post.where(**filters).all())
##### 1.2 filter by hybrid_method 'is_commented_by_user' #####
# low-level filter_expr()
log(session.query(Post).filter(
*Post.filter_expr(is_commented_by_user=u1)).all())
# high-level SmartQueryMixin.where() method
log(Post.where(is_commented_by_user=u1).all())
##### 1.3 operators #####
# rating == None
log(Comment.where(rating=None).all()) # cm_empty
log(Comment.where(rating__isnull=2).all()) # cm_empty
# rating == 2
# when no operator, 'exact' operator is assumed
log(Comment.where(rating=2).all()) # cm12
# assumed
log(Comment.where(rating__exact=2).all()) # cm12
# rating > 2
log(Comment.where(rating__gt=2).all()) # cm22
# rating >= 2
log(Comment.where(rating__ge=2).all()) # cm12, cm22
# rating < 2
log(Comment.where(rating__lt=2).all()) # cm11, cm21
# rating <= 2
log(Comment.where(rating__le=2).all()) # cm11, cm12, cm21
# rating in [1,3]
log(Comment.where(rating__in=[1, 3]).all()) # cm11, cm21, cm22
log(Comment.where(rating__in=(1, 3)).all()) # cm11, cm21, cm22
log(Comment.where(rating__in={1, 3}).all()) # cm11, cm21, cm22
# rating between 2 and 3
log(Comment.where(rating__between=[2, 3]).all()) # cm12, cm22
log(Comment.where(rating__between=(2, 3)).all()) # cm12, cm22
# likes
log(Comment.where(body__like=u'cm12 to p12').all()) # cm12
log(Comment.where(body__like='%cm12%').all()) # cm12
log(Comment.where(body__ilike='%CM12%').all()) # cm12
log(Comment.where(body__startswith='cm1').all()) # cm11, cm12
log(Comment.where(body__istartswith='CM1').all()) # cm11, cm12
log(Comment.where(body__endswith='to p12').all()) # cm12
log(Comment.where(body__iendswith='TO P12').all()) # cm12
# dates
# year
log(Comment.where(created_at__year=2014).all()) # cm11
log(Comment.where(created_at__year=2015).all()) # cm12, cm21
# month
log(Comment.where(created_at__month=1).all()) # cm11
log(Comment.where(created_at__month=11).all()) # cm21, cm22
# day
log(Comment.where(created_at__day=1).all()) # cm11
log(Comment.where(created_at__day=20).all()) # cm12, cm22
# whole date
log(Comment.where(created_at__year=2014, created_at__month=1,
created_at__day=1).all()) # cm11
##### 1.4 where() with auto-joined relations #####
# when have no joins, where() is a shortcut for filter_expr
log(session.query(Comment).filter(
*Comment.filter_expr(rating__gt=2, body__startswith='cm1')).all())
log(Comment.where(rating__gt=2, body__startswith='cm1').all())
# but where() can automatically join relations
# users having posts which are commented by user 2
log(User.where(posts___comments___user_id=u2.id).all())
# comments where user name starts with 'Bi'
# !! ATTENTION !!
# about Comment.post:
# although we have Post.comments relationship,
# it's important to **add relationship Comment.post** too,
# not just use backref !!!
log(Comment.where(user___name__startswith='Bi').all())
# non-public posts commented by user 1
log(Post.where(public=False, is_commented_by_user=u1).all())
#### 2. sort ####
#### 2.1 simple demo ####
##### 2.1.1 low-level order_expr()
# '-rating', 'created_at' means 'ORDER BY rating DESC, created_at ASC'
log(session.query(Comment).order_by(
*Comment.order_expr('-rating', 'created_at')).all())
##### 2.1.2 high-level sort()
log(Comment.sort('-rating', 'created_at'))
# in real-world apps, you will keep attrs in a list
sort_attrs = ['-rating', 'created_at']
log(Comment.sort(*sort_attrs))
##### 2.1.3 hybrid properties
log(session.query(Post).order_by(*Post.order_expr('-public')).all())
log(Post.sort('-public').all())
#### 2.2 sort() with auto-joined relations ####
# sort by name of user ASC (user relation will be auto-joined), then by
# created_at DESC
log(Comment.sort('user___name', '-created_at').all())
# get comments on public posts first, then order by post user name
# Post and User tables will be auto-joined
log(Comment.sort('-post___public', 'post___user___name').all())
#### 3. smart_query() : combination of where(), sort() and eager load ####
schema = {
'post': {
'user': JOINED
}
}
# schema can use class properties too (see EagerLoadMixin):
# schema = {
# Comment.post: {
# Post.user: JOINED
# }
# }
##### 3.1 high-level smart_query() class method #####
res = Comment.smart_query(
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm12, cm21, cm22
##### 3.2 more flexible smart_query() function #####
##### 3.2.1. The same as 3.1
query = Comment.query # could be any query you want
res = smart_query(query,
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm12, cm21, cm22
##### 3.2.2. Real-life example with lazy='dynamic' relationship
# let's imagine we want to display some user relations
# and flexibly filter, sort and eagerload them
# like this http://www.qopy.me/LwfSCu_ETM6At6el8wlbYA
# (no sort on the screenshot, but you've got the idea)
# so we have a user
user = session.query(User).first()
# and we have initial query for his/her comments
# (see User.comments_ relationship)
query = user.comments_
# now we just smartly apply all filters, sorts and eagerload. Perfect!
res = smart_query(query,
filters={
'post___public': True,
'user__isnull': False
},
sort_attrs=['user___name', '-created_at'],
schema=schema).all()
log(res) # cm21
##### 3.3 auto eager load in where() and sort() with auto-joined relations ####
"""
smart_query() does auto-joins for filtering/sorting,
so it makes sense to tell sqlalchemy that we already joined those relations.
So we test that relations are set to be joinedload
if they were used in smart_query()
"""
##### 3.3.1 where()
# comments on public posts where posted user name like ...
res = Comment.where(post___public=True, post___user___name__like='Bi%').all()
log(res)
# no additional query needed: we used 'post' and 'post__user'
# relations in smart_query()
log(res[0].post)
log(res[0].post.user)
# we didn't use post___comments in filters, so additional query is needed
log(res[0].post.comments)
##### 3.3.2 sort()
res = Comment.sort('-post___public', 'post___user___name').all()
log(res)
# no additional query needed: we used 'post' and 'post__user'
# relations in smart_query()
log(res[0].post)
log(res[0].post.user)
# we didn't use post___comments in filters, so additional query is needed
log(res[0].post.comments)
| 28.272311 | 88 | 0.671712 | ["MIT"] | AdamGold/sqlalchemy-mixins | examples/smartquery.py | 12,355 | Python |
#! /usr/bin/env python
import rospy
from std_msgs.msg import Int32
def cb(message):
    # Log twice the received value.
    rospy.loginfo(message.data*2)
    # This prints the rospy.loginfo function object itself, not a message.
    print (rospy.loginfo)
    # Print '0' for an even value, '1' for an odd one.
    if message.data%2 == 0:
        print ('0')
    elif message.data%2 != 0 :
        print('1')
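# Illustrative sketch: a minimal publisher for the 'count_up' topic that this
# node subscribes to. The function name and the 1 Hz rate are arbitrary, and
# it assumes rospy.init_node() has already been called.
def publish_count_up_demo():
    pub = rospy.Publisher('count_up', Int32, queue_size=1)
    rate = rospy.Rate(1)
    n = 0
    while not rospy.is_shutdown():
        n += 1
        pub.publish(n)
        rate.sleep()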
if __name__ == '__main__':
rospy.init_node('twice')
sub = rospy.Subscriber('count_up', Int32, cb)
rospy.spin()
| 18.6 | 49 | 0.612903 | ["BSD-3-Clause"] | note032/ROS_robosys | mypkg/scripts/twice.py | 372 | Python |
import json
from typing import List, Dict
from icecream import ic
from compiler_idioms.idiom.instruction_sequence import InstructionSequence
from compiler_idioms.idiom.utils.magic import compute_magic_numbers_if_not_exists
from compiler_idioms.instruction import from_anonymized_pattern, Instruction
from compiler_idioms.match import Match
from config import TEST_DIR, ROOT
#TEST_PATTERN_PATH = TEST_DIR / "mods-pointer.json"
TEST_PATTERN_PATH = TEST_DIR / "patterns-mods-O0.json"
PATTERN_DIR = ROOT / 'patterns'
HEX_BASE = 16
class SignedRemainderInstructionSequence(InstructionSequence):
def __init__(self):
sequences = self._load_sequences_from_file()
# with TEST_PATTERN_PATH.open('r') as f:
# seq = json.load(f)
# print(seq)
# sequences = [from_anonymized_pattern(seq['pattern'])]
self.magic_table = compute_magic_numbers_if_not_exists()
super().__init__(sequences)
def search(self, sequence: List[Instruction], original_constants: Dict[str, str], original_registers: Dict[str, str]) -> Match:
if match := super().search(sequence, original_constants, original_registers):
match.operation = "modulo"
match.operand = self._get_register_operand(original_registers)
match.constant = self._get_original_constant_from_magic(original_constants)
if not match.constant:
return None
return match
def _get_register_operand(self, original_registers: Dict[str, str]):
return original_registers.get("reg_1", [])
def _get_original_constant_from_magic(self, original_constants: Dict[str, str]) -> int:
magic = int(original_constants.get("const_0"), HEX_BASE)
power = int(original_constants.get("const_1"), HEX_BASE) + int(original_constants.get("const_2"), HEX_BASE)
return self.magic_table.get((magic, power))
@staticmethod
def _load_sequences_from_file():
sequences = []
        for pattern_file in PATTERN_DIR.glob("*mods*"):
            try:
                with pattern_file.open("r") as f:
data = json.load(f)
for seq in data:
pattern = seq.get("sequence")
anonymized_instruction_list = from_anonymized_pattern(pattern)
if anonymized_instruction_list:
sequences.append(anonymized_instruction_list)
except FileNotFoundError as e:
print("No file for division found")
return sequences
if __name__ == "__main__":
idiom = SignedRemainderInstructionSequence()
print(idiom.magic_table)
| 40.545455 | 131 | 0.67713 | ["MIT"] | fkie-cad/pidarci | compiler_idioms/idiom/implementations/remainder_signed_todo.py | 2,676 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2015, IBM Corp.
# All rights reserved.
#
# Distributed under the terms of the BSD Simplified License.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
"""
Test module for IdaDataFrameObjects
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from future import standard_library
standard_library.install_aliases()
import pandas
import pytest
import six
import ibmdbpy
from ibmdbpy import IdaDataBase
class Test_OpenDataFrameObject(object):
def test_idadf_attr_idadb(self, idadf):
assert isinstance(idadf._idadb, IdaDataBase)
def test_idadf_attr_name(self, idadf, df):
assert isinstance(idadf.name, six.string_types)
assert idadf.name == idadf.schema + "." + "TEST_IBMDBPY"
assert idadf.name == idadf.schema + "." + idadf.tablename
def test_idadf_attr_schema(self, idadf):
assert isinstance(idadf.schema, six.string_types)
def test_idadf_attr_indexer(self, idadf):
assert (isinstance(idadf.indexer, six.string_types)|(idadf.indexer is None))
# TODO : Check more deeply the indexer
def test_idadf_attr_loc(self, idadf):
assert isinstance(idadf.loc, ibmdbpy.indexing.Loc)
def test_idadf_attr_internalstate(self, idadf):
assert isinstance(idadf.internal_state, ibmdbpy.internals.InternalState)
def test_idadf_attr_type(self, idadf):
assert isinstance(idadf.type, six.string_types)
assert idadf.type == "Table"
def test_idadf_atrr_dtypes(self, idadf, df):
assert isinstance(idadf.dtypes, pandas.core.frame.DataFrame)
assert len(idadf.dtypes) == len(idadf.columns)
assert len(idadf.dtypes) == len(df.columns)
def test_idadf_attr_index(self, idadf, df):
# Ok, but what do we do if too big ?
assert type(idadf.index) in [pandas.Int64Index, pandas.Index, pandas.RangeIndex] # Not sure here
assert list(idadf.index) == list(df.index)
def test_idadf_attr_columns(self, idadf, df):
assert isinstance(idadf.columns, pandas.core.index.Index)
assert idadf.columns.equals(df.columns)
def test_idadf_attr_axes(self, idadf):
assert isinstance(idadf.axes, list)
assert len(idadf.axes) == 2
assert idadf.axes[1].equals(idadf.columns)
assert list(idadf.axes[0]) == list(idadf.index)
def test_idadf_attr_shape(self, idadf, df):
assert isinstance(idadf.shape, tuple)
assert len(idadf.shape) == 2
assert idadf.shape[0] == len(idadf.index)
assert idadf.shape[1] == len(idadf.columns)
assert idadf.shape == df.shape
def test_idadf_empty(self, idadb, df):
idadb._create_table(df, "TEST_EMPTY_3496593727406047264076")
to_test = ibmdbpy.IdaDataFrame(idadb, "TEST_EMPTY_3496593727406047264076")
assert(to_test.empty is True)
idadb.drop_table("TEST_EMPTY_3496593727406047264076")
def test_idadf_len(self, idadf, df):
assert(len(idadf) == len(df))
def test_idadf_iter(self, idadf, df):
for idacol, col in zip(idadf, df):
assert(idacol == col)
class Test_IdaDataFrameBehavior(object):
def test_idadf_getitem_1_col_idadf(self, idadf):
if len(idadf.columns) >= 1:
newidadf = idadf[[idadf.columns[0]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 1)
assert(idadf.columns[0] == newidadf.columns[0])
# We don't check of it is actually the corresponding column
newidadf = idadf[[idadf.columns[-1]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 1)
assert(idadf.columns[-1] == newidadf.columns[0])
def test_idadf_getitem_1_col_idadf_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf[["NOTEXISTING_COLUMN_455849820205"]]
def test_idadf_getitem_2_cols_idadf(self, idadf):
if len(idadf.columns) >= 2:
newidadf = idadf[[idadf.columns[0], idadf.columns[-1]]]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == 2)
assert(idadf.columns[0] == newidadf.columns[0])
assert(idadf.columns[-1] == newidadf.columns[-1])
def test_idadf_getitem_2_cols_idadf_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf[[idadf.columns[0], "NOTEXISTING_COLUMN_455849820205"]]
    # TODO : FIX If you select the same column twice, only one will be taken into account
    # (This is because they are referenced in a dictionary; maybe force modifying the names of the columns)
def test_idadf_getitem_all_cols_idadf(self, idadf):
if len(idadf.columns) >= 1:
newidadf = idadf[list(idadf.columns)]
assert(isinstance(newidadf, ibmdbpy.IdaDataFrame) is True)
assert(len(newidadf.columns) == len(idadf.columns))
assert(newidadf.shape == idadf.shape)
def test_idadf_getitem_idaseries(self, idadf):
if len(idadf.columns) >= 1:
newidaseries = idadf[idadf.columns[0]]
assert(isinstance(newidaseries, ibmdbpy.IdaSeries))
assert(len(newidaseries.columns) == 1)
assert(idadf.columns[0] == newidaseries.columns[0])
newidaseries = idadf[idadf.columns[-1]]
assert(isinstance(newidaseries, ibmdbpy.IdaDataFrame))
assert(len(newidaseries.columns) == 1)
assert(idadf.columns[-1] == newidaseries.columns[0])
def test_idadf_getitem_idaseries_keyerror(self, idadf):
with pytest.raises(KeyError):
idadf["NOTEXISTING_COLUMN_455849820205"]
def test_idadf_getitem_idaseries_keyerror_several_columns(self, idadf):
if len(idadf.columns) >= 2:
with pytest.raises(KeyError):
idadf[idadf.columns[0], idadf.columns[1]]
def test_idadf_getitem_slice(self, idadb, idadf, idadf_tmp):
if len(idadf) > 10:
newidadf = idadf[0:9]
assert(len(newidadf) == 10)
if len(idadf_tmp) > 10:
idadb.add_column_id(idadf_tmp, destructive = True)
newidadf_1 = idadf_tmp[0:9]
newidadf_2 = idadf_tmp[0:9]
assert(all(newidadf_1.head(10) == newidadf_2.head(10)))
def test_idaseries_getitem_slice(self, idadb, idadf, idadf_tmp):
# Set them as series first and do the same test as above
if len(idadf.columns) >= 1:
idadf = idadf[idadf.columns[0]]
idadf_tmp = idadf_tmp[idadf_tmp.columns[0]]
assert(isinstance(idadf, ibmdbpy.IdaDataFrame))
assert(isinstance(idadf_tmp, ibmdbpy.IdaSeries))
if len(idadf) > 10:
newidadf = idadf[0:9]
assert(len(newidadf) == 10)
def test_idadf_setitem(self, idadf):
pass
def test_idadf_delitem(self, idadf):
pass
def test_idadf_filter_lt(self, idadf):
pass
def test_idadf_filter_le(self, idadf):
pass
def test_idadf_filter_eq(self, idadf):
pass
def test_idadf_filter_ne(self, idadf):
pass
def test_idadf_filter_ge(self, idadf):
pass
def test_idadf_filter_gt(self, idadf):
pass
def test_idadf_feature_add(self, idadf):
pass
def test_idadf_feature_radd(self, idadf):
pass
def test_idadf_feature_div(self, idadf):
pass
def test_idadf_feature_rdiv(self, idadf):
pass
def test_idadf_feature_floordiv(self, idadf):
pass
def test_idadf_feature_rfloordiv(self, idadf):
pass
def test_idadf_feature_mod(self, idadf):
pass
def test_idadf_feature_rmod(self, idadf):
pass
def test_idadf_feature_mul(self, idadf):
pass
def test_idadf_feature_rmul(self, idadf):
pass
def test_idadf_feature_neg(self, idadf):
pass
def test_idadf_feature_rpos(self, idadf):
pass
def test_idadf_feature_pow(self, idadf):
pass
def test_idadf_feature_rpow(self, idadf):
pass
def test_idadf_feature_sub(self, idadf):
pass
def test_idadf_feature_rsub(self, idadf):
pass
class Test_DataBaseFeatures(object):
def test_idadf_exists(self, idadf):
assert(idadf.exists() is True)
pass
def test_idadf_is_view(self, idadf):
assert(idadf.is_view() is False)
pass
def test_idadf_is_table(self, idadf):
assert(idadf.exists() is True)
pass
def test_idadf_get_primary_key(self, idadf):
pass
def test_idadf_ida_query(self, idadf):
pass
def test_idadf_ida_scalar_query(self, idadf):
pass
class Test_DataExploration(object):
### head
# For head and tail we do not test if the rows match because
# the order is not guaranteed anyway
def test_idadf_head_default(self, idadb, idadf, df):
sortkey = idadf.columns[0]
if idadf._get_numerical_columns():
sortkey = idadf._get_numerical_columns()[0]
ida_head = idadf.head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
df_head = df.sort_values(sortkey).head()
assert (ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_10(self, idadb, idadf, df):
ida_head = idadf.head(10)
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 10
def test_idadf_head_10_sort(self, idadb, idadf, df):
ida_head = idadf.head(10, sort=False)
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 10
def test_idadf_head_with_indexer(self, idadb, idadf_indexer, df):
ida_head = idadf_indexer.head()
sortby = len(df.columns)-1
df_head = df.sort_values(df.columns[sortby]).head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[idadf_indexer.columns[sortby]].tolist() ==
df_head[df.columns[sortby]].tolist())
def test_idadf_head_projected_3col(self, idadf, df):
if len(idadf.columns) >= 4:
columns = idadf.columns[1:4].tolist()
newidadf = idadf[columns]
sortkey = newidadf.columns[0]
if newidadf._get_numerical_columns():
sortkey = newidadf._get_numerical_columns()[0]
ida_head = newidadf.head()
df_sorted = df.sort_values(sortkey)
df_head = df_sorted[columns].head()
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_sorted(self, idadf, df):
sortIdx = len(df.columns) - 1
sortkey = idadf.columns[sortIdx]
newidadf = idadf.sort(sortkey)
ida_head = newidadf.head()
df_head = df.sort_values(sortkey).head()
assert(" ORDER BY " in newidadf.internal_state.get_state())
assert isinstance(ida_head, pandas.core.frame.DataFrame)
assert len(ida_head) == 5
assert(ida_head[sortkey].tolist() == df_head[sortkey].tolist())
def test_idadf_head_0(self, idadf):
with pytest.raises(ValueError):
idadf.head(0)
def test_idadf_head_negative(self, idadf):
with pytest.raises(ValueError):
idadf.head(-1)
### tail
def test_idadf_tail_default(self, idadb, idadf, df):
sortkey = idadf.columns[0]
if idadf._get_numerical_columns():
sortkey = idadf._get_numerical_columns()[0]
ida_tail = idadf.tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
df_tail = df.sort_values(sortkey).tail()
assert (ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
def test_idadf_tail_10(self, idadb, idadf, df):
ida_tail = idadf.tail(10)
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 10
def test_idadf_tail_10_sort(self, idadb, idadf, df):
ida_tail = idadf.tail(10, sort=False)
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 10
def test_idadf_tail_with_indexer(self, idadb, idadf_indexer, df):
ida_tail = idadf_indexer.tail()
sortby = len(df.columns)-1
df_head = df.sort_values(df.columns[sortby]).tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[idadf_indexer.columns[sortby]].tolist() ==
df_head[df.columns[sortby]].tolist())
def test_idadf_tail_projected_3col(self, idadf, df):
if len(idadf.columns) >= 4:
columns = idadf.columns[1:4].tolist()
newidadf = idadf[columns]
sortkey = newidadf.columns[0]
if newidadf._get_numerical_columns():
sortkey = newidadf._get_numerical_columns()[0]
ida_tail = newidadf.tail()
df_sorted = df.sort_values(sortkey)
df_tail = df_sorted[columns].tail()
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
@pytest.mark.skip(reason="tail on sorted dataframe fails in general, needs fixing first")
def test_idadf_tail_sorted(self, idadf, df):
sortIdx = len(df.columns) - 1
sortkey = idadf.columns[sortIdx]
newidadf = idadf.sort(sortkey)
ida_tail = newidadf.tail()
df_tail = df.sort_values(sortkey).tail()
assert(" ORDER BY " in newidadf.internal_state.get_state())
assert isinstance(ida_tail, pandas.core.frame.DataFrame)
assert len(ida_tail) == 5
assert(ida_tail[sortkey].tolist() == df_tail[sortkey].tolist())
def test_idadf_tail_0(self, idadf):
with pytest.raises(ValueError):
idadf.tail(0)
def test_idadf_tail_negative(self, idadf):
with pytest.raises(ValueError):
idadf.tail(-1)
def test_idadf_pivot_table(self, idadf):
pass
def test_idadf_sort(self, idadf):
pass
# no test
#__enter__
#__exit__
| 34.152074 | 106 | 0.643773 | ["BSD-3-Clause"] | alexmid/ibmdbpy | ibmdbpy/tests/test_frame.py | 14,822 | Python |
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
import json
import logging
import os
import re
import StringIO
import sys
import tempfile
import threading
import time
import traceback
import unittest
# net_utils adjusts sys.path.
import net_utils
from depot_tools import auto_stub
import auth
import isolateserver
import swarming
import test_utils
from depot_tools import fix_encoding
from utils import file_path
from utils import logging_utils
from utils import subprocess42
from utils import tools
import httpserver_mock
import isolateserver_mock
FILE_HASH = u'1' * 40
TEST_NAME = u'unit_tests'
OUTPUT = 'Ran stuff\n'
SHARD_OUTPUT_1 = 'Shard 1 of 3.'
SHARD_OUTPUT_2 = 'Shard 2 of 3.'
SHARD_OUTPUT_3 = 'Shard 3 of 3.'
def gen_yielded_data(index, **kwargs):
"""Returns an entry as it would be yielded by yield_results()."""
return index, gen_result_response(**kwargs)
def get_results(keys, output_collector=None):
"""Simplifies the call to yield_results().
The timeout is hard-coded to 10 seconds.
"""
return list(
swarming.yield_results(
'https://host:9001', keys, 10., None, True,
output_collector, False, True))
def collect(url, task_ids, task_stdout=('console', 'json')):
"""Simplifies the call to swarming.collect()."""
return swarming.collect(
swarming=url,
task_ids=task_ids,
timeout=10,
decorate=True,
print_status_updates=True,
task_summary_json=None,
task_output_dir=None,
task_output_stdout=task_stdout,
include_perf=False)
def main(args):
"""Bypasses swarming.main()'s exception handling.
It gets in the way when debugging test failures.
"""
dispatcher = swarming.subcommand.CommandDispatcher('swarming')
return dispatcher.execute(swarming.OptionParserSwarming(), args)
def gen_properties(**kwargs):
out = {
'caches': [],
'cipd_input': None,
'command': None,
'relative_cwd': None,
'dimensions': [
{'key': 'foo', 'value': 'bar'},
{'key': 'os', 'value': 'Mac'},
],
'env': [],
'env_prefixes': [],
'execution_timeout_secs': 60,
'extra_args': ['--some-arg', '123'],
'grace_period_secs': 30,
'idempotent': False,
'inputs_ref': {
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
'io_timeout_secs': 60,
'outputs': [],
'secret_bytes': None,
}
out.update(kwargs)
return out
def gen_request_data(properties=None, **kwargs):
out = {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(**(properties or {})),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
}
out.update(kwargs)
return out
def gen_request_response(request, **kwargs):
# As seen in services/swarming/handlers_api.py.
out = {
'request': request.copy(),
'task_id': '12300',
}
out.update(kwargs)
return out
def gen_result_response(**kwargs):
out = {
u'bot_id': u'swarm6',
u'completed_ts': u'2014-09-24T13:49:16.012345',
u'created_ts': u'2014-09-24T13:49:03.012345',
u'duration': 0.9636809825897217,
u'exit_code': 0,
u'failure': False,
u'internal_failure': False,
u'modified_ts': u'2014-09-24T13:49:17.012345',
u'name': u'heartbeat-canary-2014-09-24_13:49:01-os=Ubuntu',
u'server_versions': [u'1'],
u'started_ts': u'2014-09-24T13:49:09.012345',
u'state': 'COMPLETED',
u'tags': [u'cpu:x86', u'priority:100', u'user:joe@localhost'],
u'task_id': u'10100',
u'try_number': 1,
u'user': u'joe@localhost',
}
out.update(kwargs)
return out
# Silence pylint 'Access to a protected member _Event of a client class'.
class NonBlockingEvent(threading._Event): # pylint: disable=W0212
"""Just like threading.Event, but a class and ignores timeout in 'wait'.
Intended to be used as a mock for threading.Event in tests.
"""
def wait(self, timeout=None):
return super(NonBlockingEvent, self).wait(0)
class SwarmingServerHandler(httpserver_mock.MockHandler):
"""An extremely minimal implementation of the swarming server API v1.0."""
def do_GET(self):
logging.info('S GET %s', self.path)
if self.path == '/auth/api/v1/server/oauth_config':
self.send_json({
'client_id': 'c',
'client_not_so_secret': 's',
'primary_url': self.server.url})
elif self.path == '/auth/api/v1/accounts/self':
self.send_json({'identity': 'user:joe', 'xsrf_token': 'foo'})
else:
m = re.match(r'/api/swarming/v1/task/(\d+)/request', self.path)
if m:
logging.info('%s', m.group(1))
self.send_json(self.server.tasks[int(m.group(1))])
else:
self.send_json( {'a': 'b'})
#raise NotImplementedError(self.path)
def do_POST(self):
logging.info('POST %s', self.path)
raise NotImplementedError(self.path)
class MockSwarmingServer(httpserver_mock.MockServer):
_HANDLER_CLS = SwarmingServerHandler
def __init__(self):
super(MockSwarmingServer, self).__init__()
self._server.tasks = {}
class Common(object):
def setUp(self):
self._tempdir = None
self.mock(auth, 'ensure_logged_in', lambda _: None)
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(sys, 'stderr', StringIO.StringIO())
self.mock(logging_utils, 'prepare_logging', lambda *args: None)
self.mock(logging_utils, 'set_console_level', lambda *args: None)
def tearDown(self):
if self._tempdir:
file_path.rmtree(self._tempdir)
if not self.has_failed():
self._check_output('', '')
@property
def tempdir(self):
"""Creates the directory on first reference."""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(prefix=u'swarming_test')
return self._tempdir
maxDiff = None
def _check_output(self, out, err):
self.assertMultiLineEqual(out, sys.stdout.getvalue())
self.assertMultiLineEqual(err, sys.stderr.getvalue())
# Flush their content by mocking them again.
self.mock(sys, 'stdout', StringIO.StringIO())
self.mock(sys, 'stderr', StringIO.StringIO())
def main_safe(self, args):
"""Bypasses swarming.main()'s exception handling.
It gets in the way when debugging test failures.
"""
# pylint: disable=bare-except
try:
return main(args)
except:
data = '%s\nSTDOUT:\n%s\nSTDERR:\n%s' % (
traceback.format_exc(), sys.stdout.getvalue(), sys.stderr.getvalue())
self.fail(data)
class NetTestCase(net_utils.TestCase, Common):
"""Base class that defines the url_open mock."""
def setUp(self):
net_utils.TestCase.setUp(self)
Common.setUp(self)
self.mock(time, 'sleep', lambda _: None)
self.mock(subprocess42, 'call', lambda *_: self.fail())
self.mock(threading, 'Event', NonBlockingEvent)
class TestIsolated(auto_stub.TestCase, Common):
"""Test functions with isolated_ prefix."""
def setUp(self):
auto_stub.TestCase.setUp(self)
Common.setUp(self)
self._isolate = isolateserver_mock.MockIsolateServer()
self._swarming = MockSwarmingServer()
def tearDown(self):
try:
self._isolate.close()
self._swarming.close()
finally:
Common.tearDown(self)
auto_stub.TestCase.tearDown(self)
def test_reproduce_isolated(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
# 'out' is the default value for --output-dir.
outdir = os.path.join(self.tempdir, 'out')
self.assertTrue(os.path.isdir(outdir))
self.assertEqual(
[sys.executable, u'main.py', u'foo', outdir, '--bar'], cmd)
expected = os.environ.copy()
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(os.path.abspath('work')), cwd)
return 0
self.mock(subprocess42, 'call', call)
main_hash = self._isolate.add_content_compressed(
'default-gzip', 'not executed')
isolated = {
'files': {
'main.py': {
'h': main_hash,
's': 12,
'm': 0700,
},
},
'command': ['python', 'main.py'],
}
isolated_hash = self._isolate.add_content_compressed(
'default-gzip', json.dumps(isolated))
self._swarming._server.tasks[123] = {
'properties': {
'inputs_ref': {
'isolatedserver': self._isolate.url,
'namespace': 'default-gzip',
'isolated': isolated_hash,
},
'extra_args': ['foo', '${ISOLATED_OUTDIR}'],
'secret_bytes': None,
},
}
ret = self.main_safe(
[
'reproduce', '--swarming', self._swarming.url, '123', '--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
class TestSwarmingTrigger(NetTestCase):
def test_trigger_task_shards_2_shards(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id=None,
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request_1 = swarming.task_request_to_raw_request(task_request)
request_1['name'] = u'unit_tests:0:2'
request_1['task_slices'][0]['properties']['env'] = [
{'key': 'GTEST_SHARD_INDEX', 'value': '0'},
{'key': 'GTEST_TOTAL_SHARDS', 'value': '2'},
]
result_1 = gen_request_response(request_1)
request_2 = swarming.task_request_to_raw_request(task_request)
request_2['name'] = u'unit_tests:1:2'
request_2['task_slices'][0]['properties']['env'] = [
{'key': 'GTEST_SHARD_INDEX', 'value': '1'},
{'key': 'GTEST_TOTAL_SHARDS', 'value': '2'},
]
result_2 = gen_request_response(request_2, task_id='12400')
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request_1},
result_1,
),
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request_2},
result_2,
),
])
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
task_request=task_request,
shards=2)
expected = {
u'unit_tests:0:2': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
},
u'unit_tests:1:2': {
'shard_index': 1,
'task_id': '12400',
'view_url': 'https://localhost:1/user/task/12400',
},
}
self.assertEqual(expected, tasks)
def test_trigger_task_shards_priority_override(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=None,
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request = swarming.task_request_to_raw_request(task_request)
self.assertEqual('123', request['parent_task_id'])
result = gen_request_response(request)
result['request']['priority'] = 200
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
os.environ['SWARMING_TASK_ID'] = '123'
try:
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
shards=1,
task_request=task_request)
finally:
os.environ.pop('SWARMING_TASK_ID')
expected = {
u'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
}
self.assertEqual(expected, tasks)
self._check_output('', 'Priority was reset to 200\n')
def test_trigger_cipd_package(self):
task_request = swarming.NewTaskRequest(
name=TEST_NAME,
parent_task_id='123',
priority=101,
task_slices=[
{
'expiration_secs': 60*60,
'properties': swarming.TaskProperties(
caches=[],
cipd_input=swarming.CipdInput(
client_package=None,
packages=[
swarming.CipdPackage(
package_name='mypackage',
path='path/to/package',
version='abc123')],
server=None),
command=['a', 'b'],
relative_cwd=None,
dimensions=[('foo', 'bar'), ('os', 'Mac')],
env={},
env_prefixes=[],
execution_timeout_secs=60,
extra_args=[],
grace_period_secs=30,
idempotent=False,
inputs_ref={
'isolated': None,
'isolatedserver': '',
'namespace': 'default-gzip',
},
io_timeout_secs=60,
outputs=[],
secret_bytes=None),
},
],
service_account=None,
tags=['tag:a', 'tag:b'],
user='joe@localhost')
request = swarming.task_request_to_raw_request(task_request)
expected = {
'client_package': None,
'packages': [{
'package_name': 'mypackage',
'path': 'path/to/package',
'version': 'abc123',
}],
'server': None
}
self.assertEqual(
expected, request['task_slices'][0]['properties']['cipd_input'])
result = gen_request_response(request)
result['request']['priority'] = 200
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
os.environ['SWARMING_TASK_ID'] = '123'
try:
tasks = swarming.trigger_task_shards(
swarming='https://localhost:1',
shards=1,
task_request=task_request)
finally:
os.environ.pop('SWARMING_TASK_ID')
expected = {
u'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
}
self.assertEqual(expected, tasks)
self._check_output('', 'Priority was reset to 200\n')
class TestSwarmingCollection(NetTestCase):
def test_success(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': OUTPUT},
),
])
expected = [gen_yielded_data(0, output=OUTPUT)]
self.assertEqual(expected, get_results(['10100']))
def test_failure(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(exit_code=1),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': OUTPUT},
),
])
expected = [gen_yielded_data(0, output=OUTPUT, exit_code=1)]
self.assertEqual(expected, get_results(['10100']))
def test_no_ids(self):
actual = get_results([])
self.assertEqual([], actual)
def test_url_errors(self):
self.mock(logging, 'error', lambda *_, **__: None)
# NOTE: get_results() hardcodes timeout=10.
now = {}
lock = threading.Lock()
def get_now():
t = threading.current_thread()
with lock:
return now.setdefault(t, range(10)).pop(0)
self.mock(swarming.net, 'sleep_before_retry', lambda _x, _y: None)
self.mock(swarming, 'now', get_now)
# The actual number of requests here depends on 'now' progressing to 10
# seconds. It's called once per loop. Loop makes 9 iterations.
self.expected_requests(
9 * [
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
None,
)
])
actual = get_results(['10100'])
self.assertEqual([], actual)
self.assertTrue(all(not v for v in now.itervalues()), now)
def test_many_shards(self):
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': SHARD_OUTPUT_1},
),
(
'https://host:9001/api/swarming/v1/task/10200/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10200/stdout',
{},
{'output': SHARD_OUTPUT_2},
),
(
'https://host:9001/api/swarming/v1/task/10300/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10300/stdout',
{},
{'output': SHARD_OUTPUT_3},
),
])
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3),
]
actual = get_results(['10100', '10200', '10300'])
self.assertEqual(expected, sorted(actual))
def test_output_collector_called(self):
# Three shards, one failed. All results are passed to output collector.
self.expected_requests(
[
(
'https://host:9001/api/swarming/v1/task/10100/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10100/stdout',
{},
{'output': SHARD_OUTPUT_1},
),
(
'https://host:9001/api/swarming/v1/task/10200/result',
{'retry_50x': False},
gen_result_response(),
),
(
'https://host:9001/api/swarming/v1/task/10200/stdout',
{},
{'output': SHARD_OUTPUT_2},
),
(
'https://host:9001/api/swarming/v1/task/10300/result',
{'retry_50x': False},
gen_result_response(exit_code=1),
),
(
'https://host:9001/api/swarming/v1/task/10300/stdout',
{},
{'output': SHARD_OUTPUT_3},
),
])
class FakeOutputCollector(object):
def __init__(self):
self.results = []
self._lock = threading.Lock()
def process_shard_result(self, index, result):
with self._lock:
self.results.append((index, result))
output_collector = FakeOutputCollector()
get_results(['10100', '10200', '10300'], output_collector)
expected = [
gen_yielded_data(0, output=SHARD_OUTPUT_1),
gen_yielded_data(1, output=SHARD_OUTPUT_2),
gen_yielded_data(2, output=SHARD_OUTPUT_3, exit_code=1),
]
self.assertEqual(sorted(expected), sorted(output_collector.results))
def test_collect_nothing(self):
self.mock(swarming, 'yield_results', lambda *_: [])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
self._check_output('', 'Results from some shards are missing: 0, 1\n')
def test_collect_success(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100']))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_success_nostdout(self):
data = gen_result_response(output='Foo')
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(0, collect('https://localhost:1', ['10100'], []))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_fail(self):
data = gen_result_response(output='Foo', exit_code=-9)
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(-9, collect('https://localhost:1', ['10100']))
expected = u'\n'.join((
'+-------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+-------------------------------------------------------+',
'Foo',
'+-------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: -9 |',
'+-------------------------------------------------------+',
'Total duration: 1.0s',
''))
self._check_output(expected, '')
def test_collect_one_missing(self):
data = gen_result_response(output='Foo')
data['output'] = 'Foo'
self.mock(swarming, 'yield_results', lambda *_: [(0, data)])
self.assertEqual(1, collect('https://localhost:1', ['10100', '10200']))
expected = u'\n'.join((
'+------------------------------------------------------+',
'| Shard 0 https://localhost:1/user/task/10100 |',
'+------------------------------------------------------+',
'Foo',
'+------------------------------------------------------+',
'| End of shard 0 |',
'| Pending: 6.0s Duration: 1.0s Bot: swarm6 Exit: 0 |',
'+------------------------------------------------------+',
'',
'Total duration: 1.0s',
''))
self._check_output(expected, 'Results from some shards are missing: 1\n')
def test_collect_multi(self):
actual_calls = []
def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks):
self.assertIs(storage.__class__, isolateserver.Storage)
self.assertIs(cache.__class__, isolateserver.MemoryCache)
# Ensure storage is pointing to required location.
self.assertEqual('https://localhost:2', storage.location)
self.assertEqual('default', storage.namespace)
self.assertEqual(False, use_symlinks)
actual_calls.append((isolated_hash, outdir))
self.mock(isolateserver, 'fetch_isolated', fetch_isolated)
collector = swarming.TaskOutputCollector(
self.tempdir, ['json', 'console'], 2)
for index in xrange(2):
collector.process_shard_result(
index,
gen_result_response(
outputs_ref={
'isolated': str(index) * 40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
}))
summary = collector.finalize()
expected_calls = [
('0'*40, os.path.join(self.tempdir, '0')),
('1'*40, os.path.join(self.tempdir, '1')),
]
self.assertEqual(expected_calls, actual_calls)
# Ensure collected summary is correct.
outputs_refs = [
{
'isolated': '0'*40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '0'*40,
},
{
'isolated': '1'*40,
'isolatedserver': 'https://localhost:2',
'namespace': 'default',
'view_url':
'https://localhost:2/browse?namespace=default&hash=' + '1'*40,
},
]
expected = {
'shards': [gen_result_response(outputs_ref=o) for o in outputs_refs],
}
self.assertEqual(expected, summary)
# Ensure summary dumped to a file is correct as well.
with open(os.path.join(self.tempdir, 'summary.json'), 'r') as f:
summary_dump = json.load(f)
self.assertEqual(expected, summary_dump)
def test_ensures_same_server(self):
self.mock(logging, 'error', lambda *_: None)
# Two shard results, attempt to use different servers.
actual_calls = []
self.mock(
isolateserver, 'fetch_isolated',
lambda *args: actual_calls.append(args))
data = [
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server1',
'namespace': 'namespace',
'isolated':'hash1',
}),
gen_result_response(
outputs_ref={
'isolatedserver': 'https://server2',
'namespace': 'namespace',
'isolated':'hash1',
}),
]
# Feed them to collector.
collector = swarming.TaskOutputCollector(
self.tempdir, ['json', 'console'], 2)
for index, result in enumerate(data):
collector.process_shard_result(index, result)
collector.finalize()
# Only first fetch is made, second one is ignored.
self.assertEqual(1, len(actual_calls))
isolated_hash, storage, _, outdir, _ = actual_calls[0]
self.assertEqual(
('hash1', os.path.join(self.tempdir, '0')),
(isolated_hash, outdir))
self.assertEqual('https://server1', storage.location)
class TestMain(NetTestCase):
# Tests calling main().
def test_bot_delete(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/foo/delete',
{'method': 'POST', 'data': {}},
{},
),
])
ret = self.main_safe(
['bot_delete', '--swarming', 'https://localhost:1', 'foo', '--force'])
self._check_output('', '')
self.assertEqual(0, ret)
def test_trigger_raw_cmd(self):
# Minimalist use.
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--relative-cwd', 'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_raw_cmd_isolated(self):
# Minimalist use.
request = {
'name': u'None/foo=bar/' + FILE_HASH,
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
},
io_timeout_secs=1200),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--isolate-server', 'https://localhost:2',
'--isolated', FILE_HASH,
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/foo=bar/' + FILE_HASH + u'\n'
u'To collect results, use:\n'
u' swarming.py collect -S https://localhost:1 12300\n'
u'Or visit:\n'
u' https://localhost:1/user/task/12300\n',
u'')
def test_trigger_raw_cmd_with_service_account(self):
# Minimalist use.
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200),
},
],
'service_account': 'bot',
'tags': [],
'user': None,
}
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--service-account', 'bot',
'--raw-cmd',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: None/foo=bar\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_isolated_hash(self):
# pylint: disable=unused-argument
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--task-name', 'unit_tests',
'--isolated', FILE_HASH,
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_isolated_and_json(self):
# pylint: disable=unused-argument
write_json_calls = []
self.mock(tools, 'write_json', lambda *args: write_json_calls.append(args))
subprocess_calls = []
self.mock(subprocess42, 'call', lambda *c: subprocess_calls.append(c))
self.mock(swarming, 'now', lambda: 123456)
isolated = os.path.join(self.tempdir, 'zaz.isolated')
content = '{}'
with open(isolated, 'wb') as f:
f.write(content)
isolated_hash = isolateserver_mock.hash_content(content)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--idempotent',
'--task-name', 'unit_tests',
'--dump-json', 'foo.json',
'--isolated', isolated_hash,
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self.assertEqual([], subprocess_calls)
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 --json foo.json\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
expected = [
(
u'foo.json',
{
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(
idempotent=True,
inputs_ref={
'isolated': isolated_hash,
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
},
True,
),
]
self.assertEqual(expected, write_json_calls)
def test_trigger_cipd(self):
self.mock(swarming, 'now', lambda: 123456)
request = gen_request_data(
task_slices=[
{
'expiration_secs': 3600,
'properties': gen_properties(
cipd_input={
'client_package': None,
'packages': [
{
'package_name': 'super/awesome/pkg',
'path': 'path/to/pkg',
'version': 'version:42',
},
],
'server': None,
},
inputs_ref={
'isolated': u'1111111111111111111111111111111111111111',
'isolatedserver': 'https://localhost:2',
'namespace': 'default-gzip',
}),
},
])
result = gen_request_response(request)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'trigger',
'--swarming', 'https://localhost:1',
'--isolate-server', 'https://localhost:2',
'--shards', '1',
'--priority', '101',
'--dimension', 'foo', 'bar',
'--dimension', 'os', 'Mac',
'--expiration', '3600',
'--user', 'joe@localhost',
'--tags', 'tag:a',
'--tags', 'tag:b',
'--hard-timeout', '60',
'--io-timeout', '60',
'--task-name', 'unit_tests',
'--isolated', FILE_HASH,
'--cipd-package', 'path/to/pkg:super/awesome/pkg:version:42',
'--',
'--some-arg',
'123',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (actual, sys.stderr.getvalue()))
self._check_output(
'Triggered task: unit_tests\n'
'To collect results, use:\n'
' swarming.py collect -S https://localhost:1 12300\n'
'Or visit:\n'
' https://localhost:1/user/task/12300\n',
'')
def test_trigger_no_request(self):
with self.assertRaises(SystemExit):
main([
'trigger', '--swarming', 'https://host',
'--isolate-server', 'https://host', '-T', 'foo',
          '-d', 'os', 'amiga',
])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]\n'
'\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_env_vars(self):
with self.assertRaises(SystemExit):
main(['trigger'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_swarming_env_var(self):
with self.assertRaises(SystemExit):
with test_utils.EnvVars({'ISOLATE_SERVER': 'https://host'}):
main(['trigger', '-T' 'foo', 'foo.isolated'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: --swarming is required.'
'\n')
def test_trigger_no_isolate_server(self):
with self.assertRaises(SystemExit):
with test_utils.EnvVars({'SWARMING_SERVER': 'https://host'}):
main(['trigger', 'foo.isolated', '-d', 'os', 'amiga'])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Specify at least one of --raw-cmd or --isolated '
'or both\n')
def test_trigger_no_dimension(self):
with self.assertRaises(SystemExit):
main([
'trigger', '--swarming', 'https://host', '--raw-cmd', '--', 'foo',
])
self._check_output(
'',
'Usage: swarming.py trigger [options] (hash|isolated) '
'[-- extra_args|raw command]'
'\n\n'
'swarming.py: error: Please at least specify one --dimension\n')
def test_collect_default_json(self):
j = os.path.join(self.tempdir, 'foo.json')
data = {
'base_task_name': 'unit_tests',
'tasks': {
'unit_tests': {
'shard_index': 0,
'task_id': '12300',
'view_url': 'https://localhost:1/user/task/12300',
}
},
'request': {
'name': 'unit_tests',
'parent_task_id': '',
'priority': 101,
'task_slices': [
{
'expiration_secs': 3600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
relative_cwd='deeep'),
},
],
'tags': ['tag:a', 'tag:b'],
'user': 'joe@localhost',
},
}
with open(j, 'wb') as f:
json.dump(data, f)
def stub_collect(
swarming_server, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir, task_output_stdout, include_perf):
self.assertEqual('https://host', swarming_server)
self.assertEqual([u'12300'], task_ids)
# It is automatically calculated from hard timeout + expiration + 10.
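      # With the 3600s expiration in foo.json above, that is 3600 + 60 + 10 = 3670.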
self.assertEqual(3670., timeout)
self.assertEqual(True, decorate)
self.assertEqual(True, print_status_updates)
self.assertEqual('/a', task_summary_json)
self.assertEqual('/b', task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
print('Fake output')
self.mock(swarming, 'collect', stub_collect)
self.main_safe(
['collect', '--swarming', 'https://host', '--json', j, '--decorate',
'--print-status-updates', '--task-summary-json', '/a',
'--task-output-dir', '/b', '--task-output-stdout', 'all'])
self._check_output('Fake output\n', '')
def test_post(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
self.expected_requests(
[
(
'http://localhost:1/api/swarming/v1/tasks/new',
{'data': '{"a":"b"}', 'method': 'POST'},
'{"yo":"dawg"}',
{},
),
])
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(0, ret)
self.assertEqual('{"yo":"dawg"}', out.getvalue())
self.assertEqual('', err.getvalue())
def test_post_fail(self):
out = StringIO.StringIO()
err = StringIO.StringIO()
self.mock(sys, 'stdin', StringIO.StringIO('{"a":"b"}'))
self.mock(sys, 'stdout', out)
self.mock(sys, 'stderr', err)
ret = self.main_safe(['post', '-S', 'http://localhost:1', 'tasks/new'])
self.assertEqual(1, ret)
self.assertEqual('', out.getvalue())
self.assertEqual('No response!\n', err.getvalue())
def test_query_base(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?limit=200',
{},
{'yo': 'dawg'},
),
])
ret = self.main_safe(
[
'query', '--swarming', 'https://localhost:1', 'bot/botid/tasks',
])
self._check_output('{\n "yo": "dawg"\n}\n', '')
self.assertEqual(0, ret)
def test_query_cursor(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?'
'foo=bar&limit=2',
{},
{
'cursor': '%',
'extra': False,
'items': ['A'],
},
),
(
'https://localhost:1/api/swarming/v1/bot/botid/tasks?'
'foo=bar&cursor=%25&limit=1',
{},
{
'cursor': None,
'items': ['B'],
'ignored': True,
},
),
])
ret = self.main_safe(
[
'query', '--swarming', 'https://localhost:1',
'bot/botid/tasks?foo=bar',
'--limit', '2',
])
expected = (
'{\n'
' "extra": false, \n'
' "items": [\n'
' "A", \n'
' "B"\n'
' ]\n'
'}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_reproduce(self):
old_cwd = os.getcwd()
try:
os.chdir(self.tempdir)
def call(cmd, env, cwd):
w = os.path.abspath('work')
self.assertEqual([os.path.join(w, 'foo'), '--bar'], cmd)
expected = os.environ.copy()
expected['aa'] = 'bb'
expected['PATH'] = os.pathsep.join(
(os.path.join(w, 'foo', 'bar'), os.path.join(w, 'second'),
expected['PATH']))
expected['SWARMING_TASK_ID'] = 'reproduce'
expected['SWARMING_BOT_ID'] = 'reproduce'
self.assertEqual(expected, env)
self.assertEqual(unicode(w), cwd)
return 0
self.mock(subprocess42, 'call', call)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/123/request',
{},
{
'properties': {
'command': ['foo'],
'env': [
{'key': 'aa', 'value': 'bb'},
],
'env_prefixes': [
{'key': 'PATH', 'value': ['foo/bar', 'second']},
],
'secret_bytes': None,
},
},
),
])
ret = self.main_safe(
[
'reproduce', '--swarming', 'https://localhost:1', '123', '--',
'--bar',
])
self._check_output('', '')
self.assertEqual(0, ret)
finally:
os.chdir(old_cwd)
def test_run(self):
request = {
'name': u'None/foo=bar',
'parent_task_id': '',
'priority': 100,
'task_slices': [
{
'expiration_secs': 21600,
'properties': gen_properties(
command=['python', '-c', 'print(\'hi\')'],
dimensions=[{'key': 'foo', 'value': 'bar'}],
execution_timeout_secs=3600,
extra_args=None,
inputs_ref=None,
io_timeout_secs=1200,
relative_cwd='deeep'),
},
],
'tags': [],
'user': None,
}
result = gen_request_response(request)
def stub_collect(
swarming_server, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir, task_output_stdout, include_perf):
self.assertEqual('https://localhost:1', swarming_server)
self.assertEqual([u'12300'], task_ids)
# It is automatically calculated from hard timeout + expiration + 10.
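      # Here that is 21600 (expiration) + 3600 (execution timeout) + 10 = 25210.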
self.assertEqual(25210., timeout)
self.assertEqual(None, decorate)
self.assertEqual(None, print_status_updates)
self.assertEqual(None, task_summary_json)
self.assertEqual(None, task_output_dir)
self.assertSetEqual(set(['console', 'json']), set(task_output_stdout))
self.assertEqual(False, include_perf)
print('Fake output')
return 0
self.mock(swarming, 'collect', stub_collect)
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/tasks/new',
{'data': request},
result,
),
])
ret = self.main_safe([
'run',
'--swarming', 'https://localhost:1',
'--dimension', 'foo', 'bar',
'--raw-cmd',
'--relative-cwd', 'deeep',
'--',
'python',
'-c',
'print(\'hi\')',
])
actual = sys.stdout.getvalue()
self.assertEqual(0, ret, (ret, actual, sys.stderr.getvalue()))
self._check_output(
u'Triggered task: None/foo=bar\nFake output\n', '')
def test_cancel(self):
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/10100/cancel',
{'data': {'kill_running': False}, 'method': 'POST'},
{'yo': 'dawg'},
),
])
ret = self.main_safe(
[
'cancel', '--swarming', 'https://localhost:1', '10100',
])
self._check_output('', '')
self.assertEqual(0, ret)
def test_collect_timeout_zero(self):
j = os.path.join(self.tempdir, 'foo.json')
pending = gen_result_response(state='PENDING')
self.expected_requests(
[
(
'https://localhost:1/api/swarming/v1/task/10100/result',
{'retry_50x': True},
pending,
),
])
self.main_safe(
[
'collect', '--swarming', 'https://localhost:1',
'--task-summary-json', j, '--timeout', '-1', '10100',
])
self._check_output('swarm6: 10100 0\n', '')
with open(j, 'r') as f:
actual = json.load(f)
self.assertEqual({u'shards': [pending]}, actual)
class TestCommandBot(NetTestCase):
# Specialized test fixture for command 'bot'.
def setUp(self):
super(TestCommandBot, self).setUp()
# Sample data retrieved from actual server.
self.now = unicode(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
self.bot_1 = {
u'bot_id': u'swarm1',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': []},
{u'key': u'id', u'value': [u'swarm1']},
{u'key': u'os', u'value': [u'Ubuntu', u'Ubuntu-12.04']},
],
u'external_ip': u'1.1.1.1',
u'hostname': u'swarm1.example.com',
u'internal_ip': u'192.168.0.1',
u'is_dead': True,
u'last_seen_ts': 'A long time ago',
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_2 = {
u'bot_id': u'swarm2',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': [
u'15ad',
u'15ad:0405',
u'VMware Virtual SVGA 3D Graphics Adapter',
]},
{u'key': u'id', u'value': [u'swarm2']},
{u'key': u'os', u'value': [u'Windows', u'Windows-6.1']},
],
u'external_ip': u'1.1.1.2',
u'hostname': u'swarm2.example.com',
u'internal_ip': u'192.168.0.2',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'',
u'task_name': None,
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_3 = {
u'bot_id': u'swarm3',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'4']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': [u'15ad', u'15ad:0405']},
{u'key': u'id', u'value': [u'swarm3']},
{u'key': u'os', u'value': [u'Mac', u'Mac-10.9']},
],
u'external_ip': u'1.1.1.3',
u'hostname': u'swarm3.example.com',
u'internal_ip': u'192.168.0.3',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'148569b73a89501',
u'task_name': u'browser_tests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
self.bot_4 = {
u'bot_id': u'swarm4',
u'created_ts': self.now,
u'dimensions': [
{u'key': u'cores', u'value': [u'8']},
{u'key': u'cpu', u'value': [u'x86', u'x86-64']},
{u'key': u'gpu', u'value': []},
{u'key': u'id', u'value': [u'swarm4']},
{u'key': u'os', u'value': [u'Ubuntu', u'Ubuntu-12.04']},
],
u'external_ip': u'1.1.1.4',
u'hostname': u'swarm4.example.com',
u'internal_ip': u'192.168.0.4',
u'is_dead': False,
u'last_seen_ts': self.now,
u'quarantined': False,
u'task_id': u'14856971a64c601',
u'task_name': u'base_unittests',
u'version': u'56918a2ea28a6f51751ad14cc086f118b8727905',
}
def mock_swarming_api(self, bots, cursor):
"""Returns fake /api/swarming/v1/bots/list data."""
# Sample data retrieved from actual server.
return {
u'items': bots,
u'cursor': cursor,
u'death_timeout': 1800.0,
u'limit': 4,
u'now': unicode(self.now),
}
def test_bots(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(['bots', '--swarming', 'https://localhost:1'])
expected = (
u'swarm2\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": '
'["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics Adapter"], '
'"id": ["swarm2"], "os": ["Windows", "Windows-6.1"]}\n'
'swarm3\n'
' {"cores": ["4"], "cpu": ["x86", "x86-64"], "gpu": ["15ad", '
'"15ad:0405"], "id": ["swarm3"], "os": ["Mac", "Mac-10.9"]}\n'
u' task: 148569b73a89501\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_bare(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_2], 'opaque'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque',
{},
self.mock_swarming_api([self.bot_3], 'opaque2'),
),
(
base_url + 'is_dead=FALSE&is_busy=NONE&is_mp=NONE&cursor=opaque2',
{},
self.mock_swarming_api([self.bot_4], None),
),
])
ret = self.main_safe(
['bots', '--swarming', 'https://localhost:1', '--bare'])
self._check_output("swarm2\nswarm3\nswarm4\n", '')
self.assertEqual(0, ret)
def test_bots_filter(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url +
'is_dead=FALSE&is_busy=TRUE&is_mp=NONE&dimensions=os%3AWindows',
{},
self.mock_swarming_api([self.bot_2], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--busy',
'--dimension', 'os', 'Windows',
])
expected = (
u'swarm2\n {"cores": ["8"], "cpu": ["x86", "x86-64"], '
'"gpu": ["15ad", "15ad:0405", "VMware Virtual SVGA 3D Graphics '
'Adapter"], "id": ["swarm2"], '
'"os": ["Windows", "Windows-6.1"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_keep_dead(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url + 'is_dead=NONE&is_busy=NONE&is_mp=NONE',
{},
self.mock_swarming_api([self.bot_1, self.bot_4], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--keep-dead',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u'swarm4\n'
u' {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm4"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n'
u' task: 14856971a64c601\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
def test_bots_filter_dead_only(self):
base_url = 'https://localhost:1/api/swarming/v1/bots/list?'
self.expected_requests(
[
(
base_url +
'is_dead=TRUE&is_busy=NONE&is_mp=NONE&dimensions=os%3AUbuntu',
{},
self.mock_swarming_api([self.bot_1], None),
),
])
ret = self.main_safe(
[
'bots', '--swarming', 'https://localhost:1',
'--dimension', 'os', 'Ubuntu', '--dead-only',
])
expected = (
u'swarm1\n {"cores": ["8"], "cpu": ["x86", "x86-64"], "gpu": [], '
'"id": ["swarm1"], "os": ["Ubuntu", "Ubuntu-12.04"]}\n')
self._check_output(expected, '')
self.assertEqual(0, ret)
if __name__ == '__main__':
fix_encoding.fix_encoding()
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
for e in ('ISOLATE_SERVER', 'SWARMING_TASK_ID', 'SWARMING_SERVER'):
os.environ.pop(e, None)
unittest.main()
| 31.401368 | 80 | 0.526184 | ["BSD-3-Clause"] | FLOSSBoxIN/src | tools/swarming_client/tests/swarming_test.py | 59,694 | Python |
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to generate gperf tables of kernel object metadata
User mode threads making system calls reference kernel objects by memory
address, as the kernel/driver APIs in Zephyr are the same for both user
and supervisor contexts. It is necessary for the kernel to be able to
validate accesses to kernel objects to make the following assertions:
- That the memory address points to a kernel object
- The kernel object is of the expected type for the API being invoked
- The kernel object is of the expected initialization state
- The calling thread has sufficient permissions on the object
For more details see the :ref:`kernelobjects` section in the documentation.
The zephyr build generates an intermediate ELF binary, zephyr_prebuilt.elf,
which this script scans looking for kernel objects by examining the DWARF
debug information to look for instances of data structures that are considered
kernel objects. For device drivers, the API struct pointer populated at build
time is also examined to disambiguate between various device driver instances
since they are all 'struct device'.
This script can generate five different output files:
- A gperf script to generate the hash table mapping kernel object memory
addresses to kernel object metadata, used to track permissions,
object type, initialization state, and any object-specific data.
- A header file containing generated macros for validating driver instances
inside the system call handlers for the driver subsystem APIs.
- A code fragment included by kernel.h with one enum constant for
each kernel object type and each driver instance.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping the kernel object types and driver
instances to their human-readable representation in the
otype_to_str() function.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping kernel object types to their sizes.
This is used for allocating instances of them at runtime
(CONFIG_DYNAMIC_OBJECTS) in the obj_size_get() function.
"""
import sys
import argparse
import math
import os
import struct
import json
from distutils.version import LooseVersion
import elftools
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
if LooseVersion(elftools.__version__) < LooseVersion('0.24'):
sys.exit("pyelftools is out of date, need version 0.24 or later")
from collections import OrderedDict
# Keys in this dictionary are structs which should be recognized as kernel
# objects. Values are a tuple:
#
# - The first item is None, or the name of a Kconfig that
# indicates the presence of this object's definition in case it is not
# available in all configurations.
#
# - The second item is a boolean indicating whether it is permissible for
# the object to be located in user-accessible memory.
# Regular dictionaries are ordered only with Python 3.6 and
# above. Good summary and pointers to official documents at:
# https://stackoverflow.com/questions/39980323/are-dictionaries-ordered-in-python-3-6
kobjects = OrderedDict([
("k_mem_slab", (None, False)),
("k_msgq", (None, False)),
("k_mutex", (None, False)),
("k_pipe", (None, False)),
("k_queue", (None, False)),
("k_poll_signal", (None, False)),
("k_sem", (None, False)),
("k_stack", (None, False)),
("k_thread", (None, False)),
("k_timer", (None, False)),
("z_thread_stack_element", (None, False)),
("device", (None, False)),
("sys_mutex", (None, True)),
("k_futex", (None, True))
])
def kobject_to_enum(kobj):
if kobj.startswith("k_") or kobj.startswith("z_"):
name = kobj[2:]
else:
name = kobj
return "K_OBJ_%s" % name.upper()
subsystems = [
    # Editing the list is deprecated; add the __subsystem sentinel to your driver
    # API declaration instead, e.g.
#
# __subsystem struct my_driver_api {
# ....
#};
]
def subsystem_to_enum(subsys):
return "K_OBJ_DRIVER_" + subsys[:-11].upper()
# --- debug stuff ---
scr = os.path.basename(sys.argv[0])
def debug(text):
if not args.verbose:
return
sys.stdout.write(scr + ": " + text + "\n")
def error(text):
sys.exit("%s ERROR: %s" % (scr, text))
def debug_die(die, text):
if 'DW_AT_decl_file' not in die.attributes:
abs_orig_val = die.attributes["DW_AT_abstract_origin"].value
offset = abs_orig_val + die.cu.cu_offset
for var in variables:
if var.offset == offset:
die = var
break
lp_header = die.dwarfinfo.line_program_for_CU(die.cu).header
files = lp_header["file_entry"]
includes = lp_header["include_directory"]
fileinfo = files[die.attributes["DW_AT_decl_file"].value - 1]
filename = fileinfo.name.decode("utf-8")
filedir = includes[fileinfo.dir_index - 1].decode("utf-8")
path = os.path.join(filedir, filename)
lineno = die.attributes["DW_AT_decl_line"].value
debug(str(die))
debug("File '%s', line %d:" % (path, lineno))
debug(" %s" % text)
# -- ELF processing
DW_OP_addr = 0x3
DW_OP_fbreg = 0x91
STACK_TYPE = "z_thread_stack_element"
thread_counter = 0
sys_mutex_counter = 0
futex_counter = 0
stack_counter = 0
# Global type environment. Populated by pass 1.
type_env = {}
extern_env = {}
variables = []
class KobjectInstance:
def __init__(self, type_obj, addr):
global thread_counter
global sys_mutex_counter
global futex_counter
global stack_counter
self.addr = addr
self.type_obj = type_obj
        # Type name determined later since drivers need to look at the
# API struct address
self.type_name = None
if self.type_obj.name == "k_thread":
# Assign an ID for this thread object, used to track its
# permissions to other kernel objects
self.data = thread_counter
thread_counter = thread_counter + 1
elif self.type_obj.name == "sys_mutex":
self.data = "&kernel_mutexes[%d]" % sys_mutex_counter
sys_mutex_counter += 1
elif self.type_obj.name == "k_futex":
self.data = "&futex_data[%d]" % futex_counter
futex_counter += 1
elif self.type_obj.name == STACK_TYPE:
stack_counter += 1
else:
self.data = 0
class KobjectType:
def __init__(self, offset, name, size, api=False):
self.name = name
self.size = size
self.offset = offset
self.api = api
def __repr__(self):
return "<kobject %s>" % self.name
@staticmethod
def has_kobject():
return True
def get_kobjects(self, addr):
return {addr: KobjectInstance(self, addr)}
class ArrayType:
def __init__(self, offset, elements, member_type):
self.elements = elements
self.member_type = member_type
self.offset = offset
def __repr__(self):
return "<array of %d>" % self.member_type
def has_kobject(self):
if self.member_type not in type_env:
return False
return type_env[self.member_type].has_kobject()
def get_kobjects(self, addr):
mt = type_env[self.member_type]
        # Stacks are arrays of z_thread_stack_element but we want to treat
# the whole array as one kernel object (a thread stack)
# Data value gets set to size of entire region
if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
# An array of stacks appears as a multi-dimensional array.
# The last size is the size of each stack. We need to track
# each stack within the array, not as one huge stack object.
*dimensions, stacksize = self.elements
num_members = 1
for e in dimensions:
num_members = num_members * e
ret = {}
for i in range(num_members):
a = addr + (i * stacksize)
o = mt.get_kobjects(a)
o[a].data = stacksize
ret.update(o)
return ret
objs = {}
# Multidimensional array flattened out
num_members = 1
for e in self.elements:
num_members = num_members * e
for i in range(num_members):
objs.update(mt.get_kobjects(addr + (i * mt.size)))
return objs
class AggregateTypeMember:
def __init__(self, offset, member_name, member_type, member_offset):
self.member_name = member_name
self.member_type = member_type
if isinstance(member_offset, list):
# DWARF v2, location encoded as set of operations
# only "DW_OP_plus_uconst" with ULEB128 argument supported
if member_offset[0] == 0x23:
self.member_offset = member_offset[1] & 0x7f
for i in range(1, len(member_offset)-1):
if member_offset[i] & 0x80:
self.member_offset += (
member_offset[i+1] & 0x7f) << i*7
else:
raise Exception("not yet supported location operation (%s:%d:%d)" %
(self.member_name, self.member_type, member_offset[0]))
else:
self.member_offset = member_offset
def __repr__(self):
return "<member %s, type %d, offset %d>" % (
self.member_name, self.member_type, self.member_offset)
def has_kobject(self):
if self.member_type not in type_env:
return False
return type_env[self.member_type].has_kobject()
def get_kobjects(self, addr):
mt = type_env[self.member_type]
return mt.get_kobjects(addr + self.member_offset)
class ConstType:
def __init__(self, child_type):
self.child_type = child_type
def __repr__(self):
return "<const %d>" % self.child_type
def has_kobject(self):
if self.child_type not in type_env:
return False
return type_env[self.child_type].has_kobject()
def get_kobjects(self, addr):
return type_env[self.child_type].get_kobjects(addr)
class AggregateType:
def __init__(self, offset, name, size):
self.name = name
self.size = size
self.offset = offset
self.members = []
def add_member(self, member):
self.members.append(member)
def __repr__(self):
return "<struct %s, with %s>" % (self.name, self.members)
def has_kobject(self):
result = False
bad_members = []
for member in self.members:
if member.has_kobject():
result = True
else:
bad_members.append(member)
# Don't need to consider this again, just remove it
for bad_member in bad_members:
self.members.remove(bad_member)
return result
def get_kobjects(self, addr):
objs = {}
for member in self.members:
objs.update(member.get_kobjects(addr))
return objs
# --- helper functions for getting data from DIEs ---
def die_get_spec(die):
if 'DW_AT_specification' not in die.attributes:
return None
spec_val = die.attributes["DW_AT_specification"].value
# offset of the DW_TAG_variable for the extern declaration
offset = spec_val + die.cu.cu_offset
return extern_env.get(offset)
def die_get_name(die):
if 'DW_AT_name' not in die.attributes:
die = die_get_spec(die)
if not die:
return None
return die.attributes["DW_AT_name"].value.decode("utf-8")
def die_get_type_offset(die):
if 'DW_AT_type' not in die.attributes:
die = die_get_spec(die)
if not die:
return None
return die.attributes["DW_AT_type"].value + die.cu.cu_offset
def die_get_byte_size(die):
if 'DW_AT_byte_size' not in die.attributes:
return 0
return die.attributes["DW_AT_byte_size"].value
def analyze_die_struct(die):
name = die_get_name(die) or "<anon>"
offset = die.offset
size = die_get_byte_size(die)
# Incomplete type
if not size:
return
if name in kobjects:
type_env[offset] = KobjectType(offset, name, size)
elif name in subsystems:
type_env[offset] = KobjectType(offset, name, size, api=True)
else:
at = AggregateType(offset, name, size)
type_env[offset] = at
for child in die.iter_children():
if child.tag != "DW_TAG_member":
continue
data_member_location = child.attributes.get("DW_AT_data_member_location")
if not data_member_location:
continue
child_type = die_get_type_offset(child)
member_offset = data_member_location.value
cname = die_get_name(child) or "<anon>"
m = AggregateTypeMember(child.offset, cname, child_type,
member_offset)
at.add_member(m)
return
def analyze_die_const(die):
type_offset = die_get_type_offset(die)
if not type_offset:
return
type_env[die.offset] = ConstType(type_offset)
def analyze_die_array(die):
type_offset = die_get_type_offset(die)
elements = []
for child in die.iter_children():
if child.tag != "DW_TAG_subrange_type":
continue
if "DW_AT_upper_bound" not in child.attributes:
continue
ub = child.attributes["DW_AT_upper_bound"]
if not ub.form.startswith("DW_FORM_data"):
continue
elements.append(ub.value + 1)
if not elements:
if type_offset in type_env.keys():
mt = type_env[type_offset]
if mt.has_kobject():
if isinstance(mt, KobjectType) and mt.name == STACK_TYPE:
elements.append(1)
type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
else:
type_env[die.offset] = ArrayType(die.offset, elements, type_offset)
def analyze_typedef(die):
type_offset = die_get_type_offset(die)
if type_offset not in type_env.keys():
return
type_env[die.offset] = type_env[type_offset]
def unpack_pointer(elf, data, offset):
endian_code = "<" if elf.little_endian else ">"
if elf.elfclass == 32:
size_code = "I"
size = 4
else:
size_code = "Q"
size = 8
return struct.unpack(endian_code + size_code,
data[offset:offset + size])[0]
def addr_deref(elf, addr):
for section in elf.iter_sections():
start = section['sh_addr']
end = start + section['sh_size']
if start <= addr < end:
data = section.data()
offset = addr - start
return unpack_pointer(elf, data, offset)
return 0
def device_get_api_addr(elf, addr):
# See include/device.h for a description of struct device
offset = 8 if elf.elfclass == 32 else 16
return addr_deref(elf, addr + offset)
def find_kobjects(elf, syms):
if not elf.has_dwarf_info():
sys.exit("ELF file has no DWARF information")
app_smem_start = syms["_app_smem_start"]
app_smem_end = syms["_app_smem_end"]
di = elf.get_dwarf_info()
# Step 1: collect all type information.
for CU in di.iter_CUs():
for die in CU.iter_DIEs():
# Unions are disregarded, kernel objects should never be union
# members since the memory is not dedicated to that object and
# could be something else
if die.tag == "DW_TAG_structure_type":
analyze_die_struct(die)
elif die.tag == "DW_TAG_const_type":
analyze_die_const(die)
elif die.tag == "DW_TAG_array_type":
analyze_die_array(die)
elif die.tag == "DW_TAG_typedef":
analyze_typedef(die)
elif die.tag == "DW_TAG_variable":
variables.append(die)
# Step 2: filter type_env to only contain kernel objects, or structs
# and arrays of kernel objects
bad_offsets = []
for offset, type_object in type_env.items():
if not type_object.has_kobject():
bad_offsets.append(offset)
for offset in bad_offsets:
del type_env[offset]
# Step 3: Now that we know all the types we are looking for, examine
# all variables
all_objs = {}
for die in variables:
name = die_get_name(die)
if not name:
continue
if name.startswith("__init_sys_init"):
# Boot-time initialization function; not an actual device
continue
type_offset = die_get_type_offset(die)
# Is this a kernel object, or a structure containing kernel
# objects?
if type_offset not in type_env:
continue
if "DW_AT_declaration" in die.attributes:
# Extern declaration, only used indirectly
extern_env[die.offset] = die
continue
if "DW_AT_location" not in die.attributes:
debug_die(die,
"No location information for object '%s'; possibly stack allocated"
% name)
continue
loc = die.attributes["DW_AT_location"]
if loc.form != "DW_FORM_exprloc" and \
loc.form != "DW_FORM_block1":
debug_die(die, "kernel object '%s' unexpected location format" %
name)
continue
opcode = loc.value[0]
if opcode != DW_OP_addr:
# Check if frame pointer offset DW_OP_fbreg
if opcode == DW_OP_fbreg:
debug_die(die, "kernel object '%s' found on stack" % name)
else:
debug_die(die,
"kernel object '%s' unexpected exprloc opcode %s" %
(name, hex(opcode)))
continue
addr = (loc.value[1] | (loc.value[2] << 8) |
(loc.value[3] << 16) | (loc.value[4] << 24))
if addr == 0:
# Never linked; gc-sections deleted it
continue
type_obj = type_env[type_offset]
objs = type_obj.get_kobjects(addr)
all_objs.update(objs)
debug("symbol '%s' at %s contains %d object(s)"
% (name, hex(addr), len(objs)))
# Step 4: objs is a dictionary mapping variable memory addresses to
# their associated type objects. Now that we have seen all variables
# and can properly look up API structs, convert this into a dictionary
# mapping variables to the C enumeration of what kernel object type it
# is.
ret = {}
for addr, ko in all_objs.items():
# API structs don't get into the gperf table
if ko.type_obj.api:
continue
_, user_ram_allowed = kobjects[ko.type_obj.name]
if not user_ram_allowed and app_smem_start <= addr < app_smem_end:
debug_die(die, "object '%s' found in invalid location %s"
% (name, hex(addr)))
continue
if ko.type_obj.name != "device":
# Not a device struct so we immediately know its type
ko.type_name = kobject_to_enum(ko.type_obj.name)
ret[addr] = ko
continue
# Device struct. Need to get the address of its API struct,
# if it has one.
apiaddr = device_get_api_addr(elf, addr)
if apiaddr not in all_objs:
if apiaddr == 0:
debug("device instance at 0x%x has no associated subsystem"
% addr)
else:
debug("device instance at 0x%x has unknown API 0x%x"
% (addr, apiaddr))
# API struct does not correspond to a known subsystem, skip it
continue
apiobj = all_objs[apiaddr]
ko.type_name = subsystem_to_enum(apiobj.type_obj.name)
ret[addr] = ko
debug("found %d kernel object instances total" % len(ret))
# 1. Before python 3.7 dict order is not guaranteed. With Python
# 3.5 it doesn't seem random with *integer* keys but can't
# rely on that.
# 2. OrderedDict means _insertion_ order, so not enough because
# built from other (random!) dicts: need to _sort_ first.
# 3. Sorting memory address looks good.
return OrderedDict(sorted(ret.items()))
def get_symbols(elf):
for section in elf.iter_sections():
if isinstance(section, SymbolTableSection):
return {sym.name: sym.entry.st_value
for sym in section.iter_symbols()}
raise LookupError("Could not find symbol table")
# -- GPERF generation logic
header = """%compare-lengths
%define lookup-function-name z_object_lookup
%language=ANSI-C
%global-table
%struct-type
%{
#include <kernel.h>
#include <toolchain.h>
#include <syscall_handler.h>
#include <string.h>
%}
struct z_object;
"""
# Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
struct z_object *z_object_gperf_find(void *obj)
{
return z_object_lookup((const char *)obj, sizeof(void *));
}
void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
int i;
for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
if (wordlist[i].name != NULL) {
func(&wordlist[i], context);
}
}
}
#ifndef CONFIG_DYNAMIC_OBJECTS
struct z_object *z_object_find(void *obj)
ALIAS_OF(z_object_gperf_find);
void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
ALIAS_OF(z_object_gperf_wordlist_foreach);
#endif
"""
def write_gperf_table(fp, syms, objs, little_endian, static_begin, static_end):
fp.write(header)
if sys_mutex_counter != 0:
fp.write("static struct k_mutex kernel_mutexes[%d] = {\n"
% sys_mutex_counter)
for i in range(sys_mutex_counter):
fp.write("Z_MUTEX_INITIALIZER(kernel_mutexes[%d])" % i)
if i != sys_mutex_counter - 1:
fp.write(", ")
fp.write("};\n")
if futex_counter != 0:
fp.write("static struct z_futex_data futex_data[%d] = {\n"
% futex_counter)
for i in range(futex_counter):
fp.write("Z_FUTEX_DATA_INITIALIZER(futex_data[%d])" % i)
if i != futex_counter - 1:
fp.write(", ")
fp.write("};\n")
metadata_names = {
"K_OBJ_THREAD" : "thread_id",
"K_OBJ_SYS_MUTEX" : "mutex",
"K_OBJ_FUTEX" : "futex_data"
}
if "CONFIG_GEN_PRIV_STACKS" in syms:
metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_data"
if stack_counter != 0:
fp.write("static u8_t Z_GENERIC_SECTION(.priv_stacks.noinit) "
" __aligned(Z_PRIVILEGE_STACK_ALIGN)"
" priv_stacks[%d][CONFIG_PRIVILEGED_STACK_SIZE];\n"
% stack_counter)
fp.write("static struct z_stack_data stack_data[%d] = {\n"
% stack_counter)
counter = 0
for _, ko in objs.items():
if ko.type_name != "K_OBJ_THREAD_STACK_ELEMENT":
continue
# ko.data currently has the stack size. fetch the value to
# populate the appropriate entry in stack_data, and put
# a reference to the entry in stack_data into the data value
# instead
size = ko.data
ko.data = "&stack_data[%d]" % counter
fp.write("\t{ %d, (u8_t *)(&priv_stacks[%d]) }"
% (size, counter))
if counter != (stack_counter - 1):
fp.write(",")
fp.write("\n")
counter += 1
fp.write("};\n")
else:
metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_size"
fp.write("%%\n")
# Setup variables for mapping thread indexes
thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
thread_idx_map = {}
for i in range(0, thread_max_bytes):
thread_idx_map[i] = 0xFF
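    # Every byte starts as 0xFF (all thread indexes unused); the bit for each
    # statically allocated k_thread is cleared in the loop below, marking that
    # index as already taken.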
for obj_addr, ko in objs.items():
obj_type = ko.type_name
# pre-initialized objects fall within this memory range, they are
# either completely initialized at build time, or done automatically
# at boot during some PRE_KERNEL_* phase
initialized = static_begin <= obj_addr < static_end
is_driver = obj_type.startswith("K_OBJ_DRIVER_")
if "CONFIG_64BIT" in syms:
format_code = "Q"
else:
format_code = "I"
if little_endian:
endian = "<"
else:
endian = ">"
byte_str = struct.pack(endian + format_code, obj_addr)
fp.write("\"")
for byte in byte_str:
val = "\\x%02x" % byte
fp.write(val)
flags = "0"
if initialized:
flags += " | K_OBJ_FLAG_INITIALIZED"
if is_driver:
flags += " | K_OBJ_FLAG_DRIVER"
if ko.type_name in metadata_names:
tname = metadata_names[ko.type_name]
else:
tname = "unused"
fp.write("\", {}, %s, %s, { .%s = %s }\n" % (obj_type, flags,
tname, str(ko.data)))
if obj_type == "K_OBJ_THREAD":
idx = math.floor(ko.data / 8)
bit = ko.data % 8
thread_idx_map[idx] = thread_idx_map[idx] & ~(2**bit)
fp.write(footer)
# Generate the array of already mapped thread indexes
fp.write('\n')
fp.write('Z_GENERIC_SECTION(.kobject_data.data) ')
fp.write('u8_t _thread_idx_map[%d] = {' % (thread_max_bytes))
for i in range(0, thread_max_bytes):
fp.write(' 0x%x, ' % (thread_idx_map[i]))
fp.write('};\n')
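# For reference, each entry emitted into the gperf %% section above looks
# roughly like this (address bytes are made up):
#   "\x08\x12\x00\x20", {}, K_OBJ_SEM, 0 | K_OBJ_FLAG_INITIALIZED, { .unused = 0 }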
driver_macro_tpl = """
#define Z_SYSCALL_DRIVER_%(driver_upper)s(ptr, op) Z_SYSCALL_DRIVER_GEN(ptr, op, %(driver_lower)s, %(driver_upper)s)
"""
def write_validation_output(fp):
fp.write("#ifndef DRIVER_VALIDATION_GEN_H\n")
fp.write("#define DRIVER_VALIDATION_GEN_H\n")
fp.write("""#define Z_SYSCALL_DRIVER_GEN(ptr, op, driver_lower_case, driver_upper_case) \\
(Z_SYSCALL_OBJ(ptr, K_OBJ_DRIVER_##driver_upper_case) || \\
Z_SYSCALL_DRIVER_OP(ptr, driver_lower_case##_driver_api, op))
""")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "")
fp.write(driver_macro_tpl % {
"driver_lower": subsystem.lower(),
"driver_upper": subsystem.upper(),
})
fp.write("#endif /* DRIVER_VALIDATION_GEN_H */\n")
def write_kobj_types_output(fp):
fp.write("/* Core kernel objects */\n")
for kobj, obj_info in kobjects.items():
dep, _ = obj_info
if kobj == "device":
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write("%s,\n" % kobject_to_enum(kobj))
if dep:
fp.write("#endif\n")
fp.write("/* Driver subsystems */\n")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "").upper()
fp.write("K_OBJ_DRIVER_%s,\n" % subsystem)
def write_kobj_otype_output(fp):
fp.write("/* Core kernel objects */\n")
for kobj, obj_info in kobjects.items():
dep, _ = obj_info
if kobj == "device":
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write('case %s: ret = "%s"; break;\n' %
(kobject_to_enum(kobj), kobj))
if dep:
fp.write("#endif\n")
fp.write("/* Driver subsystems */\n")
for subsystem in subsystems:
subsystem = subsystem.replace("_driver_api", "")
fp.write('case K_OBJ_DRIVER_%s: ret = "%s driver"; break;\n' % (
subsystem.upper(),
subsystem
))
def write_kobj_size_output(fp):
fp.write("/* Non device/stack objects */\n")
for kobj, obj_info in kobjects.items():
dep, _ = obj_info
# device handled by default case. Stacks are not currently handled,
# if they eventually are it will be a special case.
if kobj in {"device", STACK_TYPE}:
continue
if dep:
fp.write("#ifdef %s\n" % dep)
fp.write('case %s: ret = sizeof(struct %s); break;\n' %
(kobject_to_enum(kobj), kobj))
if dep:
fp.write("#endif\n")
def parse_subsystems_list_file(path):
with open(path, "r") as fp:
subsys_list = json.load(fp)
subsystems.extend(subsys_list)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-k", "--kernel", required=False,
help="Input zephyr ELF binary")
parser.add_argument(
"-g", "--gperf-output", required=False,
help="Output list of kernel object addresses for gperf use")
parser.add_argument(
"-V", "--validation-output", required=False,
help="Output driver validation macros")
parser.add_argument(
"-K", "--kobj-types-output", required=False,
help="Output k_object enum constants")
parser.add_argument(
"-S", "--kobj-otype-output", required=False,
help="Output case statements for otype_to_str()")
parser.add_argument(
"-Z", "--kobj-size-output", required=False,
help="Output case statements for obj_size_get()")
parser.add_argument("-i", "--include-subsystem-list", required=False, action='append',
help='''Specifies a file with a JSON encoded list of subsystem names to append to
the driver subsystems list. Can be specified multiple times:
-i file1 -i file2 ...''')
parser.add_argument("-v", "--verbose", action="store_true",
help="Print extra debugging information")
args = parser.parse_args()
if "VERBOSE" in os.environ:
args.verbose = 1
def main():
parse_args()
if args.include_subsystem_list is not None:
for list_file in args.include_subsystem_list:
parse_subsystems_list_file(list_file)
if args.gperf_output:
assert args.kernel, "--kernel ELF required for --gperf-output"
elf = ELFFile(open(args.kernel, "rb"))
syms = get_symbols(elf)
max_threads = syms["CONFIG_MAX_THREAD_BYTES"] * 8
objs = find_kobjects(elf, syms)
if not objs:
sys.stderr.write("WARNING: zero kobject found in %s\n"
% args.kernel)
if thread_counter > max_threads:
sys.exit("Too many thread objects ({})\n"
"Increase CONFIG_MAX_THREAD_BYTES to {}"
.format(thread_counter, -(-thread_counter // 8)))
with open(args.gperf_output, "w") as fp:
write_gperf_table(fp, syms, objs, elf.little_endian,
syms["_static_kernel_objects_begin"],
syms["_static_kernel_objects_end"])
if args.validation_output:
with open(args.validation_output, "w") as fp:
write_validation_output(fp)
if args.kobj_types_output:
with open(args.kobj_types_output, "w") as fp:
write_kobj_types_output(fp)
if args.kobj_otype_output:
with open(args.kobj_otype_output, "w") as fp:
write_kobj_otype_output(fp)
if args.kobj_size_output:
with open(args.kobj_size_output, "w") as fp:
write_kobj_size_output(fp)
if __name__ == "__main__":
main()
| 31.736634 | 116 | 0.613621 | ["Apache-2.0"] | TTJO/zephyr | scripts/gen_kobject_list.py | 32,054 | Python |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training script for UNet-3D."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from hyperparameters import params_dict
import input_reader
import tpu_executor
import unet_config
import unet_model
tpu_executor.define_tpu_flags()
flags.DEFINE_string(
'mode', 'train', 'Mode to run: train or eval or train_and_eval '
'(default: train)')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('training_file_pattern', '', 'Location of the train data.')
flags.DEFINE_string('eval_file_pattern', '', 'Location of the eval data.')
flags.DEFINE_string('config_file', '', 'a YAML file which specifies overrides.')
flags.DEFINE_string('params_override', '',
'A JSON-style string that specifies overrides.')
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
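# Example invocation (paths and file patterns are placeholders; the TPU-related
# flags registered by tpu_executor.define_tpu_flags() are omitted here):
#
#   python3 unet_main.py --mode=train_and_eval \
#       --model_dir=gs://my-bucket/unet3d \
#       --training_file_pattern='gs://my-bucket/data/train-*' \
#       --eval_file_pattern='gs://my-bucket/data/eval-*' \
#       --config_file=unet_config.yaml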
def run_executer(params,
train_input_shapes=None, eval_input_shapes=None,
train_input_fn=None, eval_input_fn=None):
"""Runs Mask RCNN model on distribution strategy defined by the user."""
executer = tpu_executor.TPUEstimatorExecuter(
unet_model.unet_model_fn, params,
train_input_shapes=train_input_shapes,
eval_input_shapes=eval_input_shapes)
if FLAGS.mode == 'train':
assert train_input_fn is not None
results = executer.train(train_input_fn)
elif FLAGS.mode == 'eval':
assert eval_input_fn is not None
results = executer.evaluate(eval_input_fn)
elif FLAGS.mode == 'train_and_eval':
assert train_input_fn is not None
assert eval_input_fn is not None
results = executer.train_and_eval(train_input_fn, eval_input_fn)
else:
raise ValueError('Mode must be one of `train`, `eval`, or `train_and_eval`')
return results
def main(argv):
del argv # Unused.
params = params_dict.ParamsDict(unet_config.UNET_CONFIG,
unet_config.UNET_RESTRICTIONS)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=False)
if FLAGS.training_file_pattern:
params.override({'training_file_pattern': FLAGS.training_file_pattern},
is_strict=True)
if FLAGS.eval_file_pattern:
params.override({'eval_file_pattern': FLAGS.eval_file_pattern},
is_strict=True)
train_epoch_steps = params.train_item_count // params.train_batch_size
eval_epoch_steps = params.eval_item_count // params.eval_batch_size
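  # E.g. (made-up numbers) 400 training volumes with a batch size of 8 gives
  # 50 training steps per epoch.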
params.override(
{
'model_dir': FLAGS.model_dir,
'min_eval_interval': FLAGS.min_eval_interval,
'eval_timeout': FLAGS.eval_timeout,
'tpu_config': tpu_executor.get_tpu_flags(),
'lr_decay_steps': train_epoch_steps,
'train_steps': params.train_epochs * train_epoch_steps,
'eval_steps': eval_epoch_steps,
},
is_strict=False)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
train_input_fn = None
eval_input_fn = None
train_input_shapes = None
eval_input_shapes = None
if FLAGS.mode in ('train', 'train_and_eval'):
train_input_fn = input_reader.LiverInputFn(
params.training_file_pattern, params, mode=tf_estimator.ModeKeys.TRAIN)
train_input_shapes = train_input_fn.get_input_shapes(params)
if FLAGS.mode in ('eval', 'train_and_eval'):
eval_input_fn = input_reader.LiverInputFn(
params.eval_file_pattern, params, mode=tf_estimator.ModeKeys.EVAL)
eval_input_shapes = eval_input_fn.get_input_shapes(params)
assert train_input_shapes is not None or eval_input_shapes is not None
run_executer(params,
train_input_shapes=train_input_shapes,
eval_input_shapes=eval_input_shapes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn)
if __name__ == '__main__':
tf.disable_v2_behavior()
app.run(main)
| 35.928058 | 80 | 0.718662 | ["Apache-2.0"] | tensorflow/tpu-demos | models/official/unet3d/unet_main.py | 4,994 | Python |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy.random import uniform
from fairseq import options, utils
from fairseq.models import (
FairseqEncoder,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from bert import BertTokenizer
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
from bert import BertModel
@register_model('transformer')
class TransformerModel(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):
super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if len(task.datasets) > 0:
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(args, tgt_dict, embed_tokens)
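# A minimal sketch (not used by the model code) of the embedding-sharing rule
# enforced in TransformerModel.build_model above: with --share-all-embeddings a
# single table serves both encoder and decoder, otherwise each side gets its
# own. The plain nn.Embedding and the sizes below are illustrative placeholders
# for the Embedding helper used in build_embedding.
def _shared_embedding_sketch(share_all_embeddings=True, vocab_size=100, embed_dim=16, padding_idx=1):
    encoder_embed_tokens = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
    if share_all_embeddings:
        decoder_embed_tokens = encoder_embed_tokens  # same parameters on both sides
    else:
        decoder_embed_tokens = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
    return encoder_embed_tokens, decoder_embed_tokens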
@register_model('transformers2')
class TransformerS2Model(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None):
super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if len(task.datasets) > 0:
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerS2Encoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(args, tgt_dict, embed_tokens)
def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., input feeding/teacher
forcing) to the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())
bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask)
bert_encoder_out = bert_encoder_out[self.bert_output_layer]
if self.mask_cls_sep:
bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())
bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())
bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous()
bert_encoder_out = {
'bert_encoder_out': bert_encoder_out,
'bert_encoder_padding_mask': bert_encoder_padding_mask,
}
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)
decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs)
return decoder_out
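# Illustrative sketch (placeholder tensors, not called anywhere) of the
# dictionary TransformerS2Model.forward above hands to its encoder and decoder:
# BERT hidden states permuted to T x B x C plus a B x T padding mask built from
# the BERT pad id. The token ids and the 768-dim hidden size are assumptions.
def _bert_encoder_out_sketch(pad_id=0):
    bert_states = torch.randn(2, 5, 768)                          # B x T x C from BERT
    bert_input = torch.tensor([[101, 7, 8, 102, pad_id],
                               [101, 9, 102, pad_id, pad_id]])    # made-up token ids
    return {
        'bert_encoder_out': bert_states.permute(1, 0, 2).contiguous(),  # T x B x C
        'bert_encoder_padding_mask': bert_input.eq(pad_id),             # B x T
    }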
@register_model('transformerstack')
class TransformerModelStack(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False):
super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if len(task.datasets) > 0:
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoderStack(args, tgt_dict, embed_tokens)
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x += self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if self.layer_norm:
x = self.layer_norm(x)
return {
'encoder_out': x, # T x B x C
'encoder_padding_mask': encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, bert_outs, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = \
encoder_out['encoder_out'].index_select(1, new_order)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
if bert_outs['bert_encoder_out'] is not None:
bert_outs['bert_encoder_out'] = \
bert_outs['bert_encoder_out'].index_select(1, new_order)
if bert_outs['bert_encoder_padding_mask'] is not None:
bert_outs['bert_encoder_padding_mask'] = \
bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
return encoder_out, bert_outs
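    # Worked example (illustrative values): during beam search the generator
    # may pass new_order = torch.LongTensor([0, 0, 1, 1]) to expand a batch of
    # two sentences to two beams each. index_select(1, new_order) duplicates
    # beams along the batch axis of the T x B x C outputs, while
    # index_select(0, new_order) does the same for the B x T padding masks.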
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
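# Minimal sketch (not used by the model code) mirroring the shape handling in
# TransformerEncoder.forward above: embeddings start as B x T x C, are
# transposed to T x B x C, and the padding mask marks positions equal to
# padding_idx. The vocabulary size and token ids below are made up.
def _encoder_output_sketch():
    padding_idx, embed_dim = 1, 8
    src_tokens = torch.tensor([[5, 6, 7, padding_idx],
                               [8, 9, padding_idx, padding_idx]])      # B x T
    embed_tokens = nn.Embedding(16, embed_dim, padding_idx=padding_idx)
    x = math.sqrt(embed_dim) * embed_tokens(src_tokens)                # B x T x C
    x = x.transpose(0, 1)                                              # T x B x C
    return {
        'encoder_out': x,                                              # T x B x C
        'encoder_padding_mask': src_tokens.eq(padding_idx),            # B x T
    }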
class TransformerS2Encoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.output_mask = nn.Softmax(dim = 0)
self.t_layer = nn.Linear(512, 1)
self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings)
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])
bert_gates = [x == 1 for x in bert_gates]
assert len(bert_gates) == args.encoder_layers
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerS2EncoderLayer(args, bert_gate=bert_gates[i])
for i in range(args.encoder_layers)
])
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim))))
self.mask_layers = nn.ModuleList([])
self.mask_layers.extend([
TransformerEncoderLayer(args)
for i in range(2)
])
        if args.encoder_normalize_before:
            self.mask_layer_norm = LayerNorm(embed_dim)
        else:
            self.mask_layer_norm = None
'''
self.x = None
self.unmask_output = None
self.mask_output = None
self.encoder_vocab_output = None
self.backwards = 0
'''
self.i = 0
def forward(self, src_tokens, src_lengths, bert_encoder_out):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x += self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
# T x B mask model
###########
###########
###########
'''
mask_output = self.mask(src_tokens , x)
p = mask_output
p = p.transpose(0, 1)
t_p = torch.argsort(p,dim=1)
ratio = 0.2
self.ratio = ratio
p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p))
self.p_mask = p_mask
p_mask = p_mask.unsqueeze(-1).transpose(0,1)
self.mask_output = p
if self.training:
x = x * p_mask.detach()
else:
x = x
###########
###########
###########
# t_p[t_p>t_p.size*ratio] = 1
# t_p[t_p<=t_p.size*ratio] = 0
# t_p.permute(1,0)
# model.encoder.mask_output
'''
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])
if self.layer_norm:
x = self.layer_norm(x)
# if self.training:
'''
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out)
'''
'''
##########################
if self.i%1==0:
import scipy.io as scio
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out)
scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()})
self.i+=1
########################
'''
return {
'encoder_out': x, # T x B x C
'encoder_padding_mask': encoder_padding_mask, # B x T
}
def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
self.src_tokens = src_tokens
x = self.embed_scale * self.embed_tokens(src_tokens)
'''
ratio = 0.3
mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False)
if mask is not None:
'''
'''
if x.size(1)<10:
mask = [4]
else:
mask = [7,9]
x[:, mask] = self.mask_embedding
'''
        mask_output = self.mask(src_tokens, x)
        p = mask_output
t_p = torch.argsort(p,dim=1)
ratio = 0.2
self.ratio = ratio
p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p))
self.p_mask = p_mask
p_mask = p_mask.unsqueeze(-1)
self.mask_output = p
x = x * p_mask.detach()
if self.embed_positions is not None:
x += self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])
if self.layer_norm:
x = self.layer_norm(x)
encoder_vocab_output = self.output_vocab_linear(x)
self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1)
self.token = src_tokens
return encoder_vocab_output
def mask(self, src_tokens, x):
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.mask_layers:
x = layer(x, encoder_padding_mask)
        if self.mask_layer_norm:
            x = self.mask_layer_norm(x)
x = self.t_layer(x).squeeze(-1)
if encoder_padding_mask is not None:
x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf')))
return self.output_mask(x).transpose(0, 1)
def reorder_encoder_out(self, encoder_out, bert_outs, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = \
encoder_out['encoder_out'].index_select(1, new_order)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
if bert_outs['bert_encoder_out'] is not None:
bert_outs['bert_encoder_out'] = \
bert_outs['bert_encoder_out'].index_select(1, new_order)
if bert_outs['bert_encoder_padding_mask'] is not None:
bert_outs['bert_encoder_padding_mask'] = \
bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
return encoder_out, bert_outs
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
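# Minimal sketch (placeholder scores, not called anywhere) of the thresholding
# expression used in TransformerS2Encoder.encodeMLM above: exactly the argsort
# entries smaller than T * ratio are zeroed, so a `ratio` fraction of positions
# in every row of p_mask comes out as 0 and the rest as 1.
def _mask_ratio_sketch(ratio=0.2):
    p = torch.rand(2, 10)                                  # B x T mask scores
    t_p = torch.argsort(p, dim=1)
    p_mask = torch.where(t_p < t_p.size(1) * ratio,
                         torch.zeros_like(p), torch.ones_like(p))
    return p_mask                                          # B x T of 0s and 1s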
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.output_embed_dim = args.decoder_output_dim
padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
self.embed_positions = PositionalEmbedding(
args.max_target_positions, embed_dim, padding_idx,
learned=args.decoder_learned_pos,
) if not args.no_token_positional_embeddings else None
bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1])
bert_gates = [x == 1 for x in bert_gates]
assert len(bert_gates) == args.decoder_layers
print('bert_gates', bert_gates)
self.layers = nn.ModuleList([])
decoder_no_bert = getattr(args, 'decoder_no_bert', False)
if decoder_no_bert:
self.layers.extend([
TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])
for i in range(args.decoder_layers)
])
else:
self.layers.extend([
TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i])
for i in range(args.decoder_layers)
])
self.adaptive_softmax = None
self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)
x = self.output_layer(x)
return x, extra
def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
x, attn = layer(
x,
encoder_out['encoder_out'] if encoder_out is not None else None,
encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
bert_encoder_out['bert_encoder_out'],
bert_encoder_out['bert_encoder_padding_mask'],
incremental_state,
self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {'attn': attn, 'inner_states': inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
layer_norm_map = {
'0': 'self_attn_layer_norm',
'1': 'encoder_attn_layer_norm',
'2': 'final_layer_norm'
}
for old, new in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
if k in state_dict:
state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
del state_dict[k]
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
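# Illustrative sketch of the causal mask built by buffered_future_mask above:
# an upper-triangular matrix of -inf strictly above the diagonal, so position i
# can only attend to positions <= i. The size is a placeholder.
def _future_mask_sketch(dim=4):
    return torch.triu(utils.fill_with_neg_inf(torch.zeros(dim, dim)), 1)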
class TransformerDecoderStack(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.output_embed_dim = args.decoder_output_dim
padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
self.embed_positions = PositionalEmbedding(
args.max_target_positions, embed_dim, padding_idx,
learned=args.decoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerDecoderLayerStack(args, no_encoder_attn)
for _ in range(args.decoder_layers)
])
self.adaptive_softmax = None
self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state)
x = self.output_layer(x)
return x, extra
def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
x, attn = layer(
x,
encoder_out['encoder_out'] if encoder_out is not None else None,
encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
bert_encoder_out['bert_encoder_out'],
bert_encoder_out['bert_encoder_padding_mask'],
incremental_state,
self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {'attn': attn, 'inner_states': inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions())
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
layer_norm_map = {
'0': 'self_attn_layer_norm',
'1': 'encoder_attn_layer_norm',
'2': 'final_layer_norm'
}
for old, new in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
if k in state_dict:
state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
del state_dict[k]
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
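# Minimal sketch (placeholder sizes, not called anywhere) of output_layer above
# when input and output embeddings are tied: logits are computed as a linear
# map with the token-embedding matrix, so no separate projection is stored.
def _tied_output_layer_sketch():
    embed_tokens = nn.Embedding(100, 16)
    features = torch.randn(2, 7, 16)                  # B x T x C decoder features
    return F.linear(features, embed_tokens.weight)    # B x T x vocab_size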
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim, args.encoder_attention_heads,
dropout=args.attention_dropout, self_attention=True
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu')
)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {
'0': 'self_attn_layer_norm',
'1': 'final_layer_norm'
}
for old, new in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layer_norms.{}.{}'.format(name, old, m)
if k in state_dict:
state_dict[
'{}.{}.{}'.format(name, new, m)
] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
self.attn_weight = attn_weight
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
return x
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
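# Minimal sketch (placeholder tensor, not called anywhere) of the
# maybe_layer_norm convention shared by the layer classes in this file:
# before=True applies the norm only in the pre-norm setting
# (normalize_before=True), after=True only in the post-norm default.
def _maybe_layer_norm_sketch(normalize_before=False):
    ln = LayerNorm(8)
    x = torch.zeros(3, 2, 8)                              # T x B x C
    pre = ln(x) if (False ^ normalize_before) else x      # the before=True call
    post = ln(x) if (True ^ normalize_before) else x      # the after=True call
    return pre, post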
class TransformerS2EncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args, bert_gate=True):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim, args.encoder_attention_heads,
dropout=args.attention_dropout, self_attention=True
)
bert_out_dim = args.bert_out_dim
self.bert_attn = MultiheadAttention(
self.embed_dim, args.encoder_attention_heads,
kdim=bert_out_dim, vdim=bert_out_dim,
dropout=args.attention_dropout,
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu')
)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.encoder_ratio = args.encoder_ratio
self.bert_ratio = args.bert_ratio
self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
if not bert_gate:
self.bert_ratio = 0.
self.encoder_bert_dropout = False
self.encoder_bert_mixup = False
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {
'0': 'self_attn_layer_norm',
'1': 'final_layer_norm'
}
for old, new in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layer_norms.{}.{}'.format(name, old, m)
if k in state_dict:
state_dict[
'{}.{}.{}'.format(name, new, m)
] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask)
x1 = F.dropout(x1, p=self.dropout, training=self.training)
x2 = F.dropout(x2, p=self.dropout, training=self.training)
ratios = self.get_ratio()
x = residual + ratios[0] * x1 + ratios[1] * x2
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
return x
def get_ratio(self):
if self.encoder_bert_dropout:
frand = float(uniform(0, 1))
if self.encoder_bert_mixup and self.training:
return [frand, 1 - frand]
if frand < self.encoder_bert_dropout_ratio and self.training:
return [1, 0]
elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
return [0, 1]
else:
return [0.5, 0.5]
else:
return [self.encoder_ratio, self.bert_ratio]
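    # Worked example of get_ratio above with the default
    # encoder_bert_dropout_ratio of 0.25 during training: frand < 0.25 keeps
    # only the self-attention branch ([1, 0]), frand > 0.75 keeps only the
    # BERT branch ([0, 1]), and anything in between mixes them equally
    # ([0.5, 0.5]); with encoder_bert_mixup the weights are instead sampled as
    # [frand, 1 - frand]. At inference time the method returns [0.5, 0.5] when
    # encoder_bert_dropout is enabled and [encoder_ratio, bert_ratio] otherwise.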
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
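# Minimal sketch (random placeholder tensors, not called anywhere) of how the
# two attention branches are combined in TransformerS2EncoderLayer.forward
# above: the self-attention output and the BERT cross-attention output are
# weighted by the ratios from get_ratio and added to the residual.
def _bert_fused_residual_sketch(ratios=(0.5, 0.5)):
    residual = torch.randn(4, 2, 8)    # T x B x C input to the layer
    x1 = torch.randn(4, 2, 8)          # self-attention branch output
    x2 = torch.randn(4, 2, 8)          # BERT cross-attention branch output
    return residual + ratios[0] * x1 + ratios[1] * x2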
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
embed_dim=self.embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True
)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu')
)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, 'char_inputs', False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
dropout=args.attention_dropout, encoder_decoder_attention=True
)
bert_out_dim = args.bert_out_dim
self.bert_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
kdim=bert_out_dim, vdim=bert_out_dim,
dropout=args.attention_dropout, encoder_decoder_attention=True
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
self.encoder_ratio = args.encoder_ratio
self.bert_ratio = args.bert_ratio
self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
if not bert_gate:
self.bert_ratio = 0.
self.encoder_bert_dropout = False
self.encoder_bert_mixup = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
bert_encoder_out=None,
bert_encoder_padding_mask=None,
incremental_state=None,
prev_self_attn_state=None,
prev_attn_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
if self.encoder_attn is not None:
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x1, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x2, _ = self.bert_attn(
query=x,
key=bert_encoder_out,
value=bert_encoder_out,
key_padding_mask=bert_encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x1 = F.dropout(x1, p=self.dropout, training=self.training)
x2 = F.dropout(x2, p=self.dropout, training=self.training)
ratios = self.get_ratio()
x = residual + ratios[0] * x1 + ratios[1] * x2
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
return x, attn, self_attn_state
return x, attn
def get_ratio(self):
if self.encoder_bert_dropout:
frand = float(uniform(0, 1))
if self.encoder_bert_mixup and self.training:
return [frand, 1 - frand]
if frand < self.encoder_bert_dropout_ratio and self.training:
return [1, 0]
elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
return [0, 1]
else:
return [0.5, 0.5]
else:
return [self.encoder_ratio, self.bert_ratio]
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
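# --- Illustrative sketch (not part of the original file) ----------------------
# The drop-net logic in get_ratio() above either drops one of the two attention
# branches at random during training (each with probability
# `encoder_bert_dropout_ratio`), mixes them with a random convex weight when
# mixup is enabled, or falls back to a 0.5/0.5 average. The standalone helper
# below reproduces that behaviour in plain Python so it can be sanity-checked in
# isolation; the 0.25 default mirrors the class above, everything else is an
# assumption made for illustration only.
def _dropnet_ratio_sketch(training, dropout_ratio=0.25, mixup=False):
    from random import uniform
    frand = float(uniform(0, 1))
    if mixup and training:
        return [frand, 1 - frand]      # convex mix of encoder/BERT branches
    if training and frand < dropout_ratio:
        return [1, 0]                  # keep only the encoder attention branch
    if training and frand > 1 - dropout_ratio:
        return [0, 1]                  # keep only the BERT attention branch
    return [0.5, 0.5]                  # average both branches (e.g. at inference)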
class TransformerStandardDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): if True, skip attention over encoder
            outputs (default: False).
"""
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
embed_dim=self.embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True
)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu')
)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, 'char_inputs', False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
dropout=args.attention_dropout, encoder_decoder_attention=True
)
# bert_out_dim = args.bert_out_dim
# self.bert_attn = MultiheadAttention(
# self.embed_dim, args.decoder_attention_heads,
# kdim=bert_out_dim, vdim=bert_out_dim,
# dropout=args.attention_dropout, encoder_decoder_attention=True
# )
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
self.encoder_ratio = args.encoder_ratio
self.bert_ratio = args.bert_ratio
if not bert_gate:
self.bert_ratio = 0.
self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False)
self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25)
assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5
self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
bert_encoder_out=None,
bert_encoder_padding_mask=None,
incremental_state=None,
prev_self_attn_state=None,
prev_attn_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
if self.encoder_attn is not None:
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x1, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
# x2, _ = self.bert_attn(
# query=x,
# key=bert_encoder_out,
# value=bert_encoder_out,
# key_padding_mask=bert_encoder_padding_mask,
# incremental_state=incremental_state,
# static_kv=True,
# need_weights=(not self.training and self.need_attn),
# )
x1 = F.dropout(x1, p=self.dropout, training=self.training)
# x2 = F.dropout(x2, p=self.dropout, training=self.training)
# ratios = self.get_ratio()
x = residual + x1
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
return x, attn, self_attn_state
return x, attn
def get_ratio(self):
if self.encoder_bert_dropout:
frand = float(uniform(0, 1))
if self.encoder_bert_mixup and self.training:
return [frand, 1 - frand]
if frand < self.encoder_bert_dropout_ratio and self.training:
return [1, 0]
elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
return [0, 1]
else:
return [0.5, 0.5]
else:
return [self.encoder_ratio, self.bert_ratio]
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
class TransformerDecoderLayerStack(nn.Module):
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
embed_dim=self.embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu')
)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, 'char_inputs', False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
dropout=args.attention_dropout, encoder_decoder_attention=True
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
bert_out_dim = args.bert_out_dim
self.bert_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
kdim=bert_out_dim, vdim=bert_out_dim,
dropout=args.attention_dropout, encoder_decoder_attention=True
)
self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.bert_first = args.bert_first
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
bert_encoder_out=None,
bert_encoder_padding_mask=None,
incremental_state=None,
prev_self_attn_state=None,
prev_attn_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
if self.encoder_attn is not None:
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state):
residual = x
x = self.maybe_layer_norm(layer_norm, x, before=True)
x, attn = attnlayer(
query=x,
key=keyorvalue,
value=keyorvalue,
key_padding_mask=key_padding,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(layer_norm, x, after=True)
return x, attn
if self.bert_first:
x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,
bert_encoder_padding_mask, incremental_state)
x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,
incremental_state)
else:
x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask,
incremental_state)
x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out,
bert_encoder_padding_mask, incremental_state)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
self_attn_state = saved_state["prev_key"], saved_state["prev_value"]
return x, attn, self_attn_state
return x, attn
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
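# --- Usage note (illustrative, not part of the original file) -----------------
# Embedding() zeroes the padding row and draws the remaining weights from a
# normal with std embed_dim ** -0.5, while Linear() uses Xavier-uniform weights
# and a zero bias, e.g.:
#
#     emb = Embedding(num_embeddings=1000, embedding_dim=512, padding_idx=1)
#     proj = Linear(512, 2048)
#     assert emb.weight[1].abs().sum().item() == 0.0   # padding row is all zeros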
@register_model_architecture('transformer', 'transformer')
def base_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.activation_dropout = getattr(args, 'activation_dropout', 0.)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.dropout = getattr(args, 'dropout', 0.1)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformers2', 'transformers2')
def base_architecture_s2(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.activation_dropout = getattr(args, 'activation_dropout', 0.)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.dropout = getattr(args, 'dropout', 0.1)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformerstack', 'transformerstack')
def base_stack_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.activation_dropout = getattr(args, 'activation_dropout', 0.)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.dropout = getattr(args, 'dropout', 0.1)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
@register_model_architecture('transformer', 'transformer_iwslt_de_en')
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_architecture(args)
@register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en')
def transformer_s2_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_architecture_s2(args)
@register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en')
def transformerstack_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_stack_architecture(args)
@register_model_architecture('transformers2', 'transformer_wmt_en_de')
def transformer_wmt_en_de(args):
base_architecture_s2(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.dropout = getattr(args, 'dropout', 0.3)
base_architecture(args)
@register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big')
def transformer_s2_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.dropout = getattr(args, 'dropout', 0.3)
base_architecture_s2(args)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, 'dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
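# --- Illustrative sketch (not part of the original file) ----------------------
# Every architecture function above only fills in *missing* attributes on the
# argparse.Namespace via getattr defaults, which is why the named presets can
# simply delegate to base_architecture() at the end.  A minimal, framework-free
# demonstration of that pattern (the attribute names are just examples):
if __name__ == "__main__":
    import argparse
    cfg = argparse.Namespace(dropout=0.3)                            # user override
    cfg.encoder_embed_dim = getattr(cfg, "encoder_embed_dim", 512)   # default applied
    cfg.dropout = getattr(cfg, "dropout", 0.1)                       # override preserved
    print(cfg.encoder_embed_dim, cfg.dropout)                        # -> 512 0.3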
| 45.987847 | 169 | 0.625363 | [
"MIT"
] | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | models/transformer.py | 105,956 | Python |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute GuidedBackprop SaliencyMasks."""
from .base import SaliencyMask
import tensorflow.compat.v1 as tf
class GuidedBackprop(SaliencyMask):
"""A SaliencyMask class that computes saliency masks with GuidedBackProp.
This implementation copies the TensorFlow graph to a new graph with the ReLU
gradient overwritten as in the paper:
https://arxiv.org/abs/1412.6806
Thanks to Chris Olah for generously sharing his implementation of the ReLU
backprop.
"""
GuidedReluRegistered = False
def __init__(self,
graph,
session,
y,
x,
tmp_ckpt_path='/tmp/guided_backprop_ckpt'):
"""Constructs a GuidedBackprop SaliencyMask."""
super(GuidedBackprop, self).__init__(graph, session, y, x)
self.x = x
if GuidedBackprop.GuidedReluRegistered is False:
#### Acknowledgement to Chris Olah ####
@tf.RegisterGradient("GuidedRelu")
def _GuidedReluGrad(op, grad):
gate_g = tf.cast(grad > 0, "float32")
gate_y = tf.cast(op.outputs[0] > 0, "float32")
return gate_y * gate_g * grad
GuidedBackprop.GuidedReluRegistered = True
with graph.as_default():
saver = tf.train.Saver()
saver.save(session, tmp_ckpt_path)
graph_def = graph.as_graph_def()
self.guided_graph = tf.Graph()
with self.guided_graph.as_default():
self.guided_sess = tf.Session(graph = self.guided_graph)
with self.guided_graph.gradient_override_map({'Relu': 'GuidedRelu'}):
# Import the graph def, and all the variables.
tf.import_graph_def(graph_def, name='')
saver.restore(self.guided_sess, tmp_ckpt_path)
imported_y = self.guided_graph.get_tensor_by_name(y.name)
imported_x = self.guided_graph.get_tensor_by_name(x.name)
self.guided_grads_node = tf.gradients(imported_y, imported_x)[0]
def GetMask(self, x_value, feed_dict = {}):
"""Returns a GuidedBackprop mask."""
with self.guided_graph.as_default():
# Move all the feed dict tensor keys to refer to the same tensor on the
# new graph.
guided_feed_dict = {}
for tensor in feed_dict:
guided_feed_dict[tensor.name] = feed_dict[tensor]
guided_feed_dict[self.x.name] = [x_value]
return self.guided_sess.run(
self.guided_grads_node, feed_dict = guided_feed_dict)[0]
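# --- Usage sketch (illustrative only; the toy model below is an assumption) ---
# GuidedBackprop expects a TF1-style graph, a session with initialised
# variables, an output scalar `y` and the input tensor `x`.  The two-layer
# network here is made up purely for illustration, and the block assumes the
# module's relative import (`from .base import SaliencyMask`) resolves, e.g.
# when executed via `python -m`.
if __name__ == "__main__":
    import numpy as np
    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
        hidden = tf.nn.relu(tf.layers.dense(x, 128))
        logits = tf.layers.dense(hidden, 10)
        y = logits[0, 3]                      # saliency w.r.t. class 3
        init = tf.global_variables_initializer()
    sess = tf.Session(graph=graph)
    sess.run(init)
    saliency = GuidedBackprop(graph, sess, y, x)
    mask = saliency.GetMask(np.zeros(784, dtype=np.float32))
    print(mask.shape)                         # -> (784,)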
| 35.5 | 78 | 0.697183 | [
"Apache-2.0"
] | aliabd/history-of-interpretation | saliency/guided_backprop.py | 2,982 | Python |
import jax.numpy as jnp
import jax.random as random
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
from typing import Any, Dict, Optional
class PlayerModel(object):
"""
numpyro implementation of the AIrsenal player model.
"""
def __init__(self):
self.player_ids = None
self.samples = None
@staticmethod
def _model(
nplayer: int, nmatch: int, minutes: jnp.array, y: jnp.array, alpha: jnp.array
):
theta = dist.Dirichlet(concentration=alpha)
# one sample from the prior per player
with numpyro.plate("nplayer", nplayer):
dprobs = numpyro.sample("probs", theta)
# now it's all about how to broadcast in the right dimensions.....
prob_score = numpyro.deterministic(
"prob_score", dprobs[:, 0, None] * (minutes / 90.0)
)
prob_assist = numpyro.deterministic(
"prob_assist", dprobs[:, 1, None] * (minutes / 90.0)
)
prob_neither = numpyro.deterministic(
"prob_neither", dprobs[:, 2, None] * (minutes / 90.0) + (90.0 - minutes)
)
theta_mins = dist.Multinomial(
probs=jnp.moveaxis(jnp.array([prob_score, prob_assist, prob_neither]), 0, 2)
)
return numpyro.sample("obs", theta_mins, obs=y)
def fit(
self,
data,
random_state: int = 42,
num_warmup: int = 500,
num_samples: int = 2000,
mcmc_kwargs: Optional[Dict[str, Any]] = None,
run_kwargs: Optional[Dict[str, Any]] = None,
):
self.player_ids = data["player_ids"]
kernel = NUTS(self._model)
mcmc = MCMC(
kernel,
num_warmup=num_warmup,
num_samples=num_samples,
num_chains=1,
progress_bar=True,
**(mcmc_kwargs or {}),
)
rng_key, rng_key_predict = random.split(random.PRNGKey(44))
mcmc.run(
rng_key,
data["nplayer"],
data["nmatch"],
data["minutes"],
data["y"],
data["alpha"],
**(run_kwargs or {}),
)
self.samples = mcmc.get_samples()
return self
def get_probs(self):
prob_dict = {
"player_id": [],
"prob_score": [],
"prob_assist": [],
"prob_neither": [],
}
for i, pid in enumerate(self.player_ids):
prob_dict["player_id"].append(pid)
prob_dict["prob_score"].append(float(self.samples["probs"][:, i, 0].mean()))
prob_dict["prob_assist"].append(
float(self.samples["probs"][:, i, 1].mean())
)
prob_dict["prob_neither"].append(
float(self.samples["probs"][:, i, 2].mean())
)
return prob_dict
def get_probs_for_player(self, player_id):
try:
index = list(self.player_ids).index(player_id)
        except ValueError:
raise RuntimeError(f"Unknown player_id {player_id}")
prob_score = float(self.samples["probs"][:, index, 0].mean())
prob_assist = float(self.samples["probs"][:, index, 1].mean())
prob_neither = float(self.samples["probs"][:, index, 2].mean())
return (prob_score, prob_assist, prob_neither)
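# --- Usage sketch (illustrative only; all numbers below are synthetic) --------
# fit() expects a dict with per-player, per-match minutes, a
# (nplayer, nmatch, 3) array of (goal, assist, neither) counts and a Dirichlet
# prior `alpha`; the shapes mirror the model above, the values are made up.
# A short MCMC run on this toy data takes a few seconds.
if __name__ == "__main__":
    nplayer, nmatch = 3, 5
    data = {
        "player_ids": [101, 102, 103],
        "nplayer": nplayer,
        "nmatch": nmatch,
        "minutes": jnp.full((nplayer, nmatch), 90.0),
        "y": jnp.tile(jnp.array([1, 0, 2]), (nplayer, nmatch, 1)),
        "alpha": jnp.array([1.0, 1.0, 1.0]),
    }
    model = PlayerModel().fit(data, num_warmup=100, num_samples=200)
    print(model.get_probs_for_player(101))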
| 33.227723 | 88 | 0.554827 | [
"MIT"
] | JPKFin/AIrsenal | airsenal/framework/player_model.py | 3,356 | Python |
from .plot import plot
from .simple_plot import simple_plot
| 20 | 36 | 0.833333 | [
"BSD-3-Clause"
] | ebolyen/q2-gamma | q2_gamma/visualizers/__init__.py | 60 | Python |
# -*- coding: utf-8 -*-
""" OneLogin_Saml2_Settings class
Copyright (c) 2010-2018 OneLogin, Inc.
MIT License
Setting class of OneLogin's Python Toolkit.
"""
from time import time
import re
from os.path import dirname, exists, join, sep
from app.utils.onelogin.saml2 import compat
from app.utils.onelogin.saml2.constants import OneLogin_Saml2_Constants
from app.utils.onelogin.saml2.errors import OneLogin_Saml2_Error
from app.utils.onelogin.saml2.metadata import OneLogin_Saml2_Metadata
from app.utils.onelogin.saml2.utils import OneLogin_Saml2_Utils
from app.utils.onelogin.saml2.xml_utils import OneLogin_Saml2_XML
try:
import ujson as json
except ImportError:
import json
try:
basestring
except NameError:
basestring = str
# Regex from Django Software Foundation and individual contributors.
# Released under a BSD 3-Clause License
url_regex = re.compile(
r'^(?:[a-z0-9\.\-]*)://' # scheme is validated separately
r'(?:(?:[A-Z0-9_](?:[A-Z0-9-_]{0,61}[A-Z0-9_])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
url_schemes = ['http', 'https', 'ftp', 'ftps']
def validate_url(url):
"""
    Auxiliary method to validate a URL
    :param url: The URL to be validated
:type url: string
:returns: True if the url is valid
:rtype: bool
"""
scheme = url.split('://')[0].lower()
if scheme not in url_schemes:
return False
if not bool(url_regex.search(url)):
return False
return True
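# --- Doctest-style examples (illustrative, not part of the original toolkit) --
# validate_url() accepts only http/https/ftp/ftps URLs that also match the
# regex above:
#
#     validate_url("https://idp.example.com/sso")  -> True
#     validate_url("javascript:alert(1)")          -> False  (scheme rejected)
#     validate_url("https://")                     -> False  (missing host)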
class OneLogin_Saml2_Settings(object):
"""
Handles the settings of the Python toolkits.
"""
def __init__(self, settings=None, custom_base_path=None, sp_validation_only=False):
"""
Initializes the settings:
- Sets the paths of the different folders
- Loads settings info from settings file or array/object provided
:param settings: SAML Toolkit Settings
:type settings: dict
:param custom_base_path: Path where are stored the settings file and the cert folder
:type custom_base_path: string
:param sp_validation_only: Avoid the IdP validation
:type sp_validation_only: boolean
"""
self.__sp_validation_only = sp_validation_only
self.__paths = {}
self.__strict = False
self.__debug = False
self.__sp = {}
self.__idp = {}
self.__security = {}
self.__contacts = {}
self.__organization = {}
self.__errors = []
self.__load_paths(base_path=custom_base_path)
self.__update_paths(settings)
if settings is None:
try:
valid = self.__load_settings_from_file()
except Exception as e:
raise e
if not valid:
raise OneLogin_Saml2_Error(
'Invalid dict settings at the file: %s',
OneLogin_Saml2_Error.SETTINGS_INVALID,
','.join(self.__errors)
)
elif isinstance(settings, dict):
if not self.__load_settings_from_dict(settings):
raise OneLogin_Saml2_Error(
'Invalid dict settings: %s',
OneLogin_Saml2_Error.SETTINGS_INVALID,
','.join(self.__errors)
)
else:
raise OneLogin_Saml2_Error(
'Unsupported settings object',
OneLogin_Saml2_Error.UNSUPPORTED_SETTINGS_OBJECT
)
self.format_idp_cert()
if 'x509certMulti' in self.__idp:
self.format_idp_cert_multi()
self.format_sp_cert()
if 'x509certNew' in self.__sp:
self.format_sp_cert_new()
self.format_sp_key()
def __load_paths(self, base_path=None):
"""
Set the paths of the different folders
"""
if base_path is None:
base_path = dirname(dirname(dirname(__file__)))
if not base_path.endswith(sep):
base_path += sep
self.__paths = {
'base': base_path,
'cert': base_path + 'certs' + sep,
'lib': base_path + 'lib' + sep,
'extlib': base_path + 'extlib' + sep,
}
def __update_paths(self, settings):
"""
Set custom paths if necessary
"""
if not isinstance(settings, dict):
return
if 'custom_base_path' in settings:
base_path = settings['custom_base_path']
base_path = join(dirname(__file__), base_path)
self.__load_paths(base_path)
def get_base_path(self):
"""
Returns base path
:return: The base toolkit folder path
:rtype: string
"""
return self.__paths['base']
def get_cert_path(self):
"""
Returns cert path
:return: The cert folder path
:rtype: string
"""
return self.__paths['cert']
def get_lib_path(self):
"""
Returns lib path
:return: The library folder path
:rtype: string
"""
return self.__paths['lib']
def get_ext_lib_path(self):
"""
Returns external lib path
:return: The external library folder path
:rtype: string
"""
return self.__paths['extlib']
def get_schemas_path(self):
"""
Returns schema path
:return: The schema folder path
:rtype: string
"""
return self.__paths['lib'] + 'schemas/'
def __load_settings_from_dict(self, settings):
"""
Loads settings info from a settings Dict
:param settings: SAML Toolkit Settings
:type settings: dict
:returns: True if the settings info is valid
:rtype: boolean
"""
errors = self.check_settings(settings)
if len(errors) == 0:
self.__errors = []
self.__sp = settings['sp']
self.__idp = settings.get('idp', {})
self.__strict = settings.get('strict', False)
self.__debug = settings.get('debug', False)
self.__security = settings.get('security', {})
self.__contacts = settings.get('contactPerson', {})
self.__organization = settings.get('organization', {})
self.__add_default_values()
return True
self.__errors = errors
return False
def __load_settings_from_file(self):
"""
Loads settings info from the settings json file
:returns: True if the settings info is valid
:rtype: boolean
"""
filename = self.get_base_path() + 'settings.json'
if not exists(filename):
raise OneLogin_Saml2_Error(
'Settings file not found: %s',
OneLogin_Saml2_Error.SETTINGS_FILE_NOT_FOUND,
filename
)
        # Unlike the PHP toolkit, where the settings live in a PHP file that is
        # included directly, here they are loaded from a JSON file.
with open(filename, 'r') as json_data:
settings = json.loads(json_data.read())
advanced_filename = self.get_base_path() + 'advanced_settings.json'
if exists(advanced_filename):
with open(advanced_filename, 'r') as json_data:
settings.update(json.loads(json_data.read())) # Merge settings
return self.__load_settings_from_dict(settings)
def __add_default_values(self):
"""
Add default values if the settings info is not complete
"""
self.__sp.setdefault('assertionConsumerService', {})
self.__sp['assertionConsumerService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_POST)
self.__sp.setdefault('attributeConsumingService', {})
self.__sp.setdefault('singleLogoutService', {})
self.__sp['singleLogoutService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT)
self.__idp.setdefault('singleLogoutService', {})
# Related to nameID
self.__sp.setdefault('NameIDFormat', OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED)
self.__security.setdefault('nameIdEncrypted', False)
# Metadata format
self.__security.setdefault('metadataValidUntil', None) # None means use default
self.__security.setdefault('metadataCacheDuration', None) # None means use default
# Sign provided
self.__security.setdefault('authnRequestsSigned', False)
self.__security.setdefault('logoutRequestSigned', False)
self.__security.setdefault('logoutResponseSigned', False)
self.__security.setdefault('signMetadata', False)
# Sign expected
self.__security.setdefault('wantMessagesSigned', False)
self.__security.setdefault('wantAssertionsSigned', False)
# NameID element expected
self.__security.setdefault('wantNameId', True)
# Encrypt expected
self.__security.setdefault('wantAssertionsEncrypted', False)
self.__security.setdefault('wantNameIdEncrypted', False)
# Signature Algorithm
self.__security.setdefault('signatureAlgorithm', OneLogin_Saml2_Constants.RSA_SHA1)
# Digest Algorithm
self.__security.setdefault('digestAlgorithm', OneLogin_Saml2_Constants.SHA1)
# AttributeStatement required by default
self.__security.setdefault('wantAttributeStatement', True)
self.__idp.setdefault('x509cert', '')
self.__idp.setdefault('certFingerprint', '')
self.__idp.setdefault('certFingerprintAlgorithm', 'sha1')
self.__sp.setdefault('x509cert', '')
self.__sp.setdefault('privateKey', '')
self.__security.setdefault('requestedAuthnContext', True)
self.__security.setdefault('failOnAuthnContextMismatch', False)
def check_settings(self, settings):
"""
Checks the settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the settings data
:rtype: list
"""
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or len(settings) == 0:
errors.append('invalid_syntax')
else:
if not self.__sp_validation_only:
errors += self.check_idp_settings(settings)
sp_errors = self.check_sp_settings(settings)
errors += sp_errors
return errors
def check_idp_settings(self, settings):
"""
Checks the IdP settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the IdP settings data
:rtype: list
"""
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or len(settings) == 0:
errors.append('invalid_syntax')
else:
if not settings.get('idp'):
errors.append('idp_not_found')
else:
idp = settings['idp']
if not idp.get('entityId'):
errors.append('idp_entityId_not_found')
if not idp.get('singleSignOnService', {}).get('url'):
errors.append('idp_sso_not_found')
elif not validate_url(idp['singleSignOnService']['url']):
errors.append('idp_sso_url_invalid')
slo_url = idp.get('singleLogoutService', {}).get('url')
if slo_url and not validate_url(slo_url):
errors.append('idp_slo_url_invalid')
if 'security' in settings:
security = settings['security']
exists_x509 = bool(idp.get('x509cert'))
exists_fingerprint = bool(idp.get('certFingerprint'))
exists_multix509sign = 'x509certMulti' in idp and \
'signing' in idp['x509certMulti'] and \
idp['x509certMulti']['signing']
exists_multix509enc = 'x509certMulti' in idp and \
'encryption' in idp['x509certMulti'] and \
idp['x509certMulti']['encryption']
want_assert_sign = bool(security.get('wantAssertionsSigned'))
want_mes_signed = bool(security.get('wantMessagesSigned'))
nameid_enc = bool(security.get('nameIdEncrypted'))
if (want_assert_sign or want_mes_signed) and \
not(exists_x509 or exists_fingerprint or exists_multix509sign):
errors.append('idp_cert_or_fingerprint_not_found_and_required')
if nameid_enc and not (exists_x509 or exists_multix509enc):
errors.append('idp_cert_not_found_and_required')
return errors
def check_sp_settings(self, settings):
"""
Checks the SP settings info.
:param settings: Dict with settings data
:type settings: dict
:returns: Errors found on the SP settings data
:rtype: list
"""
assert isinstance(settings, dict)
errors = []
if not isinstance(settings, dict) or not settings:
errors.append('invalid_syntax')
else:
if not settings.get('sp'):
errors.append('sp_not_found')
else:
# check_sp_certs uses self.__sp so I add it
old_sp = self.__sp
self.__sp = settings['sp']
sp = settings['sp']
security = settings.get('security', {})
if not sp.get('entityId'):
errors.append('sp_entityId_not_found')
if not sp.get('assertionConsumerService', {}).get('url'):
errors.append('sp_acs_not_found')
elif not validate_url(sp['assertionConsumerService']['url']):
errors.append('sp_acs_url_invalid')
if sp.get('attributeConsumingService'):
attributeConsumingService = sp['attributeConsumingService']
if 'serviceName' not in attributeConsumingService:
errors.append('sp_attributeConsumingService_serviceName_not_found')
elif not isinstance(attributeConsumingService['serviceName'], basestring):
errors.append('sp_attributeConsumingService_serviceName_type_invalid')
if 'requestedAttributes' not in attributeConsumingService:
errors.append('sp_attributeConsumingService_requestedAttributes_not_found')
elif not isinstance(attributeConsumingService['requestedAttributes'], list):
errors.append('sp_attributeConsumingService_serviceName_type_invalid')
else:
for req_attrib in attributeConsumingService['requestedAttributes']:
if 'name' not in req_attrib:
errors.append('sp_attributeConsumingService_requestedAttributes_name_not_found')
if 'name' in req_attrib and not req_attrib['name'].strip():
errors.append('sp_attributeConsumingService_requestedAttributes_name_invalid')
if 'attributeValue' in req_attrib and type(req_attrib['attributeValue']) != list:
errors.append('sp_attributeConsumingService_requestedAttributes_attributeValue_type_invalid')
if 'isRequired' in req_attrib and type(req_attrib['isRequired']) != bool:
errors.append('sp_attributeConsumingService_requestedAttributes_isRequired_type_invalid')
if "serviceDescription" in attributeConsumingService and not isinstance(attributeConsumingService['serviceDescription'], basestring):
errors.append('sp_attributeConsumingService_serviceDescription_type_invalid')
slo_url = sp.get('singleLogoutService', {}).get('url')
if slo_url and not validate_url(slo_url):
errors.append('sp_sls_url_invalid')
if 'signMetadata' in security and isinstance(security['signMetadata'], dict):
if 'keyFileName' not in security['signMetadata'] or \
'certFileName' not in security['signMetadata']:
errors.append('sp_signMetadata_invalid')
authn_sign = bool(security.get('authnRequestsSigned'))
logout_req_sign = bool(security.get('logoutRequestSigned'))
logout_res_sign = bool(security.get('logoutResponseSigned'))
want_assert_enc = bool(security.get('wantAssertionsEncrypted'))
want_nameid_enc = bool(security.get('wantNameIdEncrypted'))
if not self.check_sp_certs():
if authn_sign or logout_req_sign or logout_res_sign or \
want_assert_enc or want_nameid_enc:
errors.append('sp_cert_not_found_and_required')
if 'contactPerson' in settings:
types = settings['contactPerson']
valid_types = ['technical', 'support', 'administrative', 'billing', 'other']
for c_type in types:
if c_type not in valid_types:
errors.append('contact_type_invalid')
break
for c_type in settings['contactPerson']:
contact = settings['contactPerson'][c_type]
if ('givenName' not in contact or len(contact['givenName']) == 0) or \
('emailAddress' not in contact or len(contact['emailAddress']) == 0):
errors.append('contact_not_enought_data')
break
if 'organization' in settings:
for org in settings['organization']:
organization = settings['organization'][org]
if ('name' not in organization or len(organization['name']) == 0) or \
('displayname' not in organization or len(organization['displayname']) == 0) or \
('url' not in organization or len(organization['url']) == 0):
errors.append('organization_not_enought_data')
break
# Restores the value that had the self.__sp
if 'old_sp' in locals():
self.__sp = old_sp
return errors
def check_sp_certs(self):
"""
        Checks if the x509 certs of the SP exist and are valid.
        :returns: If the x509 certs of the SP exist and are valid
:rtype: boolean
"""
key = self.get_sp_key()
cert = self.get_sp_cert()
return key is not None and cert is not None
def get_sp_key(self):
"""
Returns the x509 private key of the SP.
:returns: SP private key
:rtype: string or None
"""
key = self.__sp.get('privateKey')
key_file_name = self.__paths['cert'] + 'sp.key'
if not key and exists(key_file_name):
with open(key_file_name) as f:
key = f.read()
return key or None
def get_sp_cert(self):
"""
Returns the x509 public cert of the SP.
:returns: SP public cert
:rtype: string or None
"""
cert = self.__sp.get('x509cert')
cert_file_name = self.__paths['cert'] + 'sp.crt'
if not cert and exists(cert_file_name):
with open(cert_file_name) as f:
cert = f.read()
return cert or None
def get_sp_cert_new(self):
"""
        Returns the x509 public cert of the SP that is planned
        to be used soon instead of the current public cert
:returns: SP public cert new
:rtype: string or None
"""
cert = self.__sp.get('x509certNew')
cert_file_name = self.__paths['cert'] + 'sp_new.crt'
if not cert and exists(cert_file_name):
with open(cert_file_name) as f:
cert = f.read()
return cert or None
def get_idp_cert(self):
"""
Returns the x509 public cert of the IdP.
:returns: IdP public cert
:rtype: string
"""
return self.__idp.get('x509cert')
def get_idp_data(self):
"""
Gets the IdP data.
:returns: IdP info
:rtype: dict
"""
return self.__idp
def get_sp_data(self):
"""
Gets the SP data.
:returns: SP info
:rtype: dict
"""
return self.__sp
def get_security_data(self):
"""
Gets security data.
:returns: Security info
:rtype: dict
"""
return self.__security
def get_contacts(self):
"""
Gets contact data.
:returns: Contacts info
:rtype: dict
"""
return self.__contacts
def get_organization(self):
"""
Gets organization data.
:returns: Organization info
:rtype: dict
"""
return self.__organization
def get_sp_metadata(self):
"""
Gets the SP metadata. The XML representation.
:returns: SP metadata (xml)
:rtype: string
"""
metadata = OneLogin_Saml2_Metadata.builder(
self.__sp, self.__security['authnRequestsSigned'],
self.__security['wantAssertionsSigned'],
self.__security['metadataValidUntil'],
self.__security['metadataCacheDuration'],
self.get_contacts(), self.get_organization()
)
add_encryption = self.__security['wantNameIdEncrypted'] or self.__security['wantAssertionsEncrypted']
cert_new = self.get_sp_cert_new()
metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert_new, add_encryption)
cert = self.get_sp_cert()
metadata = OneLogin_Saml2_Metadata.add_x509_key_descriptors(metadata, cert, add_encryption)
# Sign metadata
if 'signMetadata' in self.__security and self.__security['signMetadata'] is not False:
if self.__security['signMetadata'] is True:
# Use the SP's normal key to sign the metadata:
if not cert:
raise OneLogin_Saml2_Error(
'Cannot sign metadata: missing SP public key certificate.',
OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND
)
cert_metadata = cert
key_metadata = self.get_sp_key()
if not key_metadata:
raise OneLogin_Saml2_Error(
'Cannot sign metadata: missing SP private key.',
OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND
)
else:
# Use a custom key to sign the metadata:
if ('keyFileName' not in self.__security['signMetadata'] or
'certFileName' not in self.__security['signMetadata']):
raise OneLogin_Saml2_Error(
'Invalid Setting: signMetadata value of the sp is not valid',
OneLogin_Saml2_Error.SETTINGS_INVALID_SYNTAX
)
key_file_name = self.__security['signMetadata']['keyFileName']
cert_file_name = self.__security['signMetadata']['certFileName']
key_metadata_file = self.__paths['cert'] + key_file_name
cert_metadata_file = self.__paths['cert'] + cert_file_name
try:
with open(key_metadata_file, 'r') as f_metadata_key:
key_metadata = f_metadata_key.read()
except IOError:
raise OneLogin_Saml2_Error(
'Private key file not readable: %s',
OneLogin_Saml2_Error.PRIVATE_KEY_FILE_NOT_FOUND,
key_metadata_file
)
try:
with open(cert_metadata_file, 'r') as f_metadata_cert:
cert_metadata = f_metadata_cert.read()
except IOError:
raise OneLogin_Saml2_Error(
'Public cert file not readable: %s',
OneLogin_Saml2_Error.PUBLIC_CERT_FILE_NOT_FOUND,
cert_metadata_file
)
signature_algorithm = self.__security['signatureAlgorithm']
digest_algorithm = self.__security['digestAlgorithm']
metadata = OneLogin_Saml2_Metadata.sign_metadata(metadata, key_metadata, cert_metadata, signature_algorithm, digest_algorithm)
return metadata
def validate_metadata(self, xml):
"""
Validates an XML SP Metadata.
:param xml: Metadata's XML that will be validate
:type xml: string
:returns: The list of found errors
:rtype: list
"""
assert isinstance(xml, compat.text_types)
if len(xml) == 0:
raise Exception('Empty string supplied as input')
errors = []
root = OneLogin_Saml2_XML.validate_xml(xml, 'saml-schema-metadata-2.0.xsd', self.__debug)
if isinstance(root, str):
errors.append(root)
else:
if root.tag != '{%s}EntityDescriptor' % OneLogin_Saml2_Constants.NS_MD:
errors.append('noEntityDescriptor_xml')
else:
if (len(root.findall('.//md:SPSSODescriptor', namespaces=OneLogin_Saml2_Constants.NSMAP))) != 1:
errors.append('onlySPSSODescriptor_allowed_xml')
else:
valid_until, cache_duration = root.get('validUntil'), root.get('cacheDuration')
if valid_until:
valid_until = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
expire_time = OneLogin_Saml2_Utils.get_expire_time(cache_duration, valid_until)
if expire_time is not None and int(time()) > int(expire_time):
errors.append('expired_xml')
# TODO: Validate Sign
return errors
def format_idp_cert(self):
"""
Formats the IdP cert.
"""
self.__idp['x509cert'] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509cert'])
def format_idp_cert_multi(self):
"""
        Formats the multiple IdP certs.
"""
if 'x509certMulti' in self.__idp:
if 'signing' in self.__idp['x509certMulti']:
for idx in range(len(self.__idp['x509certMulti']['signing'])):
self.__idp['x509certMulti']['signing'][idx] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509certMulti']['signing'][idx])
if 'encryption' in self.__idp['x509certMulti']:
for idx in range(len(self.__idp['x509certMulti']['encryption'])):
self.__idp['x509certMulti']['encryption'][idx] = OneLogin_Saml2_Utils.format_cert(self.__idp['x509certMulti']['encryption'][idx])
def format_sp_cert(self):
"""
Formats the SP cert.
"""
self.__sp['x509cert'] = OneLogin_Saml2_Utils.format_cert(self.__sp['x509cert'])
def format_sp_cert_new(self):
"""
        Formats the new SP cert.
"""
self.__sp['x509certNew'] = OneLogin_Saml2_Utils.format_cert(self.__sp['x509certNew'])
def format_sp_key(self):
"""
Formats the private key.
"""
self.__sp['privateKey'] = OneLogin_Saml2_Utils.format_private_key(self.__sp['privateKey'])
def get_errors(self):
"""
Returns an array with the errors, the array is empty when the settings is ok.
:returns: Errors
:rtype: list
"""
return self.__errors
def set_strict(self, value):
"""
Activates or deactivates the strict mode.
:param value: Strict parameter
:type value: boolean
"""
assert isinstance(value, bool)
self.__strict = value
def is_strict(self):
"""
Returns if the 'strict' mode is active.
:returns: Strict parameter
:rtype: boolean
"""
return self.__strict
def is_debug_active(self):
"""
Returns if the debug is active.
:returns: Debug parameter
:rtype: boolean
"""
return self.__debug
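# --- Usage sketch (illustrative only; URLs and entity IDs are placeholders) ----
# A minimal settings dict accepted by the class above.  With
# sp_validation_only=True the IdP block is not required, so only the SP entity
# ID and ACS endpoint need to be present.
if __name__ == "__main__":
    example_settings = {
        "strict": True,
        "sp": {
            "entityId": "https://sp.example.com/metadata/",
            "assertionConsumerService": {"url": "https://sp.example.com/acs"},
        },
    }
    settings = OneLogin_Saml2_Settings(example_settings, sp_validation_only=True)
    print(settings.is_strict(), settings.get_sp_data()["entityId"])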
| 36.322055 | 153 | 0.583198 | [
"MIT"
] | nycrecords/intranet | app/utils/onelogin/saml2/settings.py | 28,985 | Python |
from emonitor.utils import Module
from emonitor.extensions import babel
from .content_frontend import getFrontendContent, getFrontendData
class LocationsModule(Module):
info = dict(area=['frontend'], name='locations', path='locations', icon='fa-code-fork', version='0.1')
def __repr__(self):
return "locations"
def __init__(self, app):
Module.__init__(self, app)
# add template path
app.jinja_loader.searchpath.append("%s/emonitor/modules/locations/templates" % app.config.get('PROJECT_ROOT'))
# translations
babel.gettext(u'module.locations')
def frontendContent(self):
return 1
def getFrontendContent(self, **params):
return getFrontendContent(**params)
def getFrontendData(self):
return getFrontendData(self)
| 29.142857 | 118 | 0.693627 | [
"BSD-3-Clause"
] | Durburz/eMonitor | emonitor/modules/locations/__init__.py | 816 | Python |
import functools
import os
import random
import matplotlib.pyplot as plt
import networkx as nx
def make_graph(path):
G = nx.DiGraph()
with open(path, 'r') as f:
lines = f.readlines()
# random.seed(0)
sample_nums = int(len(lines) * 0.00006)
lines = random.sample(lines, sample_nums)
lines = [line.strip() for line in lines]
for line in lines:
edge_node = line.split(' ')
source = int(edge_node[0])
target = int(edge_node[1])
G.add_edge(source, target)
return G
def degree_centrality(G):
    # Degree centrality of each node
if len(G) <= 1:
return {n: 1 for n in G}
s = 1.0 / (len(G) - 1.0)
centrality = {n: d * s for n, d in G.degree()}
return centrality
def closeness_centrality(G, u=None, distance=None, wf_improved=True):
    # Closeness centrality of each node
if G.is_directed():
G = G.reverse()
if distance is not None:
path_length = functools.partial(
nx.single_source_dijkstra_path_length, weight=distance
)
else:
path_length = nx.single_source_shortest_path_length
if u is None:
nodes = G.nodes
else:
nodes = [u]
closeness_centrality = {}
for n in nodes:
sp = path_length(G, n)
totsp = sum(sp.values())
len_G = len(G)
_closeness_centrality = 0.0
if totsp > 0.0 and len_G > 1:
_closeness_centrality = (len(sp) - 1.0) / totsp
if wf_improved:
s = (len(sp) - 1.0) / (len_G - 1)
_closeness_centrality *= s
closeness_centrality[n] = _closeness_centrality
if u is not None:
return closeness_centrality[u]
else:
return closeness_centrality
def core_number(G):
    # Core number (k-core) of each node
degrees = dict(G.degree())
nodes = sorted(degrees, key=degrees.get)
bin_boundaries = [0]
curr_degree = 0
for i, v in enumerate(nodes):
if degrees[v] > curr_degree:
bin_boundaries.extend([i] * (degrees[v] - curr_degree))
curr_degree = degrees[v]
node_pos = {v: pos for pos, v in enumerate(nodes)}
core = degrees
nbrs = {v: list(nx.all_neighbors(G, v)) for v in G}
for v in nodes:
for u in nbrs[v]:
if core[u] > core[v]:
nbrs[u].remove(v)
pos = node_pos[u]
bin_start = bin_boundaries[core[u]]
node_pos[u] = bin_start
node_pos[nodes[bin_start]] = pos
nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
bin_boundaries[core[u]] += 1
core[u] -= 1
return core
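# --- Worked example (illustrative) ---------------------------------------------
# core_number() peels off low-degree nodes first; on an undirected triangle with
# one pendant node the pendant ends up in the 1-core and the triangle in the
# 2-core:
#
#     H = nx.Graph([(1, 2), (2, 3), (3, 1), (3, 4)])
#     core_number(H)   # -> {1: 2, 2: 2, 3: 2, 4: 1}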
def pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1.0e-6, nstart=None, weight="weight",
dangling=None):
    # PageRank value of each node
if len(G) == 0:
return {}
if not G.is_directed():
D = G.to_directed()
else:
D = G
W = nx.stochastic_graph(D, weight=weight)
N = W.number_of_nodes()
if nstart is None:
x = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(nstart.values()))
x = {k: v / s for k, v in nstart.items()}
if personalization is None:
p = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(personalization.values()))
p = {k: v / s for k, v in personalization.items()}
if dangling is None:
dangling_weights = p
else:
s = float(sum(dangling.values()))
dangling_weights = {k: v / s for k, v in dangling.items()}
dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
for _ in range(max_iter):
xlast = x
x = dict.fromkeys(xlast.keys(), 0)
danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
for n in x:
for nbr in W[n]:
x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
err = sum([abs(x[n] - xlast[n]) for n in x])
if err < N * tol:
return x
raise nx.PowerIterationFailedConvergence(max_iter)
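# --- Worked example (illustrative) ---------------------------------------------
# On the directed 3-cycle 1 -> 2 -> 3 -> 1 every node has identical structure,
# so the power iteration above converges to the uniform distribution:
#
#     C = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
#     pagerank(C)   # -> {1: 0.333..., 2: 0.333..., 3: 0.333...}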
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
    # Hub and authority values (HITS) of each node
if len(G) == 0:
return {}, {}
if nstart is None:
h = dict.fromkeys(G, 1.0 / G.number_of_nodes())
else:
h = nstart
s = 1.0 / sum(h.values())
for k in h:
h[k] *= s
for _ in range(max_iter):
hlast = h
h = dict.fromkeys(hlast.keys(), 0)
a = dict.fromkeys(hlast.keys(), 0)
for n in h:
for nbr in G[n]:
a[nbr] += hlast[n] * G[n][nbr].get("weight", 1)
for n in h:
for nbr in G[n]:
h[n] += a[nbr] * G[n][nbr].get("weight", 1)
s = 1.0 / max(h.values())
for n in h:
h[n] *= s
s = 1.0 / max(a.values())
for n in a:
a[n] *= s
err = sum([abs(h[n] - hlast[n]) for n in h])
if err < tol:
break
else:
raise nx.PowerIterationFailedConvergence(max_iter)
if normalized:
s = 1.0 / sum(a.values())
for n in a:
a[n] *= s
s = 1.0 / sum(h.values())
for n in h:
h[n] *= s
return h, a
def metrics_fuse(G):
degree = degree_centrality(G)
closeness = closeness_centrality(G)
    betweenness = nx.betweenness_centrality(G)  # Betweenness centrality of each node
core = core_number(G)
pageranks = pagerank(G)
hubs, authorities = hits(G)
fused = dict()
for node in G.nodes:
deg = degree[node]
cl = closeness[node]
bet = betweenness[node]
co = core[node]
pr = pageranks[node]
auth = authorities[node]
M = 0.05 * deg + 0.15 * cl + 0.1 * bet + 0.3 * co + 0.25 * pr + 0.15 * auth
fused[node] = M
pageranks = sorted(pageranks.items(), key=lambda x: x[1], reverse=True)
    print("Top 10 most influential nodes according to PageRank:")
for i in range(10):
        print("Node {}".format(pageranks[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in pageranks[:10]]
other_nodes = [k for k, v in pageranks[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in pageranks[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./pagerank_result.png")
plt.show()
print("---------------------------------------------")
authorities = sorted(authorities.items(), key=lambda x: x[1], reverse=True)
print("使用HITS算法,影响力前10的节点为:")
for i in range(10):
print("节点 {}".format(authorities[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in authorities[:10]]
other_nodes = [k for k, v in authorities[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in authorities[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./hits_result.png")
plt.show()
print("---------------------------------------------")
fused = sorted(fused.items(), key=lambda x: x[1], reverse=True)
print("使用混合算法,影响力前10的节点为:")
for i in range(10):
print("节点 {}".format(fused[i][0]))
pos = nx.random_layout(G)
top_nodes = [k for k, v in fused[:10]]
other_nodes = [k for k, v in fused[10:]]
nx.draw_networkx_nodes(G, pos, top_nodes, node_size=200, node_color='Red', alpha=0.6)
nx.draw_networkx_nodes(G, pos, other_nodes, node_size=200, node_color='Green', alpha=0.6)
nx.draw_networkx_edges(G, pos)
labels = dict()
for k, v in fused[:10]:
labels[k] = k
nx.draw_networkx_labels(G, pos, labels=labels)
plt.savefig("./fused_result.png")
plt.show()
print("---------------------------------------------")
return fused
if __name__ == '__main__':
path = './课程设计数据集.txt'
if not os.path.exists(path):
        print('Dataset file not found')
exit(1)
G = make_graph(path)
metrics_fuse(G)
| 31.066914 | 105 | 0.551514 | [
"MIT"
] | showerhhh/ComplexNetwork | homework_3/main.py | 8,559 | Python |
from nlu import *
from nlu.pipe_components import SparkNLUComponent
from sparknlp.annotator import *
class Lemmatizer(SparkNLUComponent):
def __init__(self,component_name='lemma', language='en', component_type='lemmatizer', get_default=False,model = None, sparknlp_reference=''):
component_name = 'lemmatizer'
SparkNLUComponent.__init__(self,component_name,component_type)
# component_name = utils.lower_case(component_name) TODO
if model != None : self.model = model
else :
if 'lemma' in component_name :
from nlu import SparkNLPLemmatizer
if get_default : self.model = SparkNLPLemmatizer.get_default_model()
else : self.model = SparkNLPLemmatizer.get_pretrained_model(sparknlp_reference,language)
| 44.888889 | 145 | 0.709158 | [
"Apache-2.0"
] | sumanthratna/nlu | nlu/components/lemmatizer.py | 808 | Python |
import base64
import os
import tkinter as tk
import tkinter.messagebox as msg
import tkinter.ttk as ttk
from functools import partial
from chatwindow import ChatWindow
from requester import Requester
from avatarwindow import AvatarWindow
from addfriendwindow import AddFriendWindow
friend_avatars_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/friends"))
default_avatar_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "images/default.png"))
class FriendsList(tk.Tk):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.title('Tk Chat')
self.geometry('700x500')
self.menu = tk.Menu(self, bg="lightgrey", fg="black", tearoff=0)
self.friends_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0)
self.friends_menu.add_command(label="Add Friend", command=self.show_add_friend_window)
self.avatar_menu = tk.Menu(self.menu, fg="black", bg="lightgrey", tearoff=0)
self.avatar_menu.add_command(label="Change Avatar", command=self.change_avatar)
self.menu.add_cascade(label="Friends", menu=self.friends_menu)
self.menu.add_cascade(label="Avatar", menu=self.avatar_menu)
self.requester = Requester()
self.show_login_screen()
def show_login_screen(self):
self.login_frame = ttk.Frame(self)
username_label = ttk.Label(self.login_frame, text="Username")
self.username_entry = ttk.Entry(self.login_frame)
self.username_entry.focus_force()
real_name_label = ttk.Label(self.login_frame, text="Real Name")
self.real_name_entry = ttk.Entry(self.login_frame)
login_button = ttk.Button(self.login_frame, text="Login", command=self.login)
create_account_button = ttk.Button(self.login_frame, text="Create Account", command=self.create_account)
username_label.grid(row=0, column=0, sticky='e')
self.username_entry.grid(row=0, column=1)
real_name_label.grid(row=1, column=0, sticky='e')
self.real_name_entry.grid(row=1, column=1)
login_button.grid(row=2, column=0, sticky='e')
create_account_button.grid(row=2, column=1)
for i in range(3):
tk.Grid.rowconfigure(self.login_frame, i, weight=1)
tk.Grid.columnconfigure(self.login_frame, i, weight=1)
self.login_frame.pack(fill=tk.BOTH, expand=1)
self.login_event = self.bind("<Return>", self.login)
def login(self, event=None):
username = self.username_entry.get()
real_name = self.real_name_entry.get()
if self.requester.login(username, real_name):
self.username = username
self.real_name = real_name
self.unbind("<Return>", self.login_event)
self.show_friends()
else:
msg.showerror("Failed", f"Could not log in as {username}")
def create_account(self):
username = self.username_entry.get()
real_name = self.real_name_entry.get()
if self.requester.create_account(username, real_name):
self.username = username
self.real_name = real_name
self.show_friends()
else:
msg.showerror("Failed", "Account already exists!")
def show_friends(self):
self.configure(menu=self.menu)
self.login_frame.pack_forget()
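        # Scrollable friends list: a plain Frame is embedded in a Canvas window and
        # paired with a vertical Scrollbar, the usual Tkinter pattern for scrolling
        # arbitrary widgets.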
self.canvas = tk.Canvas(self, bg="white")
self.canvas_frame = tk.Frame(self.canvas)
self.scrollbar = ttk.Scrollbar(self, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.scrollbar.set)
self.scrollbar.pack(side=tk.LEFT, fill=tk.Y)
self.canvas.pack(side=tk.LEFT, expand=1, fill=tk.BOTH)
self.friends_area = self.canvas.create_window((0, 0), window=self.canvas_frame, anchor="nw")
self.bind_events()
self.load_friends()
def bind_events(self):
self.bind('<Configure>', self.on_frame_resized)
self.canvas.bind('<Configure>', self.friends_width)
def friends_width(self, event):
canvas_width = event.width
self.canvas.itemconfig(self.friends_area, width=canvas_width)
def on_frame_resized(self, event=None):
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def load_friends(self):
my_friends = self.requester.get_friends(self.username)
for user in my_friends["friends"]:
if user['username'] != self.username:
friend_frame = ttk.Frame(self.canvas_frame)
friend_avatar_path = os.path.join(friend_avatars_dir, f"{user['username']}.png")
if user["avatar"]:
with open(friend_avatar_path, 'wb') as friend_avatar:
img = base64.urlsafe_b64decode(user['avatar'])
friend_avatar.write(img)
else:
friend_avatar_path = default_avatar_path
profile_photo = tk.PhotoImage(file=friend_avatar_path)
profile_photo_label = ttk.Label(friend_frame, image=profile_photo)
profile_photo_label.image = profile_photo
friend_name = ttk.Label(friend_frame, text=user['real_name'], anchor=tk.W)
message_this_friend = partial(self.open_chat_window, username=user["username"], real_name=user["real_name"], avatar=friend_avatar_path)
block_this_friend = partial(self.block_friend, username=user["username"])
message_button = ttk.Button(friend_frame, text="Chat", command=message_this_friend)
block_button = ttk.Button(friend_frame, text="Block", command=block_this_friend)
profile_photo_label.pack(side=tk.LEFT)
friend_name.pack(side=tk.LEFT)
message_button.pack(side=tk.RIGHT)
block_button.pack(side=tk.RIGHT, padx=(0, 30))
friend_frame.pack(fill=tk.X, expand=1)
def reload_friends(self):
for child in self.canvas_frame.winfo_children():
child.pack_forget()
self.load_friends()
def show_add_friend_window(self):
AddFriendWindow(self)
def add_friend(self, username):
if self.requester.add_friend(self.username, username):
msg.showinfo("Friend Added", "Friend Added")
success = True
self.reload_friends()
else:
msg.showerror("Add Failed", "Friend was not found")
success = False
return success
def open_chat_window(self, username, real_name, avatar):
cw = ChatWindow(self, real_name, username, avatar)
def block_friend(self, username):
self.requester.block_friend(self.username, username)
self.reload_friends()
def change_avatar(self):
AvatarWindow(self)
if __name__ == '__main__':
f = FriendsList()
f.mainloop()
| 35.891192 | 151 | 0.651653 | [
"MIT"
] | PacktPublishing/Tkinter-GUI-Programming-by-Example | Chapter10/Ch10/friendslist.py | 6,927 | Python |
#!/usr/bin/env python
"""
Solution to Project Euler Problem
http://projecteuler.net/
by Apalala <[email protected]>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
We shall say that an n-digit number is pandigital if it makes use of all
the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital
and is also prime.
What is the largest n-digit pandigital prime that exists?
"""
from digits import is_pandigital
from primality import primes_upto, is_prime
def pandigital_primes(digits=7):
for p in primes_upto(int("9" * digits)):
if is_pandigital(p):
yield p
def test():
assert not is_prime(123)
assert not is_prime(132)
assert not is_prime(213)
assert not is_prime(231)
assert not is_prime(312)
assert not is_prime(321)
assert is_prime(2143)
assert is_pandigital(2143)
assert 2143 in set(pandigital_primes(digits=4))
def run():
print(list(pandigital_primes())[-1])
if __name__ == "__main__":
test()
run()
| 22.391304 | 73 | 0.707767 | [
"MIT"
] | Web-Dev-Collaborative/PYTHON_PRAC | projecteuler/euler041_pandigital_prime.py | 1,030 | Python |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Zyxel.ZyNOS.get_inventory
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
name = "Zyxel.ZyNOS.get_inventory"
interface = IGetInventory
def remove_non_ascii(self, s, sub="?"):
return "".join([i if ord(i) < 128 else sub for i in s])
def execute(self):
objects = []
v = self.scripts.get_version()
part_no = v["platform"]
vendor = v["vendor"]
p = {
"type": "CHASSIS",
"number": 1,
"vendor": vendor,
"description": part_no,
"part_no": [part_no],
"builtin": False,
}
if v.get("attributes", {}).get("Serial Number", ""):
p["serial"] = v["attributes"]["Serial Number"]
objects += [p]
objects += self.get_transceivers()
return objects
def get_transceivers(self):
def get_offset(offset):
def wrap(x):
return str(int(x) + offset)
return wrap
objects = []
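        # Firmware 3.90+ reports transceivers via "show interface transceiver *";
        # older releases only expose them through the ZyNOS CLI ("sys sw sfp disp"),
        # where SFP slot numbers must be offset to match front-panel port numbers.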
if self.match_version(version__startswith="3.90"):
xcvr_n = get_offset(0)
inv = self.cli("show interface transceiver *")
rx_trans = re.compile(
r"Port\s+:\s+(?P<number>\d+)\s+\S+\n"
r"Vendor\s+:\s+(?P<vendor>\S+)\s*\n"
r"Part Number\s+:\s+(?P<part_no>\S+\s*\S*)\s*\n"
r"Serial Number\s+:\s+(?P<serial>\S+)\s*\n"
r"Revision\s+:\s+(?P<rev>\S+)?\s*\n"
r"Date Code\s+:\s+\S+\n"
r"Transceiver\s+:\s+(?P<type>\S+)",
re.MULTILINE | re.DOTALL,
)
else:
if self.match_version(platform__contains="2024"):
xcvr_n = get_offset(25)
elif self.match_version(platform__contains="2108"):
xcvr_n = get_offset(9)
else:
xcvr_n = get_offset(1)
with self.zynos_mode():
inv = self.cli("sys sw sfp disp")
rx_trans = re.compile(
r"SFP\s+:\s+(?P<number>\d+)\s*\n"
r"Vendor\s+:\s+(?P<vendor>\S+)\s*\n"
r"Part\sNumber\s+:\s+(?P<part_no>\S+\s*\S*)\s*\n"
r"Series\sNumber\s+:\s+(?P<serial>\S+)\s*\n"
r"Revision\s+:\s+(?P<rev>\S+)?\s*\n"
r"Transceiver\s+:\s+(?P<type>\S+)",
re.MULTILINE | re.DOTALL,
)
for match in rx_trans.finditer(inv):
try:
vendor = match.group("vendor").encode("utf-8")
except UnicodeDecodeError:
vendor = "NONAME"
try:
part_no = match.group("part_no").encode("utf-8").strip()
except UnicodeDecodeError:
part_no = "NoName | Transceiver | Unknown SFP"
part_no_orig = self.remove_non_ascii(match.group("part_no").strip())
if vendor in ["NONAME", "OEM", "CISCO-FINISAR", "AODevices"]:
part_no = "NoName | Transceiver | "
description = match.group("type")
if description.endswith(tuple([" EX", "-EX"])):
part_no = part_no + "1G | SFP EX"
elif description.endswith(tuple([" LH", "-LH"])):
part_no = part_no + "1G | SFP LH"
elif description.endswith(tuple([" LX", "-LX"])):
part_no = part_no + "1G | SFP LX"
elif description.endswith(tuple([" SX", "-SX"])):
part_no = part_no + "1G | SFP SX"
elif description.endswith(tuple([" T", "-T"])):
part_no = part_no + "1G | SFP T"
elif description.endswith(tuple([" TX", "-TX"])):
part_no = part_no + "1G | SFP TX"
elif description.endswith(tuple([" ZX", "-ZX"])):
part_no = part_no + "1G | SFP ZX"
elif part_no_orig.endswith(tuple(["BX-U", "BX-1"])):
part_no = part_no + "1G | SFP BXU"
elif part_no_orig.endswith("BX-D"):
part_no = part_no + "1G | SFP BXD"
else:
part_no = part_no + "Unknown SFP"
revision = self.remove_non_ascii(match.group("rev"), "") if match.group("rev") else None
o = {
"type": "XCVR",
"number": xcvr_n(match.group("number")),
"vendor": vendor,
"description": "%s (%s)" % (match.group("type"), vendor),
"part_no": [part_no.strip()],
"builtin": False,
}
if revision:
o["revision"] = revision
try:
o["serial"] = match.group("serial").encode("utf-8")
except UnicodeDecodeError:
pass
objects += [o]
return objects
| 39.87218 | 100 | 0.456723 | [
"BSD-3-Clause"
] | ewwwcha/noc | sa/profiles/Zyxel/ZyNOS/get_inventory.py | 5,303 | Python |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tests for the ApplyMagFieldTask
"""
from multiprocessing import Event
import pytest
import enaml
from exopy.tasks.api import RootTask
from exopy.tasks.tasks.logic.loop_task import LoopTask
from exopy.testing.util import show_and_close_widget
from exopy_hqc_legacy.tasks.tasks.instr.apply_mag_field_task\
import ApplyMagFieldTask
with enaml.imports():
from exopy.tasks.tasks.logic.views.loop_view import LoopView
from exopy_hqc_legacy.tasks.tasks.instr.views.apply_mag_field_view\
import ApplyMagFieldView
from .instr_helper import (InstrHelper, InstrHelperStarter, DummyJob,
PROFILES, DRIVERS)
class TestApplyMagFieldTask(object):
def setup(self):
self.root = RootTask(should_stop=Event(), should_pause=Event())
self.task = ApplyMagFieldTask(name='Test',
parallel={'activated': False})
self.root.add_child_task(0, self.task)
self.root.run_time[DRIVERS] = {'Test': (InstrHelper,
InstrHelperStarter())}
self.root.run_time[PROFILES] =\
{'Test1':
{'connections': {'C': {'owner': [],
'output_fluctuations': 1e-6,
'heater_state': []}},
'settings': {'S': {'sweep_to_field': [DummyJob(), DummyJob(),
DummyJob()],
'sweep_to_persistent_field': [DummyJob()],
'read_persistent_field': [1],
'check_connection': [True]}}
}
}
# This is set simply to make sure the test of InstrTask pass.
self.task.selected_instrument = ('Test1', 'Test', 'C', 'S')
def test_check1(self):
"""Simply test that everything is ok if field can be evaluated.
"""
self.task.field = '3.0'
test, traceback = self.task.check(test_instr=True)
assert test
assert not traceback
assert self.task.get_from_database('Test_field') == 3.0
def test_check2(self):
"""Check handling a wrong field.
"""
self.task.field = '*1.0*'
test, traceback = self.task.check(test_instr=True)
assert not test
assert len(traceback) == 1
assert 'root/Test-field'in traceback
assert self.task.get_from_database('Test_field') == 0.01
def test_perform1(self):
"""Simple test when everything is right.
"""
self.task.field = '2.0'
self.root.prepare()
self.task.perform()
assert self.root.get_from_database('Test_field') == 2.0
@pytest.mark.ui
def test_apply_mag_field_view1(exopy_qtbot, root_view, task_workbench):
"""Test ApplyMagFieldView widget outisde of a LoopTask.
"""
task = ApplyMagFieldTask(name='Test')
root_view.task.add_child_task(0, task)
show_and_close_widget(exopy_qtbot, ApplyMagFieldView(task=task, root=root_view))
@pytest.mark.ui
def test_apply_mag_field_view2(exopy_qtbot, root_view, task_workbench):
"""Test ApplyMagFieldView widget inside of a LoopTask.
"""
task = ApplyMagFieldTask(name='Test')
loop = LoopTask(name='r', task=task)
root_view.task.add_child_task(0, loop)
# XXX check for absence of target field
show_and_close_widget(exopy_qtbot, LoopView(task=loop, root=root_view))
| 34.131579 | 84 | 0.58751 | [
"BSD-3-Clause"
] | Qcircuits/exopy_hqc_legacy | tests/tasks/tasks/instr/test_apply_mag_field_task.py | 3,891 | Python |
from asyncio import sleep
from discord import FFmpegPCMAudio, PCMVolumeTransformer
from configs import bot_enum
from ..session.Session import Session
async def alert(session: Session):
vc = session.ctx.voice_client
if not vc:
return
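    # Pick the alert sound from the session state: a countdown keeps the default
    # "pomodoro finished" sound, a completed interval set triggers the long-break
    # alert, and any other non-pomodoro state plays the "pomodoro start" alert.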
path = bot_enum.AlertPath.POMO_END
if session.state == bot_enum.State.COUNTDOWN:
pass
elif session.stats.pomos_completed % session.settings.intervals == 0:
path = bot_enum.AlertPath.LONG_BREAK_START
elif session.state != bot_enum.State.POMODORO:
path = bot_enum.AlertPath.POMO_START
source = PCMVolumeTransformer(FFmpegPCMAudio(path, executable='/usr/bin/ffmpeg'),
volume=0.1)
if vc.is_playing():
vc.stop()
vc.play(source)
while vc.is_playing():
await sleep(1)
| 29.321429 | 85 | 0.678441 | [
"MIT"
] | SlenderCylinder/pomoji | bot/src/utils/player.py | 821 | Python |
#!/usr/bin/env python
# $Id$
#
# Author: Thilee Subramaniam
#
# Copyright 2012 Quantcast Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This code is used to generate a plan file for metaserver vs namenode
# benchmarking.
#
import optparse
import sys
import subprocess
import time
import os
import math
import getpass
"""
This program is used to create the directory/file layout to be used
in metaserver/namenode stress test.
You basically specify the depth of the directory tree and the number
of elements (files or directories) per level, along with the list of
client-hosts you want to use and the number of clients per client-host
that you want to use.
This script will generate the plan file, and copy it to the /tmp on the
given list of client hosts.
Thereafter, you can execute the mstress.py with this plan file.
"""
class Globals:
PATH_PREFIX = 'Dir_'
PLAN_OUTPUT = './planfile.txt'
def ParseCommandline():
epi = ('Example: "%s -c h1,h2 -n 3 -l 4 -i 3 -s 100" would create 4 levels of 3 inodes ' % sys.argv[0] +
'(3+9+27+81=120) per client process. Since there are 3 ' +
'processes on 2 hosts, we create 120x6=720 inodes. We will attempt ' +
'to stat 100 random leaf paths using all client processes. We will do a readdir ' +
'all through the directory tree.')
parser = optparse.OptionParser(epilog=epi)
parser.add_option('-c', '--client-hosts',
action='store',
default='localhost',
type='string',
help='Comma-separated list of client host names.')
parser.add_option('-n', '--clients-per-host',
action='store',
default=1,
type='int',
help='Number of clients per client host.')
parser.add_option('-l', '--levels',
action='store',
default=1,
type='int',
help='File-tree depth on each client.')
parser.add_option('-i', '--inodes-per-level',
action='store',
default=100,
type='int',
help='Inodes per each level on each client.')
parser.add_option('-t', '--path-type',
action='store',
default='dir',
type='string',
help='Whether to create "dir" or "file" inodes.')
parser.add_option('-s', '--num-to-stat',
action='store',
default=100,
type='int',
help='Number of inodes to stat (<=total leaf inodes).')
parser.add_option('-o', '--output-file',
action='store',
default=None,
type='string',
help='Output plan file.')
opts, args = parser.parse_args()
if args:
sys.exit('Unexpected arguments: %s.' % str(args))
if opts.output_file is None:
opts.output_file = '/tmp/mstress_%s_%s.plan' % (getpass.getuser(), time.strftime("%F-%H-%M-%S", time.gmtime()))
return opts
def main():
opts = ParseCommandline()
hostlist = opts.client_hosts.split(',')
numClientProcesses = float(len(hostlist) * opts.clients_per_host)
if numClientProcesses == 0.0:
sys.exit('Invalid client processes')
#get the smallest number larger than 'opts.num_to_stat' that is a multiple of opts.num_to_stat
statPerClient = int(math.ceil(float(opts.num_to_stat) / numClientProcesses))
#print opts
outfile = open(opts.output_file, 'w')
outfile.write('# *** DO NOT EDIT THIS FILE BY HAND *** \n# USE mstress_plan.py TO MODIFY INSTEAD\n#\n')
outfile.write('#List of hosts taking part in the plan\nhostslist=%s\n' % opts.client_hosts)
  outfile.write('#Number of mstress clients per client host\nclientsperhost=%d\n' % opts.clients_per_host)
outfile.write('#File or directory\ntype=%s\n' % opts.path_type)
outfile.write('#Number of levels in created tree\nlevels=%d\n' % opts.levels)
outfile.write('#Number of inodes per level\ninodes=%d\n' % opts.inodes_per_level)
outfile.write('#Number of random paths to stat, per client\nnstat=%d\n' % statPerClient)
""" old code
begin_tree_delta = 0
for level in range(0,opts.levels):
begin_tree_delta = begin_tree_delta + pow(opts.inodes_per_level, level + 1)
#print "delta = ", begin_tree_delta
outfile.write('#host\tclient\tlevel\tdistribution\n')
begin_tree_idx = 0
for host_no in range(0,len(hostlist)):
host = hostlist[host_no]
for client_no in range(0,opts.clients_per_host):
# tree for this level
begin_idx = begin_tree_idx
for level in range(0,opts.levels):
prefix = '%s\tproc_%02d\t%d\t' % (host, client_no, level)
# print '-- h=%d, c=%d level=%d, begin idx = %d' % (host_no, client_no, level, begin_idx)
suffix = ''
for ranges in range(0, pow(opts.inodes_per_level, level)):
if len(suffix) != 0:
suffix = suffix + ','
suffix = suffix + '%d-%d'%(begin_idx, begin_idx + opts.inodes_per_level - 1)
begin_idx = begin_idx + opts.inodes_per_level
outfile.write('%s\t%s\n' % (prefix, suffix))
begin_tree_idx = begin_tree_idx + begin_tree_delta
#print "next begin tree idx = ", begin_tree_idx
"""
outfile.close()
print '==> Created planfile: %s' % opts.output_file
print 'copying file %s to all client hosts' % opts.output_file
for client in hostlist:
p = subprocess.Popen(['/usr/bin/scp', os.path.abspath(opts.output_file), '%s:%s' % (client, opts.output_file)])
while 1:
ret = p.poll()
if ret == None:
time.sleep(0.5)
else:
        print 'transferred %s to %s' % (opts.output_file, client)
break
if __name__ == '__main__':
main()
| 37.170588 | 115 | 0.627947 | [
"Apache-2.0"
] | chanwit/qfs | benchmarks/mstress/mstress_plan.py | 6,319 | Python |
"""
Support for Syslog-based networking devices.
For now, support is limited to hostapd and dnsmasq.
Example syslog lines:
<30>Dec 31 13:03:21 router hostapd: wlan1: STA a4:77:33:e3:17:7c WPA: group key handshake completed (RSN)
<29>Dec 31 13:05:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82
<30>Dec 31 13:15:22 router hostapd: wlan0: STA 64:20:0c:37:52:82 IEEE 802.11: disassociated
<30>Dec 31 13:15:23 router hostapd: wlan0: STA 64:20:0c:37:52:82 IEEE 802.11: deauthenticated due to inactivity (timer DEAUTH/REMOVE)
<29>Dec 31 13:20:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82
<30>Dec 31 13:02:33 router dnsmasq-dhcp[1601]: DHCPACK(br-lan) 192.168.0.101 f4:6d:04:ae:ac:d7 leon-pc
"""
from asyncio import coroutine
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA, SOURCE_TYPE_ROUTER
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_DEVICES
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
#vol.Optional(CONF_WHITELIST): cv.string, # ACL
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=514): cv.port,
# mac => name
vol.Required(CONF_DEVICES): {cv.string: cv.string},
# TODO: TCP vs UDP
# TODO: periodically ARP ping wired devices
})
Event = namedtuple('Event', 'mac kind is_sta reason')
STA_EVENTS = {
'WPA: group key handshake completed': 'home',
'WPA: pairwise key handshake completed': 'home',
'deauthenticated due to local deauth request': 'not_home',
'IEEE 802.11: disconnected due to excessive missing ACKs': 'timeout',
'IEEE 802.11: disassociated due to inactivity': 'timeout',
'IEEE 802.11: deauthenticated due to inactivity': 'timeout',
# Ignored, should be covered by AP-STA-*
'IEEE 802.11: associated': '',
'IEEE 802.11: authenticated': '',
'IEEE 802.11: disassociated': '',
}
def _skip_date_tokens(tokens):
"""
Based on RFC 3164 + RFC 5424 and real-world logs
"""
if tokens and tokens[0].startswith('<'):
tokens.pop(0)
while tokens and (not tokens[0] or tokens[0][:1].isdigit()):
tokens.pop(0)
def _find_process(tokens):
while tokens:
token = tokens.pop(0)
if token.endswith(':'):
c = token.find('[')
if c > -1:
return token[:c]
return token[:-1]
def _remove_param(tokens):
i = len(tokens) - 1
while i > 0:
if tokens[i].startswith('('):
return tokens[:i]
i -= 1
return tokens
def parse_syslog_line(line):
"""Parses lines created by hostapd and dnsmasq DHCP"""
tokens = line.split(' ')
_skip_date_tokens(tokens)
process = _find_process(tokens)
if not process or not tokens:
_LOGGER.debug('Unable to process line: %r', line)
return
if process == 'hostapd':
# <iface>: AP-STA-<event>: <mac>
if len(tokens) == 3:
if tokens[1] == 'AP-STA-CONNECTED':
return Event(tokens[2], 'home', True, tokens[1])
elif tokens[1] == 'AP-STA-DISCONNECTED':
# Disconnected, but we might get the real reason later
return Event(tokens[2], 'timeout', True, tokens[1])
elif len(tokens) > 4 and tokens[1] == 'STA':
# <iface>: STA <mac> WPA: <...>
# <iface>: STA <mac> IEEE 802.11: <...>
suffix = ' '.join(_remove_param(tokens[3:]))
for consider, status in STA_EVENTS.items():
if suffix.endswith(consider):
if status == '':
return
return Event(tokens[2], status, True, suffix)
_LOGGER.warning('Unhandled line: %r', line)
elif process == 'dnsmasq-dhcp':
if len(tokens) >= 3:
# <event>(<iface> <ip> <mac> <name>
if tokens[0].startswith('DHCPACK('):
return Event(tokens[2], 'home', False, tokens[0])
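# Illustrative result for the hostapd example in the module docstring (shown as a
# comment only, not executed):
#   parse_syslog_line('<29>Dec 31 13:05:15 router hostapd: wlan0: AP-STA-CONNECTED 64:20:0c:37:52:82')
#   -> Event(mac='64:20:0c:37:52:82', kind='home', is_sta=True, reason='AP-STA-CONNECTED')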
class SyslogScanner:
def __init__(self, hass, async_see, devices):
self.hass = hass
self.devices = devices
self.wireless_devices = set()
self.async_see = async_see
# TODO: consider marking all devices as offline after start
self.debug_marked = {}
#async_track_time_interval(hass, self.scan_online_devices,
# timedelta(minutes=1))
@coroutine
def scan_online_devices(self, now=None):
_LOGGER.info('Check online devices')
for mac, name in self.devices.items():
if mac in self.wireless_devices:
continue
_LOGGER.info('Check %r', mac)
def process_line(self, line):
event = parse_syslog_line(line.rstrip('\n'))
if not event:
return
_LOGGER.info('%r', event)
mac = event.mac.replace(':', '')
if event.is_sta:
self.wireless_devices.add(mac)
device = self.devices.get(mac)
if not device:
# Automatic tracking
device = self.devices[mac] = mac
consider_home = None
state = event.kind
if event.kind == 'timeout':
state = 'not_home'
# TODO: this feature has not been added yet
consider_home = timedelta(minutes=5)
if self.debug_marked.get(device) != state:
_LOGGER.info('Mark %r as %r [%s]', device, state, consider_home)
self.debug_marked[device] = state
self.hass.async_add_job(self.async_see(dev_id=device,
source_type=SOURCE_TYPE_ROUTER,
mac=event.mac,
#consider_home=consider_home,
location_name=state))
class SyslogScannerUDP(SyslogScanner):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
message = data.decode('utf8', 'replace')
self.process_line(message)
@coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
bind = (config[CONF_HOST], config[CONF_PORT])
_LOGGER.info('Listening on %s:%s', bind[0], bind[1])
proto = lambda: SyslogScannerUDP(hass, async_see, config[CONF_DEVICES])
listen = hass.loop.create_datagram_endpoint(proto, local_addr=bind)
hass.async_add_job(listen)
return True
| 35.089005 | 137 | 0.609669 | [
"MIT"
] | Bun/ha-syslog-devtracker | syslog.py | 6,702 | Python |
import xlrd
import os
import sys
import copy
import json
import codecs
from collections import OrderedDict
# Constant Values
PARENT_NAME_ROW = 0
PARENT_NAME_COL = 0
COLUMN_NAMES_ROW = 1
DATA_STARTING_ROW = 2
ROOT_NAME = '*root'
ID_COLUMN_NAME = 'id'
PARENT_COLUMN_NAME = '*parent'
IGNORE_WILDCARD = '_'
REQUIRE_VERSION = (3, 5)
EXCEL_PATH = './excel/'
JSON_PATH = '../../asset/json/'
# Class
class TypeUtility:
# xlrd is giving number as float
@staticmethod
def check_integer(value):
return type(value) == float and int(value) == value
# xlrd is giving boolean as integer
@staticmethod
def check_boolean(value):
return type(value) == int
@staticmethod
def convert_value(value):
if TypeUtility.check_integer(value):
return int(value)
elif TypeUtility.check_boolean(value):
return bool(value)
else:
return value
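# A workbook is interpreted as follows: row 0 holds the parent sheet name
# ('*root' for the root sheet), row 1 holds the column names ('id' marks a parent
# table, '*parent' marks a child table), and data starts at row 2.  Sheets and
# columns whose names begin with '_' are ignored.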
class Table:
def __init__(self, sheet):
self.init_name(sheet)
self.init_parent_name(sheet)
self.init_metadata(sheet)
self.init_descriptors(sheet)
self.init_id_index_map()
def init_name(self, sheet):
self.name = sheet.name
def init_parent_name(self, sheet):
row = sheet.row_values(PARENT_NAME_ROW)
self.parent_name = row[PARENT_NAME_COL]
if type(self.parent_name) is not str:
raise Exception('[' + self.name + ']' + 'Parent name is not string')
sys.exit()
self.is_root = self.parent_name == ROOT_NAME
def init_metadata(self, sheet):
row = sheet.row_values(COLUMN_NAMES_ROW)
self.is_parent = False
self.is_child = False
self.column_names = []
for value in row:
if type(value) is not str:
raise Exception('[' + self.name + ']' + 'Column name is not string')
sys.exit()
if value == ID_COLUMN_NAME:
self.is_parent = True
if value == PARENT_COLUMN_NAME:
self.is_child = True
self.column_names.append(value)
if self.is_root and self.is_child:
raise Exception('[' + self.name + ']' + 'Root table must not have a "' + PARENT_COLUMN_NAME + '" column')
sys.exit()
if not self.is_root and not self.is_child:
raise Exception('[' + self.name + ']' + 'Child table must have a "' + PARENT_COLUMN_NAME + '" column')
sys.exit()
def init_descriptors(self, sheet):
self.descriptors = []
id_table = []
for i in range(DATA_STARTING_ROW, sheet.nrows):
#add metadata row count
rowcount = i + 1
col = sheet.row_values(i)
desc = self.get_descriptor(col)
if self.is_parent:
id = desc[ID_COLUMN_NAME]
if not id:
raise Exception('[' + self.name + ']' + 'Descriptor id must have a value - row : ' + str(i + 1))
sys.exit()
if id in id_table:
raise Exception('[' + self.name + ']' + 'Descriptor id is duplicated - row : ' + str(i + 1))
sys.exit()
id_table.append(id)
self.descriptors.append(desc)
def get_descriptor(self, col):
descriptor = OrderedDict()
for i in range(0, len(col)):
key = self.column_names[i]
if key[0] == IGNORE_WILDCARD:
continue
descriptor[key] = TypeUtility.convert_value(col[i])
return descriptor
def init_id_index_map(self):
if not self.is_parent:
return
self.id_index_map = {}
for descriptor in self.descriptors:
id = descriptor[ID_COLUMN_NAME]
self.id_index_map[id] = self.descriptors.index(descriptor)
def merge_child_table(self, table):
self.add_child_descriptor_list(table.name)
for descriptor in table.descriptors:
parent_id = descriptor[PARENT_COLUMN_NAME]
parent_idx = self.id_index_map[parent_id]
parent_descriptor = self.descriptors[parent_idx]
parent_descriptor[table.name].append(descriptor)
def add_child_descriptor_list(self, name):
for descriptor in self.descriptors:
descriptor[name] = []
def remove_parent_column(self):
for descriptor in self.descriptors:
del descriptor[PARENT_COLUMN_NAME]
def save_to_json(self, pretty_print, export_path):
if pretty_print:
string = json.dumps(self.descriptors, ensure_ascii=False, indent=4)
else:
string = json.dumps(self.descriptors, ensure_ascii=False)
with codecs.open(export_path + self.name + '.json', 'w', 'utf-8') as f:
f.write(string)
class Converter:
def __init__(self, pretty_print, export_path):
self.pretty_print = pretty_print
self.export_path = export_path
def convert(self, filename):
print(filename + ' convert starting...')
sheets = Converter.get_sheets(filename)
root_table, tables = Converter.get_tables(sheets)
Converter.post_process(tables)
root_table.save_to_json(self.pretty_print, self.export_path)
print(filename + ' convert is Done\n')
@staticmethod
def get_sheets(filename):
path = os.path.abspath(filename)
workbook = xlrd.open_workbook(path)
return workbook.sheets()
@staticmethod
def get_tables(sheets):
tables = {}
root_tables = []
for sheet in sheets:
if sheet.name[0] == IGNORE_WILDCARD:
continue
table = Table(sheet)
tables[table.name] = table
if table.is_root:
root_tables.append(table)
if len(root_tables) == 1:
return root_tables[0], tables
else:
raise Exception('Root table must be one')
sys.exit()
@staticmethod
def post_process(tables):
for name, table in tables.items():
if table.is_root:
continue
parent_table = tables[table.parent_name]
if not parent_table.is_parent:
raise Exception('Parent table must have a id column')
sys.exit()
parent_table.merge_child_table(table)
table.remove_parent_column()
# Script
current_version = sys.version_info
if current_version < REQUIRE_VERSION:
    raise Exception('[error] You need Python 3.5 or later')
sys.exit()
json_path = sys.argv[1] if len(sys.argv) > 1 else './'
converter = Converter(True, JSON_PATH + json_path)
for path, dirs, files in os.walk(EXCEL_PATH):
for file in files:
        if file[0] == "~":
continue
if os.path.splitext(file)[1].lower() == '.xlsx':
converter.convert(EXCEL_PATH + file)
| 30.017094 | 117 | 0.589408 | [
"CC0-1.0"
] | mousedoc/Prism | third-party/language/generator.py | 7,024 | Python |
import torch.nn as nn
import torch.nn.functional as F
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import force_fp32
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
from .bbox_head import BBoxHead
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox.iou_calculators.builder import build_iou_calculator
@HEADS.register_module()
class ConvFCBBoxHead(BBoxHead):
r"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
.. code-block:: none
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
(\-> dis convs -> dis fcs -> dis)
""" # noqa: W605
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
conv_out_channels=256,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=None,
with_dis=False, #for leaves
num_dis_convs=0,
num_dis_fcs=0,
*args,
**kwargs):
super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
#only for leaves
self.with_dis = with_dis
self.num_dis_convs = num_dis_convs
self.num_dis_fcs = num_dis_fcs
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
if not self.with_dis:
assert num_dis_convs == 0 and num_dis_fcs == 0
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# add shared convs and fcs
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
# add cls specific branch
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
# add reg specific branch
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
#add dis branch(only for leaves)
if self.with_dis:
self.dis_convs, self.dis_fcs, self.dis_last_dim = \
self._add_conv_fc_branch(
self.num_dis_convs, self.num_dis_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
# reconstruct fc_cls and fc_reg since input channels are changed
if self.with_cls:
self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
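        # Disease ("dis") head: dis_selector 0 and 1 use a single logit trained with
        # a sigmoid-based loss, while dis_selector 2 predicts 4 classes with a
        # softmax loss (see the loss_dis assertions in Shared2FCBBoxHeadLeaves).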
if self.with_dis:
if self.dis_selector == 0 or self.dis_selector == 1:
self.fc_dis = nn.Linear(self.cls_last_dim, 1)
elif self.dis_selector == 2:
self.fc_dis = nn.Linear(self.cls_last_dim, 4)
def _add_conv_fc_branch(self,
num_branch_convs,
num_branch_fcs,
in_channels,
is_shared=False):
"""Add shared or separable branch.
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def init_weights(self):
super(ConvFCBBoxHead, self).init_weights()
# conv layers are already initialized by ConvModule
if self.with_dis:
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs, self.dis_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
else:
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# shared part
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
# separate branches
x_cls = x
x_reg = x
if self.with_dis:
x_dis = x
for conv in self.dis_convs:
x_dis = conv(x_dis)
if x_dis.dim() > 2:
if self.with_avg_pool:
x_dis = self.avg_pool(x_dis)
x_dis = x_dis.flatten(1)
for fc in self.dis_fcs:
x_dis = self.relu(fc(x_dis))
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
dis_pred = self.fc_dis(x_dis) if self.with_dis else None
return cls_score, bbox_pred, dis_pred
@HEADS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared2FCBBoxHead, self).__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
@HEADS.register_module()
class Shared2FCBBoxHeadLeaves(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
loss_dis = kwargs['loss_dis']
self.reference_labels = kwargs['reference_labels']
self.classes = kwargs['classes']
self.dis_selector = kwargs['dis_selector']
assert self.dis_selector in (0, 1, 2)
kwargs.pop('loss_dis')
kwargs.pop('reference_labels')
kwargs.pop('classes')
kwargs.pop('dis_selector')
super(Shared2FCBBoxHeadLeaves, self).__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
with_dis=True, #only for leaves
num_dis_convs=0,
num_dis_fcs=0,
*args,
**kwargs)
if self.dis_selector == 0 or self.dis_selector == 1:
assert loss_dis['use_sigmoid'], "used invalid loss_dis"
elif self.dis_selector == 2:
assert not loss_dis['use_sigmoid'], "used invalid loss_dis"
self.loss_dis = build_loss(loss_dis)
#DEBUG
#loss_dis_py =dict(type='py_FocalLoss',
# alpha=torch.tensor(self.dis_weights, device=torch.device('cpu')),
# gamma = 2.0,
# reduction = 'mean')
#self.loss_dis_py = build_loss(loss_dis_py)
#Override
def get_targets(self,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
reference_labels,
classes,
concat=True):
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
Almost the same as the implementation in bbox_head, we passed
additional parameters pos_inds_list and neg_inds_list to
`_get_target_single` function.
Args:
sampling_results (List[obj:SamplingResults]): Assign results of
all images in a batch after sampling.
gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
each tensor has shape (num_gt, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
gt_labels (list[Tensor]): Gt_labels of all images in a batch,
each tensor has shape (num_gt,).
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target
for all proposals in a batch, each tensor in list
has shape (num_proposals, 4) when `concat=False`,
otherwise just a single tensor has shape
(num_all_proposals, 4), the last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
- dis_targets (list[tensor], Tensor): Gt_dis for all
proposal in a batch, each tensor in list has
shape (num_proposal,) when 'concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
"""
pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
self._get_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
#processing for dis_target
iou_calculator=dict(type='BboxOverlaps2D')
iou_calculator = build_iou_calculator(iou_calculator)
        isolation_thr = 0.45  # TODO: expose this threshold as an argument
#retrive the gt_superclass bboxes
dis_targets = []
for i, res in enumerate(sampling_results):
ref_grap_list =[]
ref_leav_list =[]
ref_grap_dis_list =[]
ref_leav_dis_list =[]
for j, bbox in enumerate(gt_bboxes[i]):
if self.dis_selector == 0:
if 'grappolo' in classes[gt_labels[i][j]] and gt_labels[i][j] != reference_labels['grappolo_vite']:
ref_grap_dis_list.append(bbox)
elif (('foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
or classes[gt_labels[i][j]] == 'virosi_pinot_grigio')
and gt_labels[i][j] != reference_labels['foglia_vite']):
ref_leav_dis_list.append(bbox)
elif self.dis_selector == 1:
if gt_labels[i][j] == reference_labels['grappolo_vite']:
ref_grap_list.append(bbox)
elif gt_labels[i][j] == reference_labels['foglia_vite']:
ref_leav_list.append(bbox)
elif self.dis_selector == 2:
if gt_labels[i][j] == reference_labels['grappolo_vite']:
ref_grap_list.append(bbox)
elif gt_labels[i][j] == reference_labels['foglia_vite']:
ref_leav_list.append(bbox)
elif 'grappolo' in classes[gt_labels[i][j]]:
ref_grap_dis_list.append(bbox)
elif 'foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
or classes[gt_labels[i][j]] == 'virosi_pinot_grigio':
ref_leav_dis_list.append(bbox)
'''
if 'grappolo' in classes[gt_labels[i][j]] and gt_labels[i][j] != reference_labels['grappolo_vite']:
ref_grap_dis_list.append(bbox)
elif (('foglia' in classes[gt_labels[i][j]] or classes[gt_labels[i][j]] == 'malattia_esca'\
or classes[gt_labels[i][j]] == 'virosi_pinot_grigio')
and gt_labels[i][j] != reference_labels['foglia_vite']):
ref_leav_dis_list.append(bbox)
'''
if len(ref_grap_list) > 0:
ref_grap_tensor = torch.cat(ref_grap_list)
ref_grap_tensor = torch.reshape(ref_grap_tensor, (len(ref_grap_list), 4))
if len(ref_leav_list) > 0:
ref_leav_tensor = torch.cat(ref_leav_list)
ref_leav_tensor = torch.reshape(ref_leav_tensor, (len(ref_leav_list), 4))
if len(ref_grap_dis_list) > 0:
ref_grap_dis_tensor = torch.cat(ref_grap_dis_list)
ref_grap_dis_tensor = torch.reshape(ref_grap_dis_tensor, (len(ref_grap_dis_list), 4))
if len(ref_leav_dis_list) > 0:
ref_leav_dis_tensor = torch.cat(ref_leav_dis_list)
ref_leav_dis_tensor = torch.reshape(ref_leav_dis_tensor, (len(ref_leav_dis_list), 4))
num_pos = res.pos_bboxes.size(0)
num_neg = res.neg_bboxes.size(0)
num_samples = num_pos + num_neg
dis_tensor= res.pos_bboxes.new_full((num_samples, ), -1, dtype=torch.long)
dis_list = []
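            # Label every positive proposal for the dis head.  -1 means "ignore";
            # for dis_selector 0/1 the target is binary (0 = isolated/healthy,
            # 1 = overlapping/diseased), and for dis_selector 2 the four classes are:
            # 0 healthy organ, 1 diseased organ, 2 isolated disease, 3 disease lying
            # on an organ.  Overlap is measured with an intersection-over-foreground
            # ('iof') threshold of `isolation_thr`.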
for j, bbox in enumerate(res.pos_bboxes):
#trick for using the iof calculator
bbox = bbox.unsqueeze(0)
if res.pos_gt_labels[j] == reference_labels['grappolo_vite']:
if self.dis_selector == 0:
dis_list.append(-1) #the grape is not considered
elif self.dis_selector == 1 or self.dis_selector == 2:
if len(ref_grap_dis_list) > 0:
overlaps = iou_calculator(ref_grap_dis_tensor, bbox, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0) #the grape is healthy
else:
dis_list.append(1) #the grape is affected by a disease
else:
dis_list.append(0) #the grape is healthy
elif res.pos_gt_labels[j] == reference_labels['foglia_vite']:
if self.dis_selector == 0:
dis_list.append(-1) #the leaf is not considered
elif self.dis_selector == 1 or self.dis_selector == 2:
if len(ref_leav_dis_list) > 0:
overlaps = iou_calculator(ref_leav_dis_tensor, bbox, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0) #the leaf is healthy
else:
dis_list.append(1) #the leaf is affected by a disease
else:
dis_list.append(0) #the leaf is healthy
elif 'grappolo' in classes[res.pos_gt_labels[j]] and res.pos_gt_labels[j] != reference_labels['grappolo_vite']:
if self.dis_selector == 1:
dis_list.append(-1) #the disease is not considered
elif self.dis_selector == 0:
if len(ref_grap_list) > 0:
overlaps = iou_calculator(bbox, ref_grap_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0) #the disease is isolated
else:
dis_list.append(1) #the disease is inside a leaf or grape
else:
dis_list.append(0) #the disease is isolated
elif self.dis_selector == 2:
if len(ref_grap_list) > 0:
overlaps = iou_calculator(bbox, ref_grap_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(2) #the disease is isolated
else:
dis_list.append(3) #the disease is inside a leaf or grape
else:
dis_list.append(2) #the disease is isolated
elif (('foglia' in classes[res.pos_gt_labels[j]] or classes[res.pos_gt_labels[j]] == 'malattia_esca'
or classes[res.pos_gt_labels[j]] == 'virosi_pinot_grigio')
and res.pos_gt_labels[j] != reference_labels['foglia_vite']):
if self.dis_selector == 1:
dis_list.append(-1) #the disease is not considered
elif self.dis_selector == 0:
if len(ref_leav_list) > 0:
overlaps = iou_calculator(bbox, ref_leav_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(0) #the disease is isolated
else:
dis_list.append(1) #the disease is inside a leaf or grape
else:
dis_list.append(0) #the disease is isolated
elif self.dis_selector == 2:
if len(ref_leav_list) > 0:
overlaps = iou_calculator(bbox, ref_leav_tensor, mode='iof')
overlaps = overlaps < isolation_thr
if overlaps.all():
dis_list.append(2) #the disease is isolated
else:
dis_list.append(3) #the disease is inside a leaf or grape
else:
dis_list.append(2) #the disease is isolated
#elif res.pos_gt_labels[j] == reference_labels['oidio_tralci']:
# dis_list.append(-1) #the disease is not considered
dis_tensor[:num_pos] = torch.tensor(dis_list)
dis_targets.append(dis_tensor)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
dis_targets = torch.cat(dis_targets, 0)
#del dis_tensor
#torch.cuda.empty_cache()
return labels, label_weights, bbox_targets, bbox_weights, dis_targets
#Override
@force_fp32(apply_to=('cls_score', 'bbox_pred', 'dis_pred'))
def loss(self,
cls_score,
bbox_pred,
dis_pred,
rois,
labels,
label_weights,
bbox_targets,
bbox_weights,
dis_targets,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
if cls_score.numel() > 0:
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bg_class_ind = self.num_classes
# 0~self.num_classes-1 are FG, self.num_classes is BG
pos_inds = (labels >= 0) & (labels < bg_class_ind)
# do not perform bounding box regression for BG anymore.
if pos_inds.any():
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`,
# `GIouLoss`, `DIouLoss`) is applied directly on
# the decoded bounding boxes, it decodes the
# already encoded coordinates to absolute format.
bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
if self.reg_class_agnostic:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
else:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), -1,
4)[pos_inds.type(torch.bool),
labels[pos_inds.type(torch.bool)]]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred,
bbox_targets[pos_inds.type(torch.bool)],
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=bbox_targets.size(0),
reduction_override=reduction_override)
else:
losses['loss_bbox'] = bbox_pred[pos_inds].sum()
if dis_pred is not None:
pos_inds = dis_targets != -1
if pos_inds.any():
pos_dis_pred = dis_pred[pos_inds.type(torch.bool)]
pos_dis_targets = dis_targets[pos_inds.type(torch.bool)]
avg_factor = dis_pred.size(0)
losses['loss_dis'] = self.loss_dis(
pos_dis_pred,
pos_dis_targets,
avg_factor=avg_factor,
reduction_override=reduction_override)
#DEBUG
#loss_py = self.loss_dis_py(pos_dis_pred,
# pos_dis_targets)
#from mmcv.utils import print_log
#import logging
#logger = logging.getLogger(__name__)
#print_log("loss_dis:{:0.4f}, loss_dis_py:{:0.4f}".format(losses['loss_dis'], loss_py), logger = logger)
return losses
#Override
@force_fp32(apply_to=('cls_score', 'bbox_pred', 'dis_pred'))
def get_bboxes(self,
rois,
cls_score,
bbox_pred,
dis_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
if bbox_pred is not None:
bboxes = self.bbox_coder.decode(
rois[:, 1:], bbox_pred, max_shape=img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
if rescale and bboxes.size(0) > 0:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = (bboxes.view(bboxes.size(0), -1, 4) /
scale_factor).view(bboxes.size()[0], -1)
if dis_pred is not None:
if self.dis_selector == 0 or self.dis_selector == 1:
diseases = F.sigmoid(dis_pred)
elif self.dis_selector == 2:
diseases = F.softmax(dis_pred, dim=1)
if cfg is None:
return bboxes, scores, diseases
else:
det_bboxes, det_labels, inds = multiclass_nms(bboxes, scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img,
return_inds=True)
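            # multiclass_nms flattens the (num_boxes, num_classes) score matrix, so
            # the per-RoI disease predictions are broadcast across classes and then
            # indexed with the surviving `inds` to stay aligned with det_bboxes.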
if self.dis_selector == 0 or self.dis_selector == 1:
diseases = diseases.expand(bboxes.size(0), scores.size(1) - 1)
diseases = diseases.reshape(-1)
elif self.dis_selector == 2:
diseases = diseases[:, None].expand(bboxes.size(0), scores.size(1) - 1, 4)
diseases = diseases.reshape(-1, 4)
det_dis = diseases[inds]
return det_bboxes, det_labels, det_dis
@HEADS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared4Conv1FCBBoxHead, self).__init__(
num_shared_convs=4,
num_shared_fcs=1,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
| 45.200603 | 154 | 0.521523 | [
"Apache-2.0"
] | marcovalenti/mmdetection | mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py | 29,968 | Python |
#! /usr/bin/env python3
""" example module: extra.good.best.tau """
def FunT():
return "Tau"
if __name__ == "__main__":
print("I prefer to be a module") | 17.444444 | 43 | 0.649682 | [
"MIT"
] | tomasfriz/Curso-de-Cisco | Curso de Cisco/Actividades/py/packages/extra/good/best/tau.py | 157 | Python |
#!/usr/bin/env python3
import os
import requests
os.system("clear")
print("""
██ ██ █████ ██ ██ ██ ██ ██ ██
██ ██ ██ ██ ██ ██ ██ ██ ██ ██
██ █ ██ ███████ ██ ██ ██ ██ ███
██ ███ ██ ██ ██ ██ ██ ██ ██ ██ ██
███ ███ ██ ██ ███████ ███████ ██████ ██ ██
""")
print("[INFO] Initializing...\n")
baseurl = "https://raw.githubusercontent.com/Wallux-0/Wallpapers/main/"
req = requests.get(
"https://raw.githubusercontent.com/Wallux-0/Wallux/main/static/tags.json")
if req:
    content = req.json()  # parse the JSON response instead of eval-ing raw bytes
content = content['wallpaper']
else:
print("[ERROR] Please connect to internet and try again.")
print("""Hello! Wallux is a wallpaper library hosted on Github.
Please visit https://wallux-0.github.io/Wallux/ to choose a wallpaper and enter its Wallux ID here.
Wallux ID:""")
try:
walluxid = int(input())
except:
print("[ERROR] Not a valid Wallux ID.")
exit()
for w in content:
if str(walluxid) == ''.join([n for n in w['path'] if n.isdigit()]):
print("[INFO] Downloading your new wallpaper...")
req = requests.get(baseurl+w['path'], stream=True)
if req:
img = req.raw.read()
path = os.path.expanduser(
"~/Documents/"+w['path'].lstrip("wallpapers/").strip())
with open(path, 'wb') as f:
f.write(img)
print("[INFO] Image Downloaded")
else:
print("[ERROR] Please connect to an internet connection.")
break
os.system("""echo $(ps -e | grep -E -i "xfce|kde|gnome") > /tmp/wallux.file""")
parseStr = ''
with open("/tmp/wallux.file") as f:
parseStr = f.read()
os.remove("/tmp/wallux.file")
de = {}
de['kde'] = parseStr.lower().count("kde")
de['gnome'] = parseStr.lower().count('gnome')
de['xfce'] = parseStr.lower().count('xfce')
if max(de, key=de.get) == "gnome":
os.system(
"gsettings set org.gnome.desktop.background picture-uri file://{}".format(path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
elif max(de, key=de.get) == "kde":
import dbus
plugin = 'org.kde.image'
jscript = """
var allDesktops = desktops();
print (allDesktops);
for (i=0;i<allDesktops.length;i++) {
d = allDesktops[i];
d.wallpaperPlugin = "%s";
d.currentConfigGroup = Array("Wallpaper", "%s", "General");
d.writeConfig("Image", "file://%s")
}
"""
bus = dbus.SessionBus()
plasma = dbus.Interface(bus.get_object(
'org.kde.plasmashell', '/PlasmaShell'), dbus_interface='org.kde.PlasmaShell')
plasma.evaluateScript(jscript % (plugin, plugin, path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
elif max(de, key=de.get) == "xfce":
"""
    To find out what property is changed when the background changes, run the following command in a terminal window:
xfconf-query -c xfce4-desktop -m
...and then change the background using the Settings Manager > Desktop.
The command monitors channel xfce4-desktop for changes. It will tell which property on channel xfce4-desktop is changed.
Then the command to change that property would be like this
xfconf-query -c xfce4-desktop -p insert_property_here -s path/image
"""
os.system("xfconf-query --channel xfce4-desktop --property /backdrop/screen0/monitoreDP-1/workspace0/last-image --set {}".format(path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
else:
    print("[ERROR] Oops. Your desktop environment is not supported at the moment. But I saved the wallpaper to your Documents folder. Enjoy!")
| 40.086957 | 143 | 0.588124 | [
"MIT"
] | Manoj-Paramsetti/Wallux | wallux.py | 3,930 | Python |
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
from decisionengine.framework.modules.Publisher import Publisher
def test_publisher_structure():
"""
The module.publisher itself is a bit of a skeleton...
"""
params = {"1": 1, "2": 2, "channel_name": "test"}
test_publisher = Publisher(params)
assert test_publisher.get_parameters() == {"1": 1, "2": 2, "channel_name": "test"}
test_publisher.set_data_block("example")
assert test_publisher.get_data_block() == "example"
assert test_publisher._consumes == {}
test_publisher.publish()
test_publisher.publish(data_block="asdf")
test_publisher.shutdown()
| 31.909091 | 86 | 0.706553 | [
"Apache-2.0"
] | BrunoCoimbra/decisionengine | src/decisionengine/framework/modules/tests/test_Publisher.py | 702 | Python |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF model input pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=g-bad-import-order
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from utils.recommendation import constants as rconst
from utils.recommendation import movielens
from utils.recommendation import data_pipeline
NUM_SHARDS = 16
def create_dataset_from_tf_record_files(input_file_pattern,
pre_batch_size,
batch_size,
is_training=True):
"""Creates dataset from (tf)records files for training/evaluation."""
files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training)
def make_dataset(files_dataset, shard_index):
"""Returns dataset for sharded tf record files."""
if pre_batch_size != batch_size:
raise ValueError("Pre-batch ({}) size is not equal to batch "
"size ({})".format(pre_batch_size, batch_size))
files_dataset = files_dataset.shard(NUM_SHARDS, shard_index)
dataset = files_dataset.interleave(tf.data.TFRecordDataset)
decode_fn = functools.partial(
data_pipeline.DatasetManager.deserialize,
batch_size=pre_batch_size,
is_training=is_training)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
dataset = tf.data.Dataset.range(NUM_SHARDS)
map_fn = functools.partial(make_dataset, files)
dataset = dataset.interleave(
map_fn,
cycle_length=NUM_SHARDS,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def create_dataset_from_data_producer(producer, params):
"""Return dataset online-generating data."""
def preprocess_train_input(features, labels):
"""Pre-process the training data.
This is needed because
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for DUPLICATE_MASK in training data.
Args:
features: Dictionary of features for training.
labels: Training labels.
Returns:
Processed training features.
"""
fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])
features[rconst.DUPLICATE_MASK] = fake_dup_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
train_input_fn = producer.make_input_fn(is_training=True)
train_input_dataset = train_input_fn(params).map(preprocess_train_input)
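  # Illustrative note (a sketch, not original code): the map above gives every
  # training element the rconst.DUPLICATE_MASK key (all zeros) and the
  # rconst.TRAIN_LABEL_KEY key, so training and evaluation feature dicts end up
  # with the same signature.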
def preprocess_eval_input(features):
"""Pre-process the eval data.
This is needed because:
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for VALID_PT_MASK in eval data.
Args:
features: Dictionary of features for evaluation.
Returns:
Processed evaluation features.
"""
labels = tf.cast(tf.zeros_like(
features[movielens.USER_COLUMN]), tf.bool)
fake_valid_pt_mask = tf.cast(
tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
eval_input_fn = producer.make_input_fn(is_training=False)
eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input)
return train_input_dataset, eval_input_dataset
def create_ncf_input_data(params,
producer=None,
input_meta_data=None,
strategy=None):
"""Creates NCF training/evaluation dataset.
Args:
params: Dictionary containing parameters for train/evaluation data.
producer: Instance of BaseDataConstructor that generates data online. Must
not be None when params['train_dataset_path'] or
params['eval_dataset_path'] is not specified.
input_meta_data: A dictionary of input metadata to be used when reading data
      from tf record files. Must be specified when params["train_dataset_path"]
is specified.
strategy: Distribution strategy used for distributed training. If specified,
used to assert that evaluation batch size is correctly a multiple of
total number of devices used.
Returns:
(training dataset, evaluation dataset, train steps per epoch,
eval steps per epoch)
Raises:
    ValueError: If data is being generated online when using TPUs.
"""
# NCF evaluation metric calculation logic assumes that evaluation data
# sample size are in multiples of (1 + number of negative samples in
# evaluation) for each device. As so, evaluation batch size must be a
# multiple of (number of replicas * (1 + number of negative samples)).
num_devices = strategy.num_replicas_in_sync if strategy else 1
if (params["eval_batch_size"] % (num_devices *
(1 + rconst.NUM_EVAL_NEGATIVES))):
raise ValueError("Evaluation batch size must be divisible by {} "
"times {}".format(num_devices,
(1 + rconst.NUM_EVAL_NEGATIVES)))
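  # Worked example (illustrative only; 999 is a hypothetical value for
  # rconst.NUM_EVAL_NEGATIVES): with 8 replicas, eval_batch_size must be a
  # multiple of 8 * (1 + 999) = 8000, so 8000 or 16000 passes while 4096 raises.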
if params["train_dataset_path"]:
assert params["eval_dataset_path"]
train_dataset = create_dataset_from_tf_record_files(
params["train_dataset_path"],
input_meta_data["train_prebatch_size"],
params["batch_size"],
is_training=True)
eval_dataset = create_dataset_from_tf_record_files(
params["eval_dataset_path"],
input_meta_data["eval_prebatch_size"],
params["eval_batch_size"],
is_training=False)
num_train_steps = int(input_meta_data["num_train_steps"])
num_eval_steps = int(input_meta_data["num_eval_steps"])
else:
if params["use_tpu"]:
raise ValueError(
"TPU training does not support data producer yet. "
"Use pre-processed data.")
assert producer
# Start retrieving data from producer.
train_dataset, eval_dataset = create_dataset_from_data_producer(
producer, params)
num_train_steps = producer.train_batches_per_epoch
num_eval_steps = producer.eval_batches_per_epoch
return train_dataset, eval_dataset, num_train_steps, num_eval_steps
| 39.84492 | 82 | 0.668232 | [
"Apache-2.0"
] | Ezra-H/autodist | examples/benchmark/utils/recommendation/ncf_input_pipeline.py | 7,451 | Python |
# coding=utf-8
"""
A utility module for working with playbooks in the `origin-ci-tool` repository.
"""
from __future__ import absolute_import, division, print_function
from os.path import abspath, dirname, exists, join
from click import ClickException
def playbook_path(playbook_name):
"""
Get the path to the named playbook. To allow for
as much brevity as possible in the given playbook
name, we will attempt to search under:
- oct/playbooks
- openshift-ansible/playbooks
:param playbook_name: the name of the playbook
:type playbook_name: str
:return: the path to the playbook
:rtype: str
:raises ClickException: when no playbook is found
"""
from ..oct import __file__ as root_path
for parent_repo in ['oct', 'openshift-ansible']:
playbook_file = join(abspath(dirname(root_path)), 'ansible', parent_repo, 'playbooks', playbook_name + '.yml')
if exists(playbook_file):
return playbook_file
raise ClickException('No playbook named {} found!'.format(playbook_name))
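# Illustrative usage (a sketch, not part of the original module; the playbook
# name 'prepare/all' is a hypothetical example of a name that resolves under
# oct/playbooks or openshift-ansible/playbooks):
#
#   from oct.util.playbook import playbook_path
#   path = playbook_path('prepare/all')
#   # -> .../ansible/oct/playbooks/prepare/all.yml, or a ClickException if absent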
| 29.666667 | 118 | 0.707865 | [
"Apache-2.0"
] | DennisPeriquet/origin-ci-tool | oct/util/playbook.py | 1,068 | Python |
import os
import json
CONFIG_FILE_PATH = os.path.expanduser("~/.coinbase-indicator")
GENERAL_OPTION_KEY = 'general'
OPTION_KEY_LARGE_LABEL = 'show_crypto_currency_in_the_label'
OPTION_KEY_NOTIFICATION = 'show_notifications'
OPTION_KEY_THEME_MONOCHROME = 'theme_monochrome'
CRYPTO_CURRENCY_OPTION_KEY = 'crypto_currency'
OPTION_KEY_CRYPTO_CURRENCY_SHOW = 'show_exchange_price'
class Option(object):
def __init__(self, status, label):
self.status = status
self.label = label
def get_label(self):
return self.label
def get_status(self):
return self.status
def set_status(self, status):
self.status = status
class Config(object):
def __init__(self):
self.general_options = {
OPTION_KEY_LARGE_LABEL: Option(False, self.__get_label(OPTION_KEY_LARGE_LABEL)),
OPTION_KEY_NOTIFICATION: Option(True, self.__get_label(OPTION_KEY_NOTIFICATION)),
OPTION_KEY_THEME_MONOCHROME: Option(True, self.__get_label(OPTION_KEY_THEME_MONOCHROME)),
}
self.crypto_currency_options = {}
def set_crypto_currencies_options(self, crypto_currencies):
for crypto_currency in crypto_currencies:
if crypto_currency not in self.crypto_currency_options:
self.crypto_currency_options[crypto_currency] = {
OPTION_KEY_CRYPTO_CURRENCY_SHOW: Option(False, self.__get_label(OPTION_KEY_CRYPTO_CURRENCY_SHOW)),
}
def load(self):
if not os.path.isfile(CONFIG_FILE_PATH):
return
with open(CONFIG_FILE_PATH, 'r') as config_file:
config_dict = json.load(config_file)
if GENERAL_OPTION_KEY in config_dict:
for option_key in config_dict[GENERAL_OPTION_KEY]:
self.general_options[option_key] = Option(config_dict[GENERAL_OPTION_KEY][option_key], self.__get_label(option_key))
if CRYPTO_CURRENCY_OPTION_KEY in config_dict:
for crypto_currency in config_dict[CRYPTO_CURRENCY_OPTION_KEY]:
if crypto_currency not in self.crypto_currency_options:
self.crypto_currency_options[crypto_currency] = {}
for option_key in config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency]:
self.crypto_currency_options[crypto_currency][option_key] = Option(config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency][option_key], self.__get_label(option_key))
def persist(self):
config_dict = {
GENERAL_OPTION_KEY: {},
CRYPTO_CURRENCY_OPTION_KEY: {},
}
for option_key in self.general_options:
config_dict[GENERAL_OPTION_KEY][option_key] = self.general_options[option_key].get_status()
for crypto_currency in self.crypto_currency_options:
config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency] = {}
for option_key in self.crypto_currency_options[crypto_currency]:
config_dict[CRYPTO_CURRENCY_OPTION_KEY][crypto_currency][option_key] = self.crypto_currency_options[crypto_currency][option_key].get_status()
with open(CONFIG_FILE_PATH, 'w') as config_file:
json.dump(config_dict, config_file)
def get_crypto_currency_options(self):
return self.crypto_currency_options
def get_general_options(self):
return self.general_options
def is_crypto_currency_visible(self, crypto_currency):
return \
crypto_currency in self.crypto_currency_options \
and OPTION_KEY_CRYPTO_CURRENCY_SHOW in self.crypto_currency_options[crypto_currency] \
and self.crypto_currency_options[crypto_currency][OPTION_KEY_CRYPTO_CURRENCY_SHOW].get_status()
def is_theme_monochrome(self):
return \
OPTION_KEY_THEME_MONOCHROME in self.general_options \
and self.general_options[OPTION_KEY_THEME_MONOCHROME].get_status()
def is_notification_visible(self):
return \
OPTION_KEY_NOTIFICATION in self.general_options \
and self.general_options[OPTION_KEY_NOTIFICATION].get_status()
def is_large_label_visible(self):
return \
OPTION_KEY_LARGE_LABEL in self.general_options \
and self.general_options[OPTION_KEY_LARGE_LABEL].get_status()
@staticmethod
def __get_label(key):
label = key.replace('_', ' ')
return label[:1].upper() + label[1:]
| 39.415929 | 186 | 0.705658 | [
"Apache-2.0"
] | amitkumarj441/Cryptocoin-Price-Indicator | indicator/config.py | 4,454 | Python |
# pylint: disable=redefined-outer-name
import pytest
from dagster.core.code_pointer import ModuleCodePointer
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.host_representation.grpc_server_registry import ProcessGrpcServerRegistry
from dagster.core.host_representation.handle import GrpcServerRepositoryLocationHandle
from dagster.core.host_representation.origin import (
ExternalPipelineOrigin,
ExternalRepositoryOrigin,
InProcessRepositoryLocationOrigin,
)
from dagster.core.storage.pipeline_run import IN_PROGRESS_RUN_STATUSES, PipelineRunStatus
from dagster.core.storage.tags import PRIORITY_TAG
from dagster.core.test_utils import create_run_for_test, instance_for_test
from dagster.daemon.run_coordinator.queued_run_coordinator_daemon import QueuedRunCoordinatorDaemon
from dagster_tests.api_tests.utils import get_foo_pipeline_handle
@pytest.fixture()
def instance():
overrides = {
"run_launcher": {"module": "dagster.core.test_utils", "class": "MockedRunLauncher"},
}
with instance_for_test(overrides=overrides) as inst:
yield inst
@pytest.fixture()
def grpc_server_registry(instance): # pylint: disable=unused-argument
with ProcessGrpcServerRegistry(wait_for_processes_on_exit=True) as registry:
yield registry
def create_run(instance, **kwargs):
with get_foo_pipeline_handle() as pipeline_handle:
create_run_for_test(
instance,
external_pipeline_origin=pipeline_handle.get_external_origin(),
pipeline_name="foo",
**kwargs,
)
def create_invalid_run(instance, **kwargs):
create_run_for_test(
instance,
external_pipeline_origin=ExternalPipelineOrigin(
ExternalRepositoryOrigin(
InProcessRepositoryLocationOrigin(
ReconstructableRepository(ModuleCodePointer("fake", "fake"))
),
"foo",
),
"wrong-pipeline",
),
pipeline_name="wrong-pipeline",
**kwargs,
)
def get_run_ids(runs_queue):
return [run.run_id for run in runs_queue]
def test_attempt_to_launch_runs_filter(instance, grpc_server_registry):
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="non-queued-run",
status=PipelineRunStatus.NOT_STARTED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["queued-run"]
def test_attempt_to_launch_runs_no_queued(instance, grpc_server_registry):
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.STARTED,
)
create_run(
instance,
run_id="non-queued-run",
status=PipelineRunStatus.NOT_STARTED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert instance.run_launcher.queue() == []
@pytest.mark.parametrize(
"num_in_progress_runs",
[0, 1, 3, 4, 5],
)
def test_get_queued_runs_max_runs(instance, num_in_progress_runs, grpc_server_registry):
max_runs = 4
# fill run store with ongoing runs
in_progress_run_ids = ["in_progress-run-{}".format(i) for i in range(num_in_progress_runs)]
for i, run_id in enumerate(in_progress_run_ids):
# get a selection of all in progress statuses
status = IN_PROGRESS_RUN_STATUSES[i % len(IN_PROGRESS_RUN_STATUSES)]
create_run(
instance,
run_id=run_id,
status=status,
)
# add more queued runs than should be launched
queued_run_ids = ["queued-run-{}".format(i) for i in range(max_runs + 1)]
for run_id in queued_run_ids:
create_run(
instance,
run_id=run_id,
status=PipelineRunStatus.QUEUED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=max_runs,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert len(instance.run_launcher.queue()) == max(0, max_runs - num_in_progress_runs)
def test_priority(instance, grpc_server_registry):
create_run(instance, run_id="default-pri-run", status=PipelineRunStatus.QUEUED)
create_run(
instance,
run_id="low-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "-1"},
)
create_run(
instance,
run_id="hi-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "3"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == [
"hi-pri-run",
"default-pri-run",
"low-pri-run",
]
def test_priority_on_malformed_tag(instance, grpc_server_registry):
create_run(
instance,
run_id="bad-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "foobar"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["bad-pri-run"]
def test_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="tiny-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="tiny-2",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="large-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "large"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[{"key": "database", "value": "tiny", "limit": 1}],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["tiny-1", "large-1"]
def test_multiple_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="run-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny", "user": "johann"},
)
create_run(
instance,
run_id="run-2",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="run-3",
status=PipelineRunStatus.QUEUED,
tags={"user": "johann"},
)
create_run(
instance,
run_id="run-4",
status=PipelineRunStatus.QUEUED,
tags={"user": "johann"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[
{"key": "database", "value": "tiny", "limit": 1},
{"key": "user", "value": "johann", "limit": 2},
],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
def test_overlapping_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="run-1",
status=PipelineRunStatus.QUEUED,
tags={"foo": "bar"},
)
create_run(
instance,
run_id="run-2",
status=PipelineRunStatus.QUEUED,
tags={"foo": "bar"},
)
create_run(
instance,
run_id="run-3",
status=PipelineRunStatus.QUEUED,
tags={"foo": "other"},
)
create_run(
instance,
run_id="run-4",
status=PipelineRunStatus.QUEUED,
tags={"foo": "other"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[
{"key": "foo", "limit": 2},
{"key": "foo", "value": "bar", "limit": 1},
],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
def test_location_handles_reused(instance, monkeypatch, grpc_server_registry):
"""
    Verifies that only one repository location is created when two queued runs from the same
    location are dequeued in the same iteration.
"""
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="queued-run-2",
status=PipelineRunStatus.QUEUED,
)
original_method = GrpcServerRepositoryLocationHandle.__init__
method_calls = []
def mocked_handle_init(
self,
origin,
host=None,
port=None,
socket=None,
server_id=None,
heartbeat=False,
watch_server=True,
):
method_calls.append(origin)
return original_method(self, origin, host, port, socket, server_id, heartbeat, watch_server)
monkeypatch.setattr(
GrpcServerRepositoryLocationHandle,
"__init__",
mocked_handle_init,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["queued-run", "queued-run-2"]
assert len(method_calls) == 1
def test_skip_error_runs(instance, grpc_server_registry):
create_invalid_run(
instance,
run_id="bad-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="good-run",
status=PipelineRunStatus.QUEUED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
errors = [
error for error in list(coordinator.run_iteration(instance, grpc_server_registry)) if error
]
assert len(errors) == 1
assert "ModuleNotFoundError" in errors[0].message
assert get_run_ids(instance.run_launcher.queue()) == ["good-run"]
assert instance.get_run_by_id("bad-run").status == PipelineRunStatus.FAILURE
| 28.32 | 100 | 0.655932 | [
"Apache-2.0"
] | PenguinToast/dagster | python_modules/dagster/dagster_tests/daemon_tests/test_queued_run_coordinator_daemon.py | 10,620 | Python |
# -*- coding: utf-8 -*-
import tensorflow as tf
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import argparse
from aquaman_net import AquamanNet
from utils import IMAGE_SIZE
EPOCHS = 1000
BATCH_SIZE = 4
def preproc(image_bytes):
image_jpg = tf.image.decode_jpeg(image_bytes, channels=3)
image_jpg = tf.image.resize_images(image_jpg, IMAGE_SIZE)
image_jpg = tf.to_float(image_jpg) / 255.0
image_jpg = tf.reshape(
image_jpg, [IMAGE_SIZE[0], IMAGE_SIZE[1], 3], name="Reshape_Preproc")
return image_jpg
def input_fn(tf_records_list, epochs=10, batch_size=8, n_frames=16):
def _parse_proto(example_proto):
parsed_dict = {
"target": tf.FixedLenFeature((), tf.float32, default_value=0)
}
for i in range(n_frames):
parsed_dict['frame_{}'.format(i)] = tf.FixedLenFeature(
(), tf.string, default_value="")
parsed_features = tf.parse_single_example(example_proto, parsed_dict)
return parsed_features
def _split_xy(feat_dict):
target = tf.one_hot(tf.to_int32(
feat_dict['target']), depth=2, dtype=tf.float32)
input_frames = {}
for i in range(n_frames):
frame_id = 'frame_{}'.format(i)
input_frames[frame_id] = feat_dict[frame_id]
return input_frames, {'target': target}
def _input_fn():
dataset = tf.data.TFRecordDataset(
tf_records_list, compression_type='GZIP')
dataset = dataset.map(_parse_proto)
dataset = dataset.map(_split_xy)
dataset = dataset.shuffle(buffer_size=2 * batch_size)
dataset = dataset.repeat(epochs)
dataset = dataset.batch(batch_size)
return dataset
return _input_fn
def metrics(logits, labels):
argmax_logits = tf.argmax(logits, axis=1)
argmax_labels = tf.argmax(labels, axis=1)
return {'accuracy': tf.metrics.accuracy(argmax_labels, argmax_logits)}
def get_serving_fn(window_size):
input_tensor = {"frame_{}".format(i): tf.placeholder(
dtype=tf.string, shape=[None]) for i in range(window_size)}
return tf.estimator.export.build_raw_serving_input_receiver_fn(input_tensor)
def model_fn(n_frames):
def _model_fn(features, labels, mode, params):
input_tensors_list = []
for i in range(n_frames):
frame_id = 'frame_{}'.format(i)
frame_tensor = tf.map_fn(preproc, features[frame_id], tf.float32)
frame_tensor = tf.expand_dims(frame_tensor, axis=-1)
frame_tensor = tf.transpose(frame_tensor, [0, 1, 2, 4, 3])
print(frame_tensor)
input_tensors_list.append(frame_tensor)
input_tensor_stream = tf.concat(input_tensors_list, axis=3)
print(input_tensor_stream)
is_training = mode == tf.estimator.ModeKeys.TRAIN
logits = AquamanNet(input_tensor_stream, is_training, 2)
# Loss, training and eval operations are not needed during inference.
total_loss = None
loss = None
train_op = None
eval_metric_ops = {}
export_outputs = None
prediction_dict = {'class': tf.argmax(
logits, axis=1, name="predictions")}
if mode != tf.estimator.ModeKeys.PREDICT:
# IT IS VERY IMPORTANT TO RETRIEVE THE REGULARIZATION LOSSES
reg_loss = tf.losses.get_regularization_loss()
# This summary is automatically caught by the Estimator API
tf.summary.scalar("Regularization_Loss", tensor=reg_loss)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=labels['target'], logits=logits)
tf.summary.scalar("XEntropy_LOSS", tensor=loss)
total_loss = loss + reg_loss
learning_rate = tf.constant(1e-4, name='fixed_learning_rate')
#optimizer = tf.train.GradientDescentOptimizer(learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
vars_to_train = tf.trainable_variables()
tf.logging.info("Variables to train: {}".format(vars_to_train))
if is_training:
                # You DO need to get this collection in order to perform updates on batch_norm variables
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
loss=total_loss, global_step=tf.train.get_global_step(), var_list=vars_to_train)
eval_metric_ops = metrics(logits, labels['target'])
else:
# pass
export_outputs = {
'logits': tf.estimator.export.PredictOutput(outputs=logits)}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=prediction_dict,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs)
return _model_fn
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-tf-list',
dest='train_tf_list',
type=str,
required=True)
parser.add_argument('--test-tf-list',
dest='test_tf_list',
type=str,
required=True)
parser.add_argument('--output-dir',
dest='output_dir',
type=str,
required=True)
parser.add_argument('--window-size',
dest='window_size',
type=int,
required=True)
args = parser.parse_args()
tfrecord_list_train = args.train_tf_list.split(',')
tfrecord_list_test = args.test_tf_list.split(',')
session_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False
)
run_config = tf.estimator.RunConfig(
model_dir=args.output_dir,
save_summary_steps=100,
session_config=session_config,
save_checkpoints_steps=100,
save_checkpoints_secs=None,
keep_checkpoint_max=1
)
estimator = tf.estimator.Estimator(
model_fn=model_fn(args.window_size),
config=run_config
)
train_input_fn = input_fn(
batch_size=BATCH_SIZE, tf_records_list=tfrecord_list_train, epochs=EPOCHS, n_frames=args.window_size)
test_input_fn = input_fn(
batch_size=BATCH_SIZE, tf_records_list=tfrecord_list_test, epochs=1, n_frames=args.window_size)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=10000)
# eval_steps = math.ceil(EVAL_SET_SIZE / FLAGS.batch_size)
eval_spec = tf.estimator.EvalSpec(
input_fn=test_input_fn,
# steps=eval_steps,
start_delay_secs=60,
throttle_secs=60)
tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
estimator.export_savedmodel(
export_dir_base=args.output_dir, serving_input_receiver_fn=get_serving_fn(args.window_size))
| 32.63964 | 109 | 0.637593 | [
"MIT"
] | brungcm/health-hack-2019 | ml/train_net.py | 7,246 | Python |
#!/usr/bin/env python
"""
Test Service
"""
from ..debugging import bacpypes_debugging, ModuleLogger
# some debugging
_debug = 0
_log = ModuleLogger(globals())
def some_function(*args):
if _debug: some_function._debug("f %r", args)
return args[0] + 1
bacpypes_debugging(some_function) | 16.5 | 56 | 0.720539 | [
"MIT"
] | ChristianTremblay/bacpypes | py25/bacpypes/service/test.py | 297 | Python |
# Copyright 2021 The Couler Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import couler.core.templates.output
from couler.core import states, utils
from couler.core.templates import OutputArtifact, Step
def update_step(func_name, args, step_name, caller_line):
if states.workflow.dag_mode_enabled():
step_name = _update_dag_tasks(
func_name,
states._dag_caller_line,
states._upstream_dag_task,
states._upstream_dag_depends_logic,
args,
step_name=step_name,
)
states._upstream_dag_task = [step_name]
else:
if states._run_concurrent_lock:
step_name = _update_steps(
"concurrent_func_name",
states._concurrent_func_line,
args,
func_name,
)
else:
step_name = _update_steps(func_name, caller_line, args)
return step_name
def _update_dag_tasks(
function_name,
caller_line,
dependencies,
depends_logic,
args=None,
template_name=None,
step_name=None,
):
"""
A task in DAG of Argo YAML contains name, related template and parameters.
Here we insert a single task into the global tasks.
"""
if step_name is None:
function_id = utils.invocation_name(function_name, caller_line)
else:
function_id = step_name
task_template = states.workflow.get_dag_task(function_id)
if task_template is None:
task_template = OrderedDict({"name": function_id})
if dependencies is not None and isinstance(dependencies, list):
if "dependencies" in task_template:
task_template["dependencies"].extend(dependencies)
else:
task_template["dependencies"] = dependencies
if depends_logic is not None:
task_template["depends"] = depends_logic
if template_name is None:
task_template["template"] = function_name
else:
task_template["template"] = template_name
# configure the args
if args is not None:
parameters, artifacts = _get_params_and_artifacts_from_args(
args, function_name, prefix="tasks"
)
if len(parameters) > 0:
task_template["arguments"] = OrderedDict()
task_template["arguments"]["parameters"] = parameters
if len(artifacts) > 0:
if "arguments" not in task_template:
task_template["arguments"] = OrderedDict()
task_template["arguments"]["artifacts"] = artifacts
else:
# step exist on the dag, thus, we update its dependency
if dependencies is not None:
if "dependencies" in task_template:
task_template["dependencies"].extend(dependencies)
else:
task_template["dependencies"] = [dependencies]
if depends_logic is not None:
task_template["depends"] = depends_logic
t_name = function_name if template_name is None else template_name
step = Step(name=function_id, template=t_name)
if states._exit_handler_enable:
if states._when_prefix is not None:
step.when = states._when_prefix
if function_id in states.workflow.exit_handler_step:
states.workflow.exit_handler_step.get(function_id).append(
step.to_dict()
)
else:
states.workflow.exit_handler_step[function_id] = [step.to_dict()]
elif states._when_prefix is not None:
step.when = states._when_prefix
if step.name not in states.workflow.dag_tasks.keys():
step_spec = step.to_dict()
step_spec["dependencies"] = [states._when_task]
states.workflow.dag_tasks[step.name] = step_spec
else:
states.workflow.update_dag_task(function_id, task_template)
# return the current task name
return function_id
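# Illustrative sketch of a resulting DAG task entry (the step and template names
# are hypothetical, and the parameter name is whatever utils.input_parameter_name
# produces):
#   OrderedDict([
#       ("name", "train-100"),
#       ("dependencies", ["preprocess-42"]),
#       ("template", "train"),
#       ("arguments", {"parameters": [{"name": "<input parameter name>",
#                                      "value": "<argo output reference>"}]}),
#   ])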
def _update_steps(function_name, caller_line, args=None, template_name=None):
"""
A step in Argo YAML contains name, related template and parameters.
Here we insert a single step into the global steps.
"""
function_id = utils.invocation_name(function_name, caller_line)
# Update `steps` only if needed
if states._update_steps_lock:
name = function_id
if states._run_concurrent_lock:
_id = utils.invocation_name(template_name, caller_line)
name = "%s-%s" % (_id, states._concurrent_func_id)
if states._sub_steps is not None:
states._concurrent_func_id = states._concurrent_func_id + 1
t_name = function_name if template_name is None else template_name
step = Step(name=name, template=t_name)
if states._when_prefix is not None:
step.when = states._when_prefix
if args is not None:
parameters, artifacts = _get_params_and_artifacts_from_args(
args,
template_name
if states._run_concurrent_lock
else function_name,
prefix="steps",
)
if len(parameters) > 0:
step.arguments = OrderedDict()
step.arguments["parameters"] = parameters
if len(artifacts) > 0:
if step.arguments is None:
step.arguments = OrderedDict()
step.arguments["artifacts"] = artifacts
if states._condition_id is not None:
function_id = states._condition_id
if states._while_lock:
if function_id in states._while_steps:
states._while_steps.get(function_id).append(step.to_dict())
else:
states._while_steps[function_id] = [step.to_dict()]
else:
if states._sub_steps is not None:
if function_id in states._sub_steps:
states._sub_steps.get(function_id).append(step.to_dict())
else:
states._sub_steps[function_id] = [step.to_dict()]
elif states._exit_handler_enable is True:
if function_id in states.workflow.exit_handler_step:
states.workflow.exit_handler_step.get(function_id).append(
step.to_dict()
)
else:
states.workflow.exit_handler_step[function_id] = [
step.to_dict()
]
else:
states.workflow.add_step(function_id, step)
return step.name
else:
return function_id
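# Illustrative sketch (hypothetical names): when the steps lock is held, the code
# above records Step(name="echo-7", template="echo"), optionally carrying a
# `when` clause from states._when_prefix and an `arguments` dict with
# "parameters" and/or "artifacts", into the sub-steps, while-steps, exit-handler
# steps, or the workflow's regular steps depending on the current state flags.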
def _get_params_and_artifacts_from_args(args, input_param_name, prefix):
parameters = []
artifacts = []
if not isinstance(args, list):
args = [args]
i = 0
for arg in args:
values = couler.core.templates.output.parse_argo_output(arg, prefix)
if isinstance(values, list):
for value in values:
parameters.append(
{
"name": utils.input_parameter_name(
input_param_name, i
),
"value": value,
}
)
i += 1
else:
if isinstance(arg, OutputArtifact):
artifact_dict = {
"name": ".".join(arg.value.split(".")[5:]),
"from": values,
}
if not any(
[artifact_dict["from"] == x["from"] for x in artifacts]
):
artifacts.append(artifact_dict)
else:
parameters.append(
{
"name": utils.input_parameter_name(
input_param_name, i
),
"value": values,
}
)
i += 1
return parameters, artifacts
| 35.506173 | 78 | 0.585883 | [
"Apache-2.0"
] | javoweb/couler | couler/core/step_update_utils.py | 8,628 | Python |
""" Pymode utils. """
import os.path
import sys
import threading
import warnings
from contextlib import contextmanager
import vim # noqa
from ._compat import StringIO, PY2
DEBUG = int(vim.eval('g:pymode_debug'))
warnings.filterwarnings('ignore')
@contextmanager
def silence_stderr():
""" Redirect stderr. """
if DEBUG:
yield
else:
with threading.Lock():
stderr = sys.stderr
sys.stderr = StringIO()
yield
with threading.Lock():
sys.stderr = stderr
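# Illustrative usage (a sketch; run_noisy_check is a hypothetical caller):
# stderr output produced inside the block is discarded unless g:pymode_debug
# is enabled.
#
#   with silence_stderr():
#       run_noisy_check()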
def patch_paths():
    """ Insert the bundled libs directories into sys.path. """
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs'))
if PY2:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs2'))
else:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'libs3'))
| 20 | 76 | 0.630952 | [
"MIT"
] | Jenkin0603/myvim | bundle/python-mode/pymode/utils.py | 840 | Python |
import graphene
import graphql_jwt
from graphql_jwt.refresh_token.mixins import RefreshTokenMixin
from ..testcases import SchemaTestCase
from . import mixins
class TokenAuthTests(mixins.TokenAuthMixin, SchemaTestCase):
query = '''
mutation TokenAuth($username: String!, $password: String!) {
tokenAuth(username: $username, password: $password) {
token
refreshToken
}
}'''
refresh_token_mutations = {
'token_auth': graphql_jwt.ObtainJSONWebToken,
}
class Refresh(RefreshTokenMixin, graphql_jwt.Refresh):
class Arguments(RefreshTokenMixin.Fields):
"""Refresh Arguments"""
class RefreshTests(mixins.RefreshMixin, SchemaTestCase):
query = '''
mutation RefreshToken($refreshToken: String!) {
refreshToken(refreshToken: $refreshToken) {
token
refreshToken
payload
}
}'''
refresh_token_mutations = {
'refresh_token': Refresh,
}
class RevokeTests(mixins.RevokeMixin, SchemaTestCase):
query = '''
mutation RevokeToken($refreshToken: String!) {
revokeToken(refreshToken: $refreshToken) {
revoked
}
}'''
class Mutation(graphene.ObjectType):
revoke_token = graphql_jwt.Revoke.Field()
| 22.945455 | 64 | 0.674326 | [
"MIT"
] | CZZLEGEND/django-graphql-jwt | tests/refresh_token/test_mutations.py | 1,262 | Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os.path
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")
def check_header_files(component):
component_dir = os.path.join(SRC_DIR, component)
header_files = (glob.glob(os.path.join(component_dir, "*.h")) +
glob.glob(os.path.join(component_dir, "*", "*.h")))
assert header_files
errors = []
for filename in header_files:
assert filename.endswith(".h"), filename
rel_filename = os.path.relpath(filename, start=component_dir)
guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
expected = "#ifndef " + guard
for line in open(filename):
line = line.rstrip("\n")
if line.startswith("#ifndef"):
if line != expected:
errors.append('%s uses guard "%s" but should use "%s"' %
(filename, line, expected))
break
return errors
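# Illustrative derivation (hypothetical header file):
#   rel_filename "landmarks/landmark_graph.h"
#   -> guard "LANDMARKS_LANDMARK_GRAPH_H"
#   -> the file must contain the line "#ifndef LANDMARKS_LANDMARK_GRAPH_H"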
def main():
errors = []
errors.extend(check_header_files("preprocess"))
errors.extend(check_header_files("search"))
for error in errors:
print(error)
if errors:
sys.exit(1)
if __name__ == "__main__":
main()
| 28.979167 | 90 | 0.591661 | [
"MIT"
] | karthikv792/PlanningAssistance | planner/FAST-DOWNWARD/misc/style/check-include-guard-convention.py | 1,391 | Python |
# Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for interpreter commands. Each command has to implement two
methods:
- eval(list(string)): Given a list of tokens check whether the tokens reference
the given command. If yes, evaluate the command and return True. Otherwise,
return False.
- help: Print a simple help statement
"""
from abc import abstractmethod
from typing import List
class Command(object):
"""Abstract class for interpreter commands."""
@abstractmethod
def eval(self, tokens: List[str]) -> bool:
"""If the given tokens sequence matches the given command execute it
and return True. Otherwise, return False.
Parameters
----------
tokens: list(string)
List of tokens in the command line
Returns
-------
bool
"""
raise NotImplementedError()
@abstractmethod
def help(self) -> None:
"""Print a simple help statement for the command."""
raise NotImplementedError()
    def output(self, rows):
        """Output the given rows in tabular format. Each row is a list of
        string values. All rows are expected to have the same length. The first
        row is the table header.
Parameters
----------
rows: list(string)
List of rows in the table
"""
# Determine the longest value for each column.
columns = [0] * len(rows[0])
for row in rows:
for col in range(len(columns)):
count = len(row[col])
if count > columns[col]:
columns[col] = count
# Create format string
format = None
divider = list()
for col_len in columns:
f = '%-' + str(col_len) + 's'
if format is None:
format = f
else:
format += ' | ' + f
if len(divider) in [0, len(columns) - 1]:
i = 1
else:
i = 2
divider.append('-' * (col_len + i))
        # Print formatted rows
print(format % tuple(rows[0]))
print('|'.join(divider))
for row in rows[1:]:
print(format % tuple(row))
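# Minimal illustrative subclass (a sketch, not part of this module); the command
# word "history" and its table contents are hypothetical and only demonstrate
# the eval/help contract described in the module docstring:
#
#   class HistoryCommand(Command):
#       def eval(self, tokens):
#           if tokens and tokens[0] == 'history':
#               self.output([['Command'], ['history']])
#               return True
#           return False
#       def help(self):
#           print('history -- print previously executed commands')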
| 32.943182 | 79 | 0.588134 | [
"ECL-2.0",
"Apache-2.0"
] | VizierDB/web-api-async | vizier/api/client/cli/command.py | 2,899 | Python |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.water_heaters_and_thermal_storage import WaterHeaterMixed
log = logging.getLogger(__name__)
class TestWaterHeaterMixed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_waterheatermixed(self):
pyidf.validation_level = ValidationLevel.error
obj = WaterHeaterMixed()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_tank_volume = 0.0
obj.tank_volume = var_tank_volume
# object-list
var_setpoint_temperature_schedule_name = "object-list|Setpoint Temperature Schedule Name"
obj.setpoint_temperature_schedule_name = var_setpoint_temperature_schedule_name
# real
var_deadband_temperature_difference = 0.0
obj.deadband_temperature_difference = var_deadband_temperature_difference
# real
var_maximum_temperature_limit = 5.5
obj.maximum_temperature_limit = var_maximum_temperature_limit
# alpha
var_heater_control_type = "Cycle"
obj.heater_control_type = var_heater_control_type
# real
var_heater_maximum_capacity = 0.0
obj.heater_maximum_capacity = var_heater_maximum_capacity
# real
var_heater_minimum_capacity = 0.0
obj.heater_minimum_capacity = var_heater_minimum_capacity
# real
var_heater_ignition_minimum_flow_rate = 0.0
obj.heater_ignition_minimum_flow_rate = var_heater_ignition_minimum_flow_rate
# real
var_heater_ignition_delay = 0.0
obj.heater_ignition_delay = var_heater_ignition_delay
# alpha
var_heater_fuel_type = "Electricity"
obj.heater_fuel_type = var_heater_fuel_type
# real
var_heater_thermal_efficiency = 0.50005
obj.heater_thermal_efficiency = var_heater_thermal_efficiency
# object-list
var_part_load_factor_curve_name = "object-list|Part Load Factor Curve Name"
obj.part_load_factor_curve_name = var_part_load_factor_curve_name
# real
var_off_cycle_parasitic_fuel_consumption_rate = 0.0
obj.off_cycle_parasitic_fuel_consumption_rate = var_off_cycle_parasitic_fuel_consumption_rate
# alpha
var_off_cycle_parasitic_fuel_type = "Electricity"
obj.off_cycle_parasitic_fuel_type = var_off_cycle_parasitic_fuel_type
# real
var_off_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.off_cycle_parasitic_heat_fraction_to_tank = var_off_cycle_parasitic_heat_fraction_to_tank
# real
var_on_cycle_parasitic_fuel_consumption_rate = 0.0
obj.on_cycle_parasitic_fuel_consumption_rate = var_on_cycle_parasitic_fuel_consumption_rate
# alpha
var_on_cycle_parasitic_fuel_type = "Electricity"
obj.on_cycle_parasitic_fuel_type = var_on_cycle_parasitic_fuel_type
# real
var_on_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.on_cycle_parasitic_heat_fraction_to_tank = var_on_cycle_parasitic_heat_fraction_to_tank
# alpha
var_ambient_temperature_indicator = "Schedule"
obj.ambient_temperature_indicator = var_ambient_temperature_indicator
# object-list
var_ambient_temperature_schedule_name = "object-list|Ambient Temperature Schedule Name"
obj.ambient_temperature_schedule_name = var_ambient_temperature_schedule_name
# object-list
var_ambient_temperature_zone_name = "object-list|Ambient Temperature Zone Name"
obj.ambient_temperature_zone_name = var_ambient_temperature_zone_name
# node
var_ambient_temperature_outdoor_air_node_name = "node|Ambient Temperature Outdoor Air Node Name"
obj.ambient_temperature_outdoor_air_node_name = var_ambient_temperature_outdoor_air_node_name
# real
var_off_cycle_loss_coefficient_to_ambient_temperature = 0.0
obj.off_cycle_loss_coefficient_to_ambient_temperature = var_off_cycle_loss_coefficient_to_ambient_temperature
# real
var_off_cycle_loss_fraction_to_zone = 0.5
obj.off_cycle_loss_fraction_to_zone = var_off_cycle_loss_fraction_to_zone
# real
var_on_cycle_loss_coefficient_to_ambient_temperature = 0.0
obj.on_cycle_loss_coefficient_to_ambient_temperature = var_on_cycle_loss_coefficient_to_ambient_temperature
# real
var_on_cycle_loss_fraction_to_zone = 0.5
obj.on_cycle_loss_fraction_to_zone = var_on_cycle_loss_fraction_to_zone
# real
var_peak_use_flow_rate = 0.0
obj.peak_use_flow_rate = var_peak_use_flow_rate
# object-list
var_use_flow_rate_fraction_schedule_name = "object-list|Use Flow Rate Fraction Schedule Name"
obj.use_flow_rate_fraction_schedule_name = var_use_flow_rate_fraction_schedule_name
# object-list
var_cold_water_supply_temperature_schedule_name = "object-list|Cold Water Supply Temperature Schedule Name"
obj.cold_water_supply_temperature_schedule_name = var_cold_water_supply_temperature_schedule_name
# node
var_use_side_inlet_node_name = "node|Use Side Inlet Node Name"
obj.use_side_inlet_node_name = var_use_side_inlet_node_name
# node
var_use_side_outlet_node_name = "node|Use Side Outlet Node Name"
obj.use_side_outlet_node_name = var_use_side_outlet_node_name
# real
var_use_side_effectiveness = 0.5
obj.use_side_effectiveness = var_use_side_effectiveness
# node
var_source_side_inlet_node_name = "node|Source Side Inlet Node Name"
obj.source_side_inlet_node_name = var_source_side_inlet_node_name
# node
var_source_side_outlet_node_name = "node|Source Side Outlet Node Name"
obj.source_side_outlet_node_name = var_source_side_outlet_node_name
# real
var_source_side_effectiveness = 0.5
obj.source_side_effectiveness = var_source_side_effectiveness
# real
var_use_side_design_flow_rate = 0.0
obj.use_side_design_flow_rate = var_use_side_design_flow_rate
# real
var_source_side_design_flow_rate = 0.0
obj.source_side_design_flow_rate = var_source_side_design_flow_rate
# real
var_indirect_water_heating_recovery_time = 0.0001
obj.indirect_water_heating_recovery_time = var_indirect_water_heating_recovery_time
# alpha
var_source_side_flow_control_mode = "StorageTank"
obj.source_side_flow_control_mode = var_source_side_flow_control_mode
# object-list
var_indirect_alternate_setpoint_temperature_schedule_name = "object-list|Indirect Alternate Setpoint Temperature Schedule Name"
obj.indirect_alternate_setpoint_temperature_schedule_name = var_indirect_alternate_setpoint_temperature_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.waterheatermixeds[0].name, var_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].tank_volume, var_tank_volume)
self.assertEqual(idf2.waterheatermixeds[0].setpoint_temperature_schedule_name, var_setpoint_temperature_schedule_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].deadband_temperature_difference, var_deadband_temperature_difference)
self.assertAlmostEqual(idf2.waterheatermixeds[0].maximum_temperature_limit, var_maximum_temperature_limit)
self.assertEqual(idf2.waterheatermixeds[0].heater_control_type, var_heater_control_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_maximum_capacity, var_heater_maximum_capacity)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_minimum_capacity, var_heater_minimum_capacity)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_ignition_minimum_flow_rate, var_heater_ignition_minimum_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_ignition_delay, var_heater_ignition_delay)
self.assertEqual(idf2.waterheatermixeds[0].heater_fuel_type, var_heater_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].heater_thermal_efficiency, var_heater_thermal_efficiency)
self.assertEqual(idf2.waterheatermixeds[0].part_load_factor_curve_name, var_part_load_factor_curve_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_fuel_consumption_rate, var_off_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_fuel_type, var_off_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_parasitic_heat_fraction_to_tank, var_off_cycle_parasitic_heat_fraction_to_tank)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_fuel_consumption_rate, var_on_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_fuel_type, var_on_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_parasitic_heat_fraction_to_tank, var_on_cycle_parasitic_heat_fraction_to_tank)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_indicator, var_ambient_temperature_indicator)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_schedule_name, var_ambient_temperature_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_zone_name, var_ambient_temperature_zone_name)
self.assertEqual(idf2.waterheatermixeds[0].ambient_temperature_outdoor_air_node_name, var_ambient_temperature_outdoor_air_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_loss_coefficient_to_ambient_temperature, var_off_cycle_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheatermixeds[0].off_cycle_loss_fraction_to_zone, var_off_cycle_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_loss_coefficient_to_ambient_temperature, var_on_cycle_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheatermixeds[0].on_cycle_loss_fraction_to_zone, var_on_cycle_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheatermixeds[0].peak_use_flow_rate, var_peak_use_flow_rate)
self.assertEqual(idf2.waterheatermixeds[0].use_flow_rate_fraction_schedule_name, var_use_flow_rate_fraction_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].cold_water_supply_temperature_schedule_name, var_cold_water_supply_temperature_schedule_name)
self.assertEqual(idf2.waterheatermixeds[0].use_side_inlet_node_name, var_use_side_inlet_node_name)
self.assertEqual(idf2.waterheatermixeds[0].use_side_outlet_node_name, var_use_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].use_side_effectiveness, var_use_side_effectiveness)
self.assertEqual(idf2.waterheatermixeds[0].source_side_inlet_node_name, var_source_side_inlet_node_name)
self.assertEqual(idf2.waterheatermixeds[0].source_side_outlet_node_name, var_source_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheatermixeds[0].source_side_effectiveness, var_source_side_effectiveness)
self.assertAlmostEqual(idf2.waterheatermixeds[0].use_side_design_flow_rate, var_use_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].source_side_design_flow_rate, var_source_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheatermixeds[0].indirect_water_heating_recovery_time, var_indirect_water_heating_recovery_time)
self.assertEqual(idf2.waterheatermixeds[0].source_side_flow_control_mode, var_source_side_flow_control_mode)
self.assertEqual(idf2.waterheatermixeds[0].indirect_alternate_setpoint_temperature_schedule_name, var_indirect_alternate_setpoint_temperature_schedule_name) | 61.848485 | 164 | 0.787686 | [
"Apache-2.0"
] | marcelosalles/pyidf | tests/test_waterheatermixed.py | 12,246 | Python |
# coding=UTF-8
import os
import re
import sys
class BaseStringScript:
# State
STATE_SEARCHING='STATE_SEARCHING'
STATE_IN_STR='STATE_IN_STR'
STATE_IN_PLUR='STATE_IN_PLUR'
# Tag types
TYPE_STR='TYPE_STR'
TYPE_PLUR='TYPE_PLUR'
# String tag start/end
START_STR = '<string'
END_STR = '</string'
# Plurals tag start/end
START_PLUR='<plurals'
END_PLUR = '</plurals'
def ProcessTag(self, line, type):
"""
Process a single string tag.
:param line: an array of lines making a single string tag.
:param type: the tag type, such as TYPE_STR or TYPE_PLUR
:return: an array of lines representing the processed tag.
"""
return line
def ProcessFile(self, file_name):
"""
Process and write a file of string resources.
:param file_name: path to the file to process.
:return: None.
"""
lines = []
state = self.STATE_SEARCHING
curr_tag = []
pending_process_type = None
with open(file_name, 'r') as myfile:
data = myfile.read()
for line in data.split('\n'):
# Searching for a new tag
if state == self.STATE_SEARCHING:
if self.START_STR in line:
state = self.STATE_IN_STR
elif self.START_PLUR in line:
state = self.STATE_IN_PLUR
else:
lines.append(line)
# Inside of a string tag
if state == self.STATE_IN_STR:
curr_tag.append(line)
if self.END_STR in line:
pending_process_type = self.TYPE_STR
# Inside of a plurals tag
if state == self.STATE_IN_PLUR:
curr_tag.append(line)
if self.END_PLUR in line:
pending_process_type = self.TYPE_PLUR
# Some processing needs doing
if pending_process_type:
# Do processing
lines += self.ProcessTag(curr_tag, pending_process_type)
# Reset processing state
pending_process_type = None
state = self.STATE_SEARCHING
curr_tag = []
# Write back to the file
self.WriteFile(file_name, '\n'.join(lines))
def WriteFile(self, file_name, file_contents):
"""
Overwrite the contents of a file.
:param file_name: path to the file to write.
:param file_contents: string containing new file contents.
:return: None
"""
with open(file_name, 'w') as myfile:
myfile.write(file_contents)
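# Illustrative subclass (a sketch, not part of the original script; the class
# name and file path are hypothetical): drops every <plurals> block while
# keeping <string> tags unchanged.
#
#   class DropPluralsScript(BaseStringScript):
#     def ProcessTag(self, line, type):
#       if type == self.TYPE_PLUR:
#         return []
#       return line
#
#   DropPluralsScript().ProcessFile('values/strings.xml')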
| 24.72449 | 66 | 0.626083 | [
"Apache-2.0"
] | 1y445rc/FirebaseUI-Android | scripts/translations/base_string_script.py | 2,423 | Python |
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
from cloudify.decorators import operation
from cloudify.exceptions import *
from plugin.nodes.utils import *
def build_radl_flavour(config):
ctx.logger.debug('{0} Infrastructure Manager deployment info:'.format(get_log_indentation()))
increase_log_indentation()
type = get_child(dictionary=config, key='type', required=True)
cores = get_child(dictionary=config, key='cores', required=True)
memory = get_child(dictionary=config, key='memory', required=True)
flavour_radl = \
" instance_type = '" + str(type) + "' and \n" + \
" cpu.count = " + str(cores) + " and \n" + \
" memory.size = " + str(memory) + " and \n"
decrease_log_indentation()
return flavour_radl
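# Illustrative result (derived from the concatenation above) for a hypothetical
# config {'type': 'small', 'cores': 2, 'memory': 4096}:
#    instance_type = 'small' and
#    cpu.count = 2 and
#    memory.size = 4096 and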
@operation
def configure(config, simulate, **kwargs):
if (not simulate):
reset_log_indentation()
ctx.logger.debug('{0} Configure operation: Begin'.format(get_log_indentation()))
increase_log_indentation()
radl = get_child(ctx.instance.runtime_properties, key='settings')
if not radl:
radl = create_child(ctx.instance.runtime_properties, key='settings', value={})
radl_network = create_child(radl, key='flavour', value=build_radl_flavour(config))
decrease_log_indentation()
ctx.logger.debug('{0} Configure operation: End'.format(get_log_indentation()))
| 39.444444 | 97 | 0.697887 | [
"Apache-2.0"
] | MSO4SC/cloudify-im-extension | plugin/nodes/flavour.py | 1,420 | Python |
"""Certbot client."""
# version number like 1.2.3a0, must have at least 2 parts, like 1.2
__version__ = '1.14.0.dev0'
| 29.5 | 67 | 0.686441 | [
"Apache-2.0"
] | 4n3i5v74/certbot | certbot/certbot/__init__.py | 118 | Python |
import subprocess
import py
import pytest
@pytest.fixture(
params=["tests/dataset-rdstmc", "tests/dataset-wiki", "tests/dataset-rntutor"]
)
def datasetdir(request):
return py.path.local(request.param)
@pytest.fixture
def messages(datasetdir):
msgdir = datasetdir.join("messages")
return msgdir.listdir(fil="*.xml")
@pytest.fixture
def rncdir(datasetdir):
return datasetdir.join("schemas")
@pytest.fixture
def rootrnc(rncdir):
return rncdir.join("root.rnc")
@pytest.fixture
def rncschemas(rootrnc):
return rootrnc.dirpath().listdir("*.rnc")
def test_validate_by_rnc_onemsg(rootrnc, messages):
cmd = ["pyjing", "-c"]
cmd.append(rootrnc.strpath)
cmd.append(messages[0].strpath)
subprocess.check_call(cmd)
def test_validate_by_rnc_allmsgs(rootrnc, messages):
cmd = ["pyjing", "-c"]
cmd.append(rootrnc.strpath)
cmd.extend(map(str, messages))
subprocess.check_call(cmd)
def test_rnc2rng(rootrnc, tmpdir, rncschemas):
cmd = ["pytrang"]
rngname = rootrnc.new(dirname=tmpdir, ext=".rng")
cmd.append(rootrnc.strpath)
cmd.append(rngname.strpath)
subprocess.check_call(cmd)
rngnames = tmpdir.listdir(fil="*.rng")
assert len(rngnames) == len(rncschemas)
for rnc, rng in zip(sorted(rngnames), sorted(rncschemas)):
assert rnc.purebasename == rng.purebasename
"""RNG section ========================
"""
@pytest.fixture
def rngschemas(rootrnc, tmpdir, rncschemas):
cmd = ["pytrang"]
rngname = rootrnc.new(dirname=tmpdir, ext=".rng")
cmd.append(rootrnc.strpath)
cmd.append(rngname.strpath)
subprocess.check_call(cmd)
rngnames = tmpdir.listdir(fil="*.rng")
assert len(rngnames) == len(rncschemas)
for rnc, rng in zip(sorted(rngnames), sorted(rncschemas)):
assert rnc.purebasename == rng.purebasename
return rngnames
@pytest.fixture
def rootrng(rngschemas):
rootschema = rngschemas[0].new(basename="root.rng")
assert rootschema in rngschemas
rootschema.ensure()
return rootschema
def test_validate_by_rng_onemsg(rootrng, messages):
cmd = ["pyjing"]
cmd.append(rootrng.strpath)
cmd.append(messages[0].strpath)
subprocess.check_call(cmd)
def test_validate_by_rng_allmsgs(rootrng, messages):
cmd = ["pyjing"]
cmd.append(rootrng.strpath)
cmd.extend(map(str, messages))
subprocess.check_call(cmd)
| 23.93 | 82 | 0.695779 | [
"MIT"
] | eugenehp/jingtrang | tests/test_it.py | 2,393 | Python |
"""Config flow to configure the Netgear integration."""
from __future__ import annotations
import logging
from typing import cast
from urllib.parse import urlparse
from pynetgear import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USER
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.util.network import is_ipv4_address
from .const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DEFAULT_NAME,
DOMAIN,
MODELS_PORT_80,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from .errors import CannotLoginException
from .router import get_api
_LOGGER = logging.getLogger(__name__)
def _discovery_schema_with_defaults(discovery_info):
return vol.Schema(_ordered_shared_schema(discovery_info))
def _user_schema_with_defaults(user_input):
user_schema = {vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str}
user_schema.update(_ordered_shared_schema(user_input))
return vol.Schema(user_schema)
def _ordered_shared_schema(schema_input):
return {
vol.Optional(CONF_USERNAME, default=schema_input.get(CONF_USERNAME, "")): str,
vol.Required(CONF_PASSWORD, default=schema_input.get(CONF_PASSWORD, "")): str,
}
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CONSIDER_HOME,
default=self.config_entry.options.get(
CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
),
): int,
}
)
return self.async_show_form(step_id="init", data_schema=settings_schema)
class NetgearFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize the netgear config flow."""
self.placeholders = {
CONF_HOST: DEFAULT_HOST,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: DEFAULT_USER,
CONF_SSL: False,
}
self.discovered = False
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if not user_input:
user_input = {}
if self.discovered:
data_schema = _discovery_schema_with_defaults(user_input)
else:
data_schema = _user_schema_with_defaults(user_input)
return self.async_show_form(
step_id="user",
data_schema=data_schema,
errors=errors or {},
description_placeholders=self.placeholders,
)
async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
"""Initialize flow from ssdp."""
updated_data: dict[str, str | int | bool] = {}
device_url = urlparse(discovery_info.ssdp_location)
if hostname := device_url.hostname:
hostname = cast(str, hostname)
updated_data[CONF_HOST] = hostname
if not is_ipv4_address(str(hostname)):
return self.async_abort(reason="not_ipv4_address")
_LOGGER.debug("Netgear ssdp discovery info: %s", discovery_info)
await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_SERIAL])
self._abort_if_unique_id_configured(updates=updated_data)
if device_url.scheme == "https":
updated_data[CONF_SSL] = True
else:
updated_data[CONF_SSL] = False
updated_data[CONF_PORT] = DEFAULT_PORT
for model in MODELS_PORT_80:
if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
model
) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
model
):
updated_data[CONF_PORT] = PORT_80
for model in MODELS_PORT_5555:
if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
model
) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
model
):
updated_data[CONF_PORT] = PORT_5555
updated_data[CONF_SSL] = True
self.placeholders.update(updated_data)
self.discovered = True
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return await self._show_setup_form()
host = user_input.get(CONF_HOST, self.placeholders[CONF_HOST])
port = self.placeholders[CONF_PORT]
ssl = self.placeholders[CONF_SSL]
username = user_input.get(CONF_USERNAME, self.placeholders[CONF_USERNAME])
password = user_input[CONF_PASSWORD]
if not username:
username = self.placeholders[CONF_USERNAME]
# Open connection and check authentication
try:
api = await self.hass.async_add_executor_job(
get_api, password, host, username, port, ssl
)
except CannotLoginException:
errors["base"] = "config"
if errors:
return await self._show_setup_form(user_input, errors)
# Check if already configured
info = await self.hass.async_add_executor_job(api.get_info)
await self.async_set_unique_id(info["SerialNumber"], raise_on_progress=False)
self._abort_if_unique_id_configured()
config_data = {
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_HOST: host,
CONF_PORT: api.port,
CONF_SSL: api.ssl,
}
if info.get("ModelName") is not None and info.get("DeviceName") is not None:
name = f"{info['ModelName']} - {info['DeviceName']}"
else:
name = info.get("ModelName", DEFAULT_NAME)
return self.async_create_entry(
title=name,
data=config_data,
)
| 32.00463 | 88 | 0.645885 | [
"Apache-2.0"
] | 2004happy/core | homeassistant/components/netgear/config_flow.py | 6,913 | Python |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils
from VectorTestSequence import VectorTestSequence
from base.ChoicesModifier import ChoicesModifier
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# This test verifies that a basic add vector instruction can be generated and
# executed. It verifies that the initial values are correctly communicated to
# the simulator and that the resulting values are successfully returned. The
# test assumes the use of 512-bit vector registers and 32-bit vector register
# elements.
class MainSequence(VectorTestSequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mInstrList = ("VADD.VV##RISCV",)
self._mRegIndex1 = None
self._mRegIndex2 = None
self._mElemVals1 = None
self._mElemVals2 = None
# Set up the environment prior to generating the test instructions.
def _setUpTest(self):
# Ensure vector element size is set to 32 bits and vector register
# group size is set to 1
choices_mod = ChoicesModifier(self.genThread)
vsew_choice_weights = {
"0x0": 0,
"0x1": 0,
"0x2": 10,
"0x3": 0,
"0x4": 0,
"0x5": 0,
"0x6": 0,
"0x7": 0,
}
choices_mod.modifyRegisterFieldValueChoices("vtype.VSEW", vsew_choice_weights)
vlmul_choice_weights = {
"0x0": 10,
"0x1": 0,
"0x2": 0,
"0x3": 0,
"0x4": 0,
"0x5": 0,
"0x6": 0,
"0x7": 0,
}
choices_mod.modifyRegisterFieldValueChoices("vtype.VLMUL", vlmul_choice_weights)
choices_mod.commitSet()
(self._mRegIndex1, self._mRegIndex2) = self.getRandomRegisters(2, "VECREG", exclude="0")
self._mElemVals1 = self._initializeVectorRegister("v%d" % self._mRegIndex1)
self._mElemVals2 = self._initializeVectorRegister("v%d" % self._mRegIndex2)
# Return a list of test instructions to randomly choose from.
def _getInstructionList(self):
return self._mInstrList
# Return parameters to be passed to Sequence.genInstruction().
def _getInstructionParameters(self):
return {
"vd": self._mRegIndex1,
"vs1": self._mRegIndex1,
"vs2": self._mRegIndex2,
"vm": 1,
}
# Verify additional aspects of the instruction generation and execution.
#
# @param aInstr The name of the instruction.
# @param aInstrRecord A record of the generated instruction.
def _performAdditionalVerification(self, aInstr, aInstrRecord):
for (elem_index, val) in enumerate(self._mElemVals2):
self._mElemVals1[elem_index] += val
reg_name_1 = "v%d" % self._mRegIndex1
for sub_index in range(8):
field_name = "%s_%d" % (reg_name_1, sub_index)
(field_val, valid) = self.readRegister(reg_name_1, field=field_name)
self.assertValidRegisterValue(reg_name_1, valid)
expected_field_val = self._getFieldValue(sub_index, self._mElemVals1)
if field_val != expected_field_val:
self.error(
"Register field %s has unexpected value; "
"Expected=0x%x, Actual=0x%x" % (field_name, expected_field_val, field_val)
)
# Initialize the specified vector register and return a list of 32-bit
# element values.
def _initializeVectorRegister(self, aRegName):
elem_vals = []
for elem_index in range(16):
elem_val = RandomUtils.random32(0, 0xFFFF)
elem_vals.append(elem_val)
for sub_index in range(8):
field_name = "%s_%d" % (aRegName, sub_index)
field_val = self._getFieldValue(sub_index, elem_vals)
self.initializeRegisterFields(aRegName, {field_name: field_val})
return elem_vals
# Get the value of a 64-bit field for a vector register.
#
# @param aSubIndex A 64-bit vector register field index.
# @param aElemVals A list of 32-bit element values.
def _getFieldValue(self, aSubIndex, aElemVals):
field_value = aElemVals[2 * aSubIndex]
field_value |= aElemVals[2 * aSubIndex + 1] << 32
return field_value
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| 37.641791 | 96 | 0.652458 | [
"Apache-2.0"
] | Wlgen/force-riscv | tests/riscv/vector/vector_simple_add_force.py | 5,044 | Python |
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget
class JogWidget(QWidget):
def __init__(self, parent, callback):
super(JogWidget, self).__init__(parent)
self.parent = parent
self.callback = callback
self.wx_current = 0
self.wy_current = 0
self.wz_current = 0
self._x_start_screen = 0
self._y_start_screen = 0
self._z_accumulator = 0
def onIdle(self):
self._z_accumulator = 0
def mousePressEvent(self, event):
pos = event.pos()
self._x_start_screen = pos.x()
self._y_start_screen = pos.y()
self._relative_origin_x = self.wx_current
self._relative_origin_y = self.wy_current
def mouseReleaseEvent(self, event):
"""
Safe Feed
"""
pass
#self.callback("F111")
def wheelEvent(self, event):
delta = event.angleDelta().y()
self._z_accumulator += delta
z_goto = self.wz_current + self._z_accumulator / 1000
self.callback("G1 Z{:0.2f} F100".format(z_goto))
def mouseMoveEvent(self, event):
pos = event.pos()
x_current_screen = pos.x()
y_current_screen = pos.y()
x_goto = self._relative_origin_x + (x_current_screen - self._x_start_screen) / 20
y_goto = self._relative_origin_y + (self._y_start_screen - y_current_screen) / 20
self.callback("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto))
#print("G1 X{:0.2f} Y{:0.2f} F400".format(x_goto, y_goto)) | 30.622642 | 89 | 0.585952 | [
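# Hedged usage sketch (not part of the original file): embedding the widget in a Qt
# application with a stand-in callback; "print" stands in for a real G-code sender.
# if __name__ == "__main__":
#     import sys
#     from PyQt5.QtWidgets import QApplication
#     app = QApplication(sys.argv)
#     widget = JogWidget(None, print)
#     widget.show()
#     sys.exit(app.exec_())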
"MIT"
] | comgram/gerbil_gui | classes/jogwidget.py | 1,623 | Python |
from collections import namedtuple
import logging
import random
from Items import ItemFactory
#This file sets the item pools for various modes. Timed modes and triforce hunt are enforced first, and then extra items are specified per mode to fill in the remaining space.
#Some basic items that various modes require are placed here, including pendants and crystals. Medallion requirements for the two relevant entrances are also decided.
alwaysitems = (['Kokiri Sword', 'Gilded Sword', 'Great Fairy Sword', 'Hylian Shield', 'Mirror Shield'] +
['Deku Mask', 'Goron Mask', 'Zora Mask', 'Fierce Deity Mask'] +
['Postmans Hat', 'Blast Mask', 'Great Fairy Mask', 'All Night Mask', 'Stone Mask'] +
['Keaton Mask', 'Bremen Mask', 'Bunny Hood', 'Don Geros Mask', 'Mask of Scents'] +
['Romani Mask', 'Circus Leader Mask', 'Couple Mask', 'Mask of Truth'] +
['Kamaros Mask', 'Garo Mask', 'Captains Hat', 'Gibdo Mask', 'Giant Mask'] +
['Bow', 'Large Quiver', 'Largest Quiver'] + ['Fire Arrows', 'Ice Arrows', 'Light Arrows'] +
['Powder Keg', 'Pictograph Box', 'Lens of Truth', 'Hookshot'] +
['Bomb Bag', 'Big Bomb Bag', ] + ['Bottle'] * 2 + ['Bottle with Gold Dust'] +
['Bottle with Red Potion'] + ['Bottle with Milk'] + ['Bottle with Chateau Romani'] +
['Piece of Heart'] * 52 + ['Heart Container'] * 4 + ['Adult Wallet', 'Giant Wallet'])
notmapcompass = ['Ice Trap'] * 8
rewardlist = ['Odolwa\'s Remains', 'Goht\'s Remains', 'Gyorg\'s Remains', 'Twinmold\'s Remains']
songlist = ['Song of Time', 'Song of Healing', 'Song of Soaring', 'Eponas Song', 'Song of Storms', 'Sonata of Awakening', 'Goron Lullaby', 'New Wave Bossa Nova', 'Elegy of Emptiness', 'Oath to Order']
# TODO: this may need to be aligned with the location_table
stray_fairy_locations = (['WF-SF1', 'WF-SF2', 'WF-SF3', 'WF-SF4', 'WF-SF5', 'WF-SF6', 'WF-SF7', 'WF-SF8', 'WF-SF9', 'WF-SF10', 'WF-SF11', 'WF-SF12', 'WF-SF13', 'WF-SF14', 'WF-SF15'] +
['SH-SF1', 'SH-SF2', 'SH-SF3', 'SH-SF4', 'SH-SF5', 'SH-SF6', 'SH-SF7', 'SH-SF8', 'SH-SF9', 'SH-SF10', 'SH-SF11', 'SH-SF12', 'SH-SF13', 'SH-SF14', 'SH-SF15'] +
['GB-SF1', 'GB-SF2', 'GB-SF3', 'GB-SF4', 'GB-SF5', 'GB-SF6', 'GB-SF7', 'GB-SF8', 'GB-SF9', 'GB-SF10', 'GB-SF11', 'GB-SF12', 'GB-SF13', 'GB-SF14', 'GB-SF15'] +
['ST-SF1', 'ST-SF2', 'ST-SF3', 'ST-SF4', 'ST-SF5', 'ST-SF6', 'ST-SF7', 'ST-SF8', 'ST-SF9', 'ST-SF10', 'ST-SF11', 'ST-SF12', 'ST-SF13', 'ST-SF14', 'ST-SF15'])
tradeitems = (['Moon Tear', 'Town Title Deed', 'Swamp Title Deed', 'Mountain Title Deed', 'Ocean Title Deed'])
WF_vanilla = (['Recovery Heart'] * 2)
SH_vanilla = (['Recovery Heart'] * 2)
GB_vanilla = (['Recovery Heart'] * 2)
ST_vanilla = (['Recovery Heart'] * 2)
PF_vanilla = (['Recovery Heart'] * 2)
normal_bottles = [
'Bottle',
'Bottle with Milk',
'Bottle with Red Potion',
'Bottle with Green Potion',
'Bottle with Blue Potion',
'Bottle with Fairy',
'Bottle with Fish',
'Bottle with Bugs',
'Bottle with Poe',
'Bottle with Big Poe']
normal_bottle_count = 6
normal_rupees = (
['Rupees (5)'] * 13
+ ['Rupees (20)'] * 5
+ ['Rupees (50)'] * 7
+ ['Rupees (200)'] * 3)
shopsanity_rupees = (
['Rupees (5)'] * 2
+ ['Rupees (20)'] * 10
+ ['Rupees (50)'] * 10
+ ['Rupees (200)'] * 5
+ ['Progressive Wallet'])
vanilla_shop_items = {
'Trading Post Item 1': 'Buy Hylian Shield',
# TODO: Fill out the rest
}
titledeeds = {
'Sad Moon Crater': 'Moon\'s Tear',
# TODO: fill out the rest
}
npc_items = {
# TODO: List all locations which give items by NPC, and set them to give that specific item
}
eventlocations = {
'Majora': 'Majora\'s Mask'
}
junk_pool = (
8 * ['Bombs (5)'] +
2 * ['Bombs (10)'] +
8 * ['Arrows (5)'] +
2 * ['Arrows (10)'] +
5 * ['Deku Stick (1)'] +
5 * ['Deku Nuts (5)'] +
10 * ['Rupees (5)'] +
4 * ['Rupees (20)'] +
20 * ['Ice Trap'])
def get_junk_item(count=1):
ret_junk = []
for _ in range(count):
ret_junk.append(random.choice(junk_pool))
return ret_junk
def generate_itempool(world):
# set up item pool
(pool, placed_items) = get_pool_core(world)
world.itempool = ItemFactory(pool, world)
for (location, item) in placed_items.items():
world.push_item(location, ItemFactory(item, world))
world.get_location(location).event = True
fill_bosses(world)
world.initialize_items()
'''
This is where we decide what items to place and how
'''
def get_pool_core(world):
pool = []
placed_items = {}
'''
# Used to place an item randomly into the pool
pool.append('Kokiri Sword')
# Used to place a specific item in a specific location
placed_items['Kokiri Sword Chest'] = 'Kokiri Sword'
# Adds x items to the pool which are not progression items
pool.extend(get_junk_item(37))
# locations_with_items is a list of key value pairs where
# the key is the location name for an item
# the value is the item being placed at that location
placed_items.update(locations_with_items)
# tells the logic that you start out with the given item
world.state.collect(item)
'''
pool.extend(songlist)
if world.shuffle_mapcompass == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.dungeon_items]:
world.state.collect(item)
pool.extend(get_junk_item())
if world.shuffle_smallkeys == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.small_keys]:
world.state.collect(item)
pool.extend(get_junk_item())
if world.shuffle_bosskeys == 'remove':
for item in [item for dungeon in world.dungeons for item in dungeon.boss_key]:
world.state.collect(item)
pool.extend(get_junk_item())
return (pool, placed_items)
def fill_songs(world, attempts=15):
    # fill_restrictive and FillError are used below but never imported in the original
    # file; they are assumed to live in the project's Fill module.
    from Fill import FillError, fill_restrictive
songs = ItemFactory(songlist)
song_locations = [world.get_location('Song from Skull Kid'), world.get_location('Song from HMS'), world.get_location('Song from Owl Tablet'), world.get_location('Song from Romani'), world.get_location('Song at Grave'), world.get_location('Song from Monkey'), world.get_location('Song from Baby Goron'), world.get_location('Song from Goron Elder'), world.get_location('Song from Zora Eggs'), world.get_location('Song from Igos'), world.get_location('Song from the Giants')]
placed_prizes = [loc.item.name for loc in song_locations if loc.item is not None]
unplaced_prizes = [song for song in songs if song.name not in placed_prizes]
empty_song_locations = [loc for loc in song_locations if loc.item is None]
while attempts:
attempts -= 1
try:
prizepool = list(unplaced_prizes)
prize_locs = list(empty_song_locations)
random.shuffle(prizepool)
random.shuffle(prize_locs)
fill_restrictive(world, world.get_all_state(keys=True), prize_locs, prizepool) #TODO: Set keys to true once keys are properly implemented
except FillError:
logging.getLogger('').info("Failed to place songs. Will retry %s more times", attempts)
for location in empty_song_locations:
location.item = None
continue
break
else:
raise FillError('Unable to place songs')
def fill_bosses(world, bossCount=4):
boss_rewards = ItemFactory(rewardlist)
boss_locations = [world.get_location('Odolwa'), world.get_location('Goht'), world.get_location('Gyorg'), world.get_location('Twinmold')]
placed_prizes = [loc.item.name for loc in boss_locations if loc.item is not None]
unplaced_prizes = [item for item in boss_rewards if item.name not in placed_prizes]
empty_boss_locations = [loc for loc in boss_locations if loc.item is None]
prizepool = list(unplaced_prizes)
prize_locs = list(empty_boss_locations)
while bossCount:
bossCount -= 1
random.shuffle(prizepool)
random.shuffle(prize_locs)
item = prizepool.pop()
loc = prize_locs.pop()
world.push_item(loc, item, False)
world.get_location(loc).event = True
| 45.354497 | 477 | 0.615726 | [
"MIT"
] | mzxrules/MM-Randomizer | ItemList.py | 8,572 | Python |
# Generated by Django 3.1 on 2020-08-08 11:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('socialpages', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.AlterModelOptions(
name='editor',
options={'ordering': ['first_name']},
),
]
| 25.12 | 114 | 0.555732 | [
"MIT"
] | OjureFred/SocialGram | socialpages/migrations/0002_auto_20200808_1457.py | 628 | Python |
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from redis_metrics.utils import generate_test_metrics
class Command(BaseCommand):
args = '<metric-name> [<metric-name> ...]'
help = "Creates Lots of Dummy Metrics"
option_list = BaseCommand.option_list + (
make_option(
'-r',
'--randomize',
action='store_true',
dest='randomize',
default=True,
help='Randomize Metric Data'
),
make_option(
'--no-randomize',
action='store_false',
dest='randomize',
default=True,
help='Do not randomize Metric Data'
),
make_option(
'-n',
'--num-days',
action='store',
dest='num_days',
type="int",
default=365 * 3, # Default to 3 years
help='Number of Days worth of data to generate'
),
make_option(
'-c',
'--cap',
action='store',
dest='cap',
default=None,
help='Cap the maximum metric value'
),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("You must provide at least one metric name")
slugs = args
cap = options["cap"]
days = options["num_days"]
randomize = options["randomize"]
self.stdout.write("\nGenerating metrics using the following:\n")
self.stdout.write("Slugs: {0}\n".format(u", ".join(slugs)))
self.stdout.write("Days: {0}\n".format(days))
self.stdout.write("Randomize: {0}\n".format(randomize))
self.stdout.write("Cap: {0}\n".format(cap))
for slug in slugs:
generate_test_metrics(slug, num=days, randomize=randomize, cap=cap)
| 31.096774 | 79 | 0.544606 | [
"MIT"
] | bradmontgomery/django-redis-metrics | redis_metrics/management/commands/generate_test_metrics.py | 1,928 | Python |
from pathlib import Path
from datetime import datetime
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import ignite
import ignite.distributed as idist
from ignite.engine import Events, Engine, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Checkpoint, DiskSaver
from ignite.utils import manual_seed, setup_logger
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
import utils
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="CIFAR10-Training", distributed_rank=local_rank)
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = "stop-on-{}".format(config["stop_iteration"])
folder_name = "{}_backend-{}-{}_{}".format(config["model"], idist.backend(), idist.get_world_size(), now)
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info("Output path: {}".format(config["output_path"]))
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_trains"]:
from trains import Task
task = Task.init("CIFAR10-Training", task_name=output_path.stem)
task.connect_configuration(config)
# Log hyper parameters
hyper_params = [
"model",
"batch_size",
"momentum",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
# Setup dataflow, model, optimizer, criterion
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Create trainer for current task
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
}
    # We define two evaluators as they won't have exactly the same role:
# - `evaluator` will save the best model based on validation score
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
if rank == 0:
# Setup TensorBoard logging on trainer and evaluators. Logged values are:
# - Training metrics, e.g. running average loss values
# - Learning rate
# - Evaluation train/test metrics
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
# Store 3 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models={"model": model},
metric_name="accuracy",
n_saved=3,
trainer=trainer,
tag="test",
)
# In order to check training resuming we can stop training on a given iteration
if config["stop_iteration"] is not None:
@trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
def _():
logger.info("Stop training on {} iteration".format(trainer.state.iteration))
trainer.terminate()
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
import traceback
print(traceback.format_exc())
if rank == 0:
tb_logger.close()
def run(
seed=543,
data_path="/tmp/cifar10",
output_path="/tmp/output-cifar10/",
model="resnet18",
batch_size=512,
momentum=0.9,
weight_decay=1e-4,
num_workers=12,
num_epochs=24,
learning_rate=0.4,
num_warmup_epochs=4,
validate_every=3,
checkpoint_every=200,
backend=None,
resume_from=None,
log_every_iters=15,
nproc_per_node=None,
stop_iteration=None,
with_trains=False,
**spawn_kwargs
):
"""Main entry to train an model on CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10".
model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
with_trains (bool): if True, experiment Trains logger is setup. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
def get_dataflow(config):
# - Get train/test datasets
if idist.get_rank() > 0:
        # Ensure that only rank 0 downloads the dataset; the other ranks wait here
idist.barrier()
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
if idist.get_rank() == 0:
        # Rank 0 has finished downloading; release the other ranks
idist.barrier()
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
)
return train_loader, test_loader
def initialize(config):
model = utils.get_model(config["model"])
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
optimizer = optim.SGD(
model.parameters(),
lr=config["learning_rate"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
def log_metrics(logger, epoch, elapsed, tag, metrics):
logger.info(
"\nEpoch {} - elapsed: {} - {} metrics:\n {}".format(
epoch, elapsed, tag, "\n".join(["\t{}: {}".format(k, v) for k, v in metrics.items()])
)
)
def log_basic_info(logger, config):
logger.info("Train {} on CIFAR10".format(config["model"]))
logger.info("- PyTorch version: {}".format(torch.__version__))
logger.info("- Ignite version: {}".format(ignite.__version__))
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info("\t{}: {}".format(key, value))
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info("\tbackend: {}".format(idist.backend()))
logger.info("\tworld size: {}".format(idist.get_world_size()))
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# Setup Ignite trainer:
# - let's define training step
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
def train_step(engine, batch):
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
model.train()
# Supervised part
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # This can be helpful for XLA to avoid a performance slowdown from fetching loss.item() every iteration
if config["log_every_iters"] > 0 and (engine.state.iteration - 1) % config["log_every_iters"] == 0:
batch_loss = loss.item()
engine.state.saved_batch_loss = batch_loss
else:
batch_loss = engine.state.saved_batch_loss
return {
"batch loss": batch_loss,
}
trainer = Engine(train_step)
trainer.state.saved_batch_loss = -1.0
trainer.state_dict_user_keys.append("saved_batch_loss")
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names if config["log_every_iters"] > 0 else None,
with_pbars=False,
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), "Checkpoint '{}' is not found".format(checkpoint_fp.as_posix())
logger.info("Resume from a checkpoint: {}".format(checkpoint_fp.as_posix()))
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def get_save_handler(config):
if config["with_trains"]:
from ignite.contrib.handlers.trains_logger import TrainsSaver
return TrainsSaver(dirname=config["output_path"])
return DiskSaver(config["output_path"], require_empty=False)
if __name__ == "__main__":
fire.Fire({"run": run})
| 35.907303 | 120 | 0.662833 | [
"BSD-3-Clause"
] | HelioStrike/ignite | examples/contrib/cifar10/main.py | 12,783 | Python |
"""
Required device info for the PIC16F1768 devices
"""
from pymcuprog.deviceinfo.eraseflags import ChiperaseEffect
DEVICE_INFO = {
'name': 'pic16f1768',
'architecture': 'PIC16',
# Will erase Flash, User ID and Config words
'default_bulk_erase_address_word': 0x8000,
# Flash
'flash_address_word': 0,
'flash_size_words': 4*1024, # 4KW
'flash_page_size_words': 32,
'flash_write_size_words': 1,
'flash_read_size_words': 1,
'flash_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'flash_isolated_erase': False,
# User ID
'user_id_address_word': 0x8000,
'user_id_size_words': 4,
'user_id_page_size_words': 1,
'user_id_write_size_words': 1,
'user_id_read_size_words': 1,
'user_id_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'user_id_isolated_erase': False,
# Config words
'config_words_address_word': 0x8007,
'config_words_size_words': 2,
'config_words_page_size_words': 1,
'config_words_write_size_words': 1,
'config_words_read_size_words': 1,
'config_words_erase_address_word': 0,
'config_words_chiperase_effect': ChiperaseEffect.ALWAYS_ERASED,
'config_words_isolated_erase': False,
}
| 30.2 | 67 | 0.725993 | [
"MIT"
] | KrystianD-contribution/pymcuprog | pymcuprog/deviceinfo/devices/pic16f1768.py | 1,208 | Python |
__copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import numpy as np
import numpy.linalg as la # noqa
import pyopencl as cl
import pyopencl.clrandom
import pyopencl.clmath
import pytest
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa
from mirgecom.initializers import Vortex2D
from mirgecom.initializers import Lump
from mirgecom.euler import split_conserved
from mirgecom.initializers import SodShock1D
from mirgecom.eos import IdealSingleGas
from grudge.eager import EagerDGDiscretization
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests,
)
logger = logging.getLogger(__name__)
def test_lump_init(ctx_factory):
"""
Simple test to check that Lump initializer
creates the expected solution field.
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim
)
order = 3
logger.info(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
# Init soln with Vortex
center = np.zeros(shape=(dim,))
velocity = np.zeros(shape=(dim,))
center[0] = 5
velocity[0] = 1
lump = Lump(center=center, velocity=velocity)
lump_soln = lump(0, nodes)
cv = split_conserved(dim, lump_soln)
p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)
exp_p = 1.0
errmax = discr.norm(p - exp_p, np.inf)
logger.info(f"lump_soln = {lump_soln}")
logger.info(f"pressure = {p}")
assert errmax < 1e-15
def test_vortex_init(ctx_factory):
"""
Simple test to check that Vortex2D initializer
creates the expected solution field.
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (-5.0,)], b=[(10.0,), (5.0,)], n=(nel_1d,) * dim
)
order = 3
logger.info(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
# Init soln with Vortex
vortex = Vortex2D()
vortex_soln = vortex(0, nodes)
gamma = 1.4
cv = split_conserved(dim, vortex_soln)
p = 0.4 * (cv.energy - 0.5 * np.dot(cv.momentum, cv.momentum) / cv.mass)
exp_p = cv.mass ** gamma
errmax = discr.norm(p - exp_p, np.inf)
logger.info(f"vortex_soln = {vortex_soln}")
logger.info(f"pressure = {p}")
assert errmax < 1e-15
def test_shock_init(ctx_factory):
"""
Simple test to check that Shock1D initializer
creates the expected solution field.
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
dim = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=[(0.0,), (1.0,)], b=[(-0.5,), (0.5,)], n=(nel_1d,) * dim
)
order = 3
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
initr = SodShock1D()
initsoln = initr(t=0.0, x_vec=nodes)
print("Sod Soln:", initsoln)
xpl = 1.0
xpr = 0.1
tol = 1e-15
nodes_x = nodes[0]
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
assert discr.norm(actx.np.where(nodes_x < 0.5, p-xpl, p-xpr), np.inf) < tol
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_uniform(ctx_factory, dim):
"""
Simple test to check that Uniform initializer
creates the expected solution field.
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
)
order = 1
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f"DIM = {dim}, {len(nodes)}")
print(f"Nodes={nodes}")
from mirgecom.initializers import Uniform
initr = Uniform(numdim=dim)
initsoln = initr(t=0.0, x_vec=nodes)
tol = 1e-15
ssoln = split_conserved(dim, initsoln)
assert discr.norm(ssoln.mass - 1.0, np.inf) < tol
assert discr.norm(ssoln.energy - 2.5, np.inf) < tol
print(f"Uniform Soln:{initsoln}")
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
print(f"Press:{p}")
assert discr.norm(p - 1.0, np.inf) < tol
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_pulse(ctx_factory, dim):
"""
Test of Gaussian pulse generator.
If it looks, walks, and quacks like a duck, then ...
"""
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(
a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
)
order = 1
print(f"Number of elements: {mesh.nelements}")
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f"DIM = {dim}, {len(nodes)}")
print(f"Nodes={nodes}")
tol = 1e-15
from mirgecom.initializers import _make_pulse
amp = 1.0
w = .1
rms2 = w * w
r0 = np.zeros(dim)
r2 = np.dot(nodes, nodes) / rms2
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
print(f"Pulse = {pulse}")
# does it return the expected exponential?
pulse_check = actx.np.exp(-.5 * r2)
print(f"exact: {pulse_check}")
pulse_resid = pulse - pulse_check
print(f"pulse residual: {pulse_resid}")
assert(discr.norm(pulse_resid, np.inf) < tol)
# proper scaling with amplitude?
amp = 2.0
pulse = 0
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
pulse_resid = pulse - (pulse_check + pulse_check)
assert(discr.norm(pulse_resid, np.inf) < tol)
# proper scaling with r?
amp = 1.0
rcheck = np.sqrt(2.0) * nodes
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=rcheck)
assert(discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol)
# proper scaling with w?
w = w / np.sqrt(2.0)
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
assert(discr.norm(pulse - (pulse_check * pulse_check), np.inf) < tol)
| 29.278388 | 79 | 0.671337 | [
"MIT"
] | anderson2981/mirgecom | test/test_init.py | 7,993 | Python |
import asyncio
from aiohttp import web
from tt_web import log
from tt_web import postgresql
async def initialize(config, loop):
await postgresql.initialize(config['database'], loop=loop)
async def deinitialize(config, loop):
await postgresql.deinitialize()
async def on_startup(app):
await initialize(app['config'], loop=app.loop)
async def on_cleanup(app):
await deinitialize(app['config'], loop=app.loop)
def register_routers(app):
from . import handlers
app.router.add_post('/accounts/balance', handlers.account_balance)
app.router.add_post('/accounts/history', handlers.account_history)
app.router.add_post('/transactions/start', handlers.start_transaction)
app.router.add_post('/transactions/commit', handlers.commit_transaction)
app.router.add_post('/transactions/rollback', handlers.rollback_transaction)
app.router.add_post('/debug-clear-service', handlers.debug_clear_service)
def create_application(config, loop=None):
app = web.Application(loop=loop)
app['config'] = config
log.initilize(config['log'])
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
register_routers(app)
return app
def run_utility(config, utility):
loop = asyncio.get_event_loop()
async def runner():
await initialize(config, loop=loop)
log.initilize(config['log'])
await utility(loop=loop)
await deinitialize(config, loop=loop)
loop.run_until_complete(runner())
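# Hedged usage sketch (not part of the original module): serving the application with
# aiohttp; the config layout (keys "database" and "log") is taken from the code above,
# but the concrete values and port are assumptions.
# if __name__ == '__main__':
#     config = {'database': {'dsn': '...'}, 'log': {'level': 'INFO'}}
#     web.run_app(create_application(config), port=8080)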
| 22.522388 | 80 | 0.726972 | [
"BSD-3-Clause"
] | devapromix/the-tale | src/tt_bank/tt_bank/service.py | 1,509 | Python |
import pickle
import fcntl
import os
import struct
from collections import defaultdict
from functools import partial
from asyncio import new_event_loop
from io import BytesIO
from .utils import opposite_dict
MESSAGE_LENGTH_FMT = 'I'
def set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def blocking_read(fd, n):
io = BytesIO()
read_amount = 0
while read_amount < n:
data = os.read(fd, n - read_amount)
if not data:
raise IOError('FD closed before all bytes read')
read_amount += len(data)
io.write(data)
return io.getvalue()
class Piping:
def __init__(self, pipe_dict):
self.buffers = defaultdict(bytes)
self.loop = new_event_loop()
for src_fd, dest_fd in pipe_dict.items():
self.loop.add_reader(src_fd, partial(self._read, src_fd, dest_fd))
self.loop.add_writer(dest_fd, partial(self._write, dest_fd))
self.readers_to_writers = dict(pipe_dict)
self.writers_to_readers = opposite_dict(pipe_dict)
def _remove_writer(self, writer_fd):
self.loop.remove_writer(writer_fd)
for reader_fd in self.writers_to_readers.pop(writer_fd):
self.readers_to_writers.pop(reader_fd)
def _remove_reader(self, reader_fd):
        # Stop reading from this fd; if the writer it was feeding has no remaining readers, remove that writer too
self.loop.remove_reader(reader_fd)
writer_fd = self.readers_to_writers.pop(reader_fd)
writer_readers = self.writers_to_readers[writer_fd]
writer_readers.remove(reader_fd)
        if not writer_readers:
self._remove_writer(writer_fd)
def _read(self, src_fd, dest_fd):
try:
data = os.read(src_fd, 1024)
except OSError:
data = ''
if data:
self.buffers[dest_fd] += data
else:
self._remove_reader(src_fd)
if src_fd in self.writers_to_readers:
self._remove_writer(src_fd)
if not self.readers_to_writers:
self.loop.stop()
def _write(self, dest_fd):
buffer = self.buffers[dest_fd]
if buffer:
self.buffers[dest_fd] = buffer[os.write(dest_fd, buffer):]
def run(self):
self.loop.run_forever()
# TODO: is this needed?
# for dest_fd, buffer in self.buffers.items():
# while buffer:
# buffer = buffer[os.write(dest_fd, buffer):]
def send_message(sock, obj):
message = pickle.dumps(obj)
message_len = struct.pack(MESSAGE_LENGTH_FMT, len(message))
sock.sendall(message_len)
sock.sendall(message)
def receive_message(sock):
len_len = struct.calcsize(MESSAGE_LENGTH_FMT)
len_bytes = blocking_read(sock, len_len)
message_len = struct.unpack(MESSAGE_LENGTH_FMT, len_bytes)[0]
message = blocking_read(sock, message_len)
return pickle.loads(message)
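# Hedged illustration (not part of the original module): the wire format is a
# struct-packed length prefix followed by a pickled payload. The sketch below frames a
# message by hand over an OS pipe and recovers it with blocking_read(); the fd names
# and payload are assumptions.
# r_fd, w_fd = os.pipe()
# body = pickle.dumps({"hello": "world"})
# os.write(w_fd, struct.pack(MESSAGE_LENGTH_FMT, len(body)) + body)
# size = struct.unpack(MESSAGE_LENGTH_FMT, blocking_read(r_fd, struct.calcsize(MESSAGE_LENGTH_FMT)))[0]
# assert pickle.loads(blocking_read(r_fd, size)) == {"hello": "world"}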
| 32.239583 | 114 | 0.636187 | [
"MIT"
] | kmaork/madbg | madbg/communication.py | 3,095 | Python |
import logging
import sys
import time
from rdflib.graph import Graph
from hexastore import turtle
from hexastore.memory import InMemoryHexastore
logger = logging.getLogger(__name__)
root = logging.getLogger()
root.setLevel(logging.DEBUG)
class Timer:
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
self.interval = self.end - self.start
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root.addHandler(handler)
try:
with Timer() as t:
store = InMemoryHexastore()
with Timer() as t1:
triples = []
with open("/Users/alex/Downloads/BNBLODBooks_sample_nt/BNBLODB_sample.nt") as fo:
turtle.parse(fo.read(), lambda s, p, o: triples.append((s, p, o)))
logger.info(f"library=mutant-parse time={t1.interval}")
with Timer() as t2:
store.bulk_insert(triples)
logger.info(f"library=mutant-bulk-insert time={t2.interval}")
finally:
logger.info(f"library=mutant time={t.interval}")
try:
with Timer() as t:
g = Graph()
g.parse("/Users/alex/Downloads/BNBLODBooks_sample_nt/BNBLODB_sample.nt", format="nt")
finally:
logger.info(f"library=rdflib time={t.interval}")
| 25.421053 | 93 | 0.668737 | [
"MIT"
] | alexchamberlain/mutant | benchmarks/bnb.py | 1,449 | Python |
"""
Host management app
"""
from django.urls import path
from .views import *
app_name = 'sys_inspect'
urlpatterns = [
    # Device list
    path('device/list', InspectDevInfoViews.as_view(), name='inspect_devices_list'),
    # Add device
    path('device/add', AddDevView.as_view(), name='inspect_devices_add'),
    # Delete device
    path('device/delete', DeleteDevView.as_view(), name='inspect_device_delete'),
    # Edit device
    path('device/edit', EditDevInfoView.as_view(), name='inspect_device_edit'),
    # Task list
    path('content/list', ContentViews.as_view(), name='inspect_contents_list'),
    # Add task
    path('content/add', AddContView.as_view(), name='inspect_contents_add'),
    # Delete task
    path('content/delete', DeleteContView.as_view(), name='inspect_contents_delete'),
]
| 22.114286 | 85 | 0.686047 | [
"MIT"
] | MaLei666/oms | apps/sys_inspect/urls.py | 830 | Python |
# -*- coding: utf-8 -*-
import itertools
import logging
import numpy as np
from collections import OrderedDict
from collections.abc import Mapping
from typing import Dict, List, Optional, Tuple, Union
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor, nn
from detectron2.layers import ShapeSpec
from detectron2.structures import BitMasks, Boxes, ImageList, Instances
from detectron2.utils.events import get_event_storage
from .backbone import Backbone
logger = logging.getLogger(__name__)
def _to_container(cfg):
"""
    mmdet asserts on the exact dict/list types,
    so convert OmegaConf objects to plain dict/list.
"""
if isinstance(cfg, DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
from mmcv.utils import ConfigDict
return ConfigDict(cfg)
class MMDetBackbone(Backbone):
"""
Wrapper of mmdetection backbones to use in detectron2.
mmdet backbones produce list/tuple of tensors, while detectron2 backbones
produce a dict of tensors. This class wraps the given backbone to produce
output in detectron2's convention, so it can be used in place of detectron2
backbones.
"""
def __init__(
self,
backbone: Union[nn.Module, Mapping],
neck: Union[nn.Module, Mapping, None] = None,
*,
pretrained_backbone: Optional[str] = None,
output_shapes: List[ShapeSpec],
output_names: Optional[List[str]] = None,
):
"""
Args:
backbone: either a backbone module or a mmdet config dict that defines a
backbone. The backbone takes a 4D image tensor and returns a
sequence of tensors.
neck: either a backbone module or a mmdet config dict that defines a
neck. The neck takes outputs of backbone and returns a
sequence of tensors. If None, no neck is used.
pretrained_backbone: defines the backbone weights that can be loaded by
mmdet, such as "torchvision://resnet50".
output_shapes: shape for every output of the backbone (or neck, if given).
stride and channels are often needed.
output_names: names for every output of the backbone (or neck, if given).
By default, will use "out0", "out1", ...
"""
super().__init__()
if isinstance(backbone, Mapping):
from mmdet.models import build_backbone
backbone = build_backbone(_to_container(backbone))
self.backbone = backbone
if isinstance(neck, Mapping):
from mmdet.models import build_neck
neck = build_neck(_to_container(neck))
self.neck = neck
# It's confusing that backbone weights are given as a separate argument,
# but "neck" weights, if any, are part of neck itself. This is the interface
# of mmdet so we follow it. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py
logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...")
self.backbone.init_weights(pretrained_backbone)
# train() in mmdet modules is non-trivial, and has to be explicitly
# called. Reference:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py
self.backbone.train()
if self.neck is not None:
logger.info("Initializing mmdet neck weights ...")
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.neck.train()
self._output_shapes = output_shapes
if not output_names:
output_names = [f"out{i}" for i in range(len(output_shapes))]
self._output_names = output_names
def forward(self, x) -> Dict[str, Tensor]:
outs = self.backbone(x)
if self.neck is not None:
outs = self.neck(outs)
assert isinstance(
outs, (list, tuple)
), "mmdet backbone should return a list/tuple of tensors!"
if len(outs) != len(self._output_shapes):
raise ValueError(
"Length of output_shapes does not match outputs from the mmdet backbone: "
f"{len(outs)} != {len(self._output_shapes)}"
)
return {k: v for k, v in zip(self._output_names, outs)}
def output_shape(self) -> Dict[str, ShapeSpec]:
return {k: v for k, v in zip(self._output_names, self._output_shapes)}
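# Hedged construction sketch (illustrative, not from the original file): wrapping an
# mmdet ResNet-50 + FPN as a detectron2 backbone. The config dicts, strides and
# channel counts below are assumptions, not values taken from this file.
# backbone = MMDetBackbone(
#     backbone=dict(type="ResNet", depth=50, out_indices=(0, 1, 2, 3)),
#     neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
#     pretrained_backbone="torchvision://resnet50",
#     output_shapes=[ShapeSpec(channels=256, stride=s) for s in (4, 8, 16, 32, 64)],
#     output_names=["p2", "p3", "p4", "p5", "p6"],
# )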
class MMDetDetector(nn.Module):
"""
Wrapper of a mmdetection detector model, for detection and instance segmentation.
Input/output formats of this class follow detectron2's convention, so a
mmdetection model can be trained and evaluated in detectron2.
"""
def __init__(
self,
detector: Union[nn.Module, Mapping],
*,
# Default is 32 regardless of model:
# https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets
size_divisibility=32,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image
"""
super().__init__()
if isinstance(detector, Mapping):
from mmdet.models import build_detector
detector = build_detector(_to_container(detector))
self.detector = detector
self.size_divisibility = size_divisibility
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor
metas = []
rescale = {"height" in x for x in batched_inputs}
if len(rescale) != 1:
raise ValueError("Some inputs have original height/width, but some don't!")
rescale = list(rescale)[0]
output_shapes = []
for input in batched_inputs:
meta = {}
c, h, w = input["image"].shape
meta["img_shape"] = meta["ori_shape"] = (h, w, c)
if rescale:
scale_factor = np.sqrt(h * w / (input["height"] * input["width"]))
ori_shape = (input["height"], input["width"])
output_shapes.append(ori_shape)
meta["ori_shape"] = ori_shape + (c,)
else:
scale_factor = 1.0
output_shapes.append((h, w))
meta["scale_factor"] = scale_factor
meta["flip"] = False
padh, padw = images.shape[-2:]
meta["pad_shape"] = (padh, padw, c)
metas.append(meta)
if self.training:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
if gt_instances[0].has("gt_masks"):
from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks
def convert_mask(m, shape):
# mmdet mask format
if isinstance(m, BitMasks):
return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1])
else:
return mm_PolygonMasks(m.polygons, shape[0], shape[1])
gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances]
else:
gt_masks = None
losses_and_metrics = self.detector.forward_train(
images,
metas,
[x.gt_boxes.tensor for x in gt_instances],
[x.gt_classes for x in gt_instances],
gt_masks=gt_masks,
)
return _parse_losses(losses_and_metrics)
else:
results = self.detector.simple_test(images, metas, rescale=rescale)
results = [
{"instances": _convert_mmdet_result(r, shape)}
for r, shape in zip(results, output_shapes)
]
return results
@property
def device(self):
return self.pixel_mean.device
# Reference: show_result() in
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances:
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0]
else:
bbox_result, segm_result = result, None
bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5
bboxes, scores = bboxes[:, :4], bboxes[:, -1]
labels = [
torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result)
]
labels = torch.cat(labels)
inst = Instances(shape)
inst.pred_boxes = Boxes(bboxes)
inst.scores = scores
inst.pred_classes = labels
if segm_result is not None and len(labels) > 0:
segm_result = list(itertools.chain(*segm_result))
segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result]
segm_result = torch.stack(segm_result, dim=0)
inst.pred_masks = segm_result
return inst
# reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py
def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]:
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(f"{loss_name} is not a tensor or list of tensors")
if "loss" not in loss_name:
# put metrics to storage; don't return them
storage = get_event_storage()
value = log_vars.pop(loss_name).cpu().item()
storage.put_scalar(loss_name, value)
return log_vars
| 39.900369 | 100 | 0.620827 | [
"Apache-2.0"
] | KnightOfTheMoonlight/visdom4detectron2 | detectron2/modeling/mmdet_wrapper.py | 10,813 | Python |
"""
@Author: Rossi
Created At: 2021-02-21
"""
import json
import time
from mako.template import Template
from Broca.faq_engine.index import ESIndex, VectorIndex
from Broca.message import BotMessage
class FAQAgent:
def __init__(self, agent_name, es_index, vector_index, threshold, topk, prompt_threshold,
template, prompt_template):
self.agent_name = agent_name
self.es_index = es_index
self.vector_index = vector_index
self.threshold = threshold
self.topk = topk
self.prompt_threshold = prompt_threshold
self.template = template
self.prompt_template = prompt_template
@classmethod
def from_config(cls, config):
agent_name = config["agent_name"]
es_config = config["es_index"]
es_index = ESIndex.from_config(es_config)
vector_index_config = config["vector_index"]
vector_index = VectorIndex.from_config(vector_index_config)
if config["build_index_at_start"]:
es_index.build_index_from_file(config["document_file"])
time.sleep(5) # wait until the es index gets ready
vector_index.build_index(es_index)
vector_index.load_index()
threshold = config["threshold"]
topk = config["topk"]
prompt_threshold = config["prompt_threshold"]
template = Template(filename=config["template"])
prompt_template = Template(filename=config["prompt_template"])
return cls(agent_name, es_index, vector_index, threshold, topk, prompt_threshold, template, prompt_template)
@classmethod
def from_config_file(cls, config_file):
with open(config_file, encoding="utf-8") as fi:
config = json.load(fi)
return cls.from_config(config)
def handle_message(self, message):
"""Respond to the user message by retriving documents from the knowledge base.
Args:
message ([type]): [description]
"""
query = message.text
candidates, similarities = self.vector_index.retrieve(query, self.topk)
selected = [candidate for candidate, similarity in zip(candidates, similarities) if similarity >= self.threshold]
result = {}
if selected:
documents = self.es_index.get_answer_by_question_ids(selected)
response = self.template.render(documents=documents)
result["response"] = BotMessage(message.sender_id, response.strip())
else:
selected = [candidate for candidate, similarity in zip(candidates, similarities) if similarity >= self.prompt_threshold]
if selected:
documents = self.es_index.get_documents_by_ids(selected)
prompt = self.prompt_template.render(documents=documents)
result["prompt"] = BotMessage(message.sender_id, prompt.strip())
return result
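# A minimal usage sketch (the config file path and the send() helper are hypothetical; the
# agent only assumes a message object with `sender_id` and `text` attributes, as used above):
#
#   agent = FAQAgent.from_config_file("faq_agent_config.json")
#   result = agent.handle_message(user_message)
#   if "response" in result:
#       send(result["response"])  # direct answer rendered from the answer template
#   elif "prompt" in result:
#       send(result["prompt"])    # clarification prompt built from candidate questions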
| 39.684932 | 132 | 0.669313 | [
"MIT"
] | lawRossi/Broca | Broca/faq_engine/agent.py | 2,897 | Python |
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!gjwrp$!ldm&fccwk7-bwajlwga)m)!js+pouvnhnxb9+^nbwbw')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| 31.126984 | 110 | 0.500765 | [
"BSD-3-Clause"
] | megcunningham/django-diesel | config/settings/local.py | 1,961 | Python |
import argparse
import torch.optim as optim
import sys
from utils import *
from data import data_generator
import time
import math
from setproctitle import setproctitle
import warnings
sys.path.append("../")
from model import TrellisNetModel
warnings.filterwarnings("ignore") # Suppress the RuntimeWarning on unicode
parser = argparse.ArgumentParser(description='PyTorch TrellisNet Language Model')
parser.add_argument('--dataset', type=str, default='ptb',
help='dataset to use')
parser.add_argument('--name', type=str, default='Trellis_charPTB',
help='name of the process')
parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1050,
help='number of hidden units per layer')
parser.add_argument('--nout', type=int, default=200,
help='number of output units')
parser.add_argument('--lr', type=float, default=2e-3,
help='initial learning rate (default: 2e-3)')
parser.add_argument('--clip', type=float, default=0.2,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=400,
help='upper epoch limit (default: 400)')
parser.add_argument('--batch_size', type=int, default=24, metavar='N',
help='batch size')
# Most of the time, you should change these two together
parser.add_argument('--nlevels', type=int, default=140,
help='levels of the network')
parser.add_argument('--horizon', type=int, default=140,
help='The effective history size')
parser.add_argument('--dropout', type=float, default=0.1,
help='output dropout (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.1,
help='input dropout (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.26,
help='dropout applied to weights (0 = no dropout)')
parser.add_argument('--emb_dropout', type=float, default=0.02,
help='dropout applied to embedding layer (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.29,
help='dropout applied to hidden layers (0 = no dropout)')
parser.add_argument('--wdecay', type=float, default=8e-7,
help='weight decay')
parser.add_argument('--tied', action='store_false',
help='tie the word embedding and softmax weights (default: True)')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--anneal', type=int, default=5,
help='learning rate annealing criteria (default: 5)')
parser.add_argument('--cuda', action='store_false',
help='use CUDA (default: True)')
parser.add_argument('--wnorm', action='store_false',
help='use weight normalization (default: True)')
parser.add_argument('--temporalwdrop', action='store_false',
help='only drop the temporal weights (default: True)')
parser.add_argument('--optim', type=str, default='Adam',
help='optimizer to use (default: Adam)')
parser.add_argument('--repack', action='store_false',
help='use repackaging (default: True)')
parser.add_argument('--eval', action='store_true',
help='evaluation only mode')
parser.add_argument('--aux', type=float, default=0.3,
help='use auxiliary loss (default: 0.3), -1 means no auxiliary loss used')
parser.add_argument('--aux_freq', type=float, default=80,
help='auxiliary loss frequency (default: 80)')
parser.add_argument('--seq_len', type=int, default=0,
help='total sequence length; if this is 0 then it defaults to args.horizon (default: 0)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
parser.add_argument('--when', nargs='+', type=int, default=[220, 350],
help='When to decay the learning rate')
parser.add_argument('--ksize', type=int, default=2,
help='conv kernel size (default: 2)')
parser.add_argument('--dilation', nargs='+', type=int, default=[1],
help='dilation rate (default: [1])')
parser.add_argument('--n_experts', type=int, default=0,
help='number of softmax experts (default: 0)')
parser.add_argument('--load', type=str, default='',
help='path to load the model')
parser.add_argument('--load_weight', type=str, default='',
help='path to load the model weights (please only use --load or --load_weight)')
args = parser.parse_args()
args.save = args.name + ".pt"
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
setproctitle(args.name)
torch.set_default_tensor_type('torch.FloatTensor')
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
file, file_len, valfile, valfile_len, testfile, testfile_len, corpus = data_generator(args)
ntokens = len(corpus.dictionary)
eval_batch_size = 10
test_batch_size = 10
train_data = batchify(char_tensor(corpus, file), args.batch_size, args)
val_data = batchify(char_tensor(corpus, valfile), eval_batch_size, args)
test_data = batchify(char_tensor(corpus, testfile), eval_batch_size, args)
print(train_data.size(), val_data.size())
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
self.log = open("logs/" + args.name + ".log", "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
        # this flush method is needed for python 3 compatibility:
        # it flushes both the log file and the original stdout.
        self.log.flush()
        self.terminal.flush()
sys.stdout = Logger()
###############################################################################
# Build the model
###############################################################################
if len(args.load) > 0:
print("Loaded model\n")
model = torch.load(args.load)
else:
model = TrellisNetModel(ntoken=ntokens,
ninp=args.emsize,
nhid=args.nhid,
nout=args.nout,
nlevels=args.nlevels,
kernel_size=args.ksize,
dilation=args.dilation,
dropout=args.dropout,
dropouti=args.dropouti,
dropouth=args.dropouth,
emb_dropout=args.emb_dropout,
wdrop=args.wdrop,
temporalwdrop=args.temporalwdrop,
tie_weights=args.tied,
repack=args.repack,
wnorm=args.wnorm,
aux=(args.aux > 0),
aux_frequency=args.aux_freq,
load=args.load_weight)
if args.cuda:
model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = getattr(optim, args.optim)(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
###############################################################################
# Training code
###############################################################################
def evaluate(data_source):
model.eval()
with torch.no_grad():
total_loss = 0
hidden = model.init_hidden(eval_batch_size)
eff_history_mode = (args.seq_len > args.horizon and not args.repack)
if eff_history_mode:
validseqlen = args.seq_len - args.horizon
seq_len = args.seq_len
else:
validseqlen = args.horizon
seq_len = args.horizon
processed_data_size = 0
for i in range(0, data_source.size(0) - 1, validseqlen):
eff_history = args.horizon if eff_history_mode else 0
if i + eff_history >= data_source.size(0) - 1: continue
data, targets = get_batch(data_source, i, seq_len, evaluation=True)
if args.repack:
hidden = repackage_hidden(hidden)
else:
hidden = model.init_hidden(eval_batch_size)
data = data.t()
net = nn.DataParallel(model) if data.size(0) > 10 else model
(_, _, decoded), hidden, all_decoded = net(data, hidden)
decoded = decoded.transpose(0, 1)
targets = targets[eff_history:].contiguous().view(-1)
final_decoded = decoded[eff_history:].contiguous().view(-1, ntokens)
loss = criterion(final_decoded, targets)
loss = loss.data
total_loss += (data.size(1) - eff_history) * loss
processed_data_size += data.size(1) - eff_history
decoded = None
final_decoded = None
targets = None
all_decoded = None # This is for auxiliary losses; not used in evaluation
return total_loss.item() / processed_data_size
def train(epoch):
model.train()
total_loss = 0
total_aux_losses = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(args.batch_size)
eff_history_mode = (args.seq_len > args.horizon and not args.repack)
if eff_history_mode:
validseqlen = args.seq_len - args.horizon
seq_len = args.seq_len
else:
validseqlen = args.horizon
seq_len = args.horizon
for batch, i in enumerate(range(0, train_data.size(0) - 1, validseqlen)):
        # When not using repackaging mode, we DISCARD the first args.horizon outputs in backprop (which are
        # the "effective history").
eff_history = args.horizon if eff_history_mode else 0
if i + eff_history >= train_data.size(0) - 1: continue
data, targets = get_batch(train_data, i, seq_len)
if args.repack:
hidden = repackage_hidden(hidden)
else:
hidden = model.init_hidden(args.batch_size)
optimizer.zero_grad()
data = data.t()
net = nn.DataParallel(model) if data.size(0) > 10 else model
(_, _, decoded), hidden, all_decoded = net(data, hidden)
decoded = decoded.transpose(0, 1)
targets = targets[eff_history:].contiguous().view(-1)
final_decoded = decoded[eff_history:].contiguous().view(-1, ntokens)
# Loss 1: CE loss
raw_loss = criterion(final_decoded, targets)
# Loss 2: Aux loss
aux_losses = 0
if args.aux > 0:
all_decoded = all_decoded[:, :, eff_history:].permute(1, 2, 0, 3).contiguous()
aux_size = all_decoded.size(0)
all_decoded = all_decoded.view(aux_size, -1, ntokens)
aux_losses = args.aux * sum([criterion(all_decoded[i], targets) for i in range(aux_size)])
# Combine losses
loss = raw_loss + aux_losses
loss.backward()
if args.clip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
total_loss += raw_loss.data
if args.aux:
total_aux_losses += aux_losses.data
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss.item() / args.log_interval
cur_aux_loss = total_aux_losses.item() / args.log_interval if args.aux else 0
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.5f} | ms/batch {:5.2f} | '
'raw_loss {:5.3f} | aux_loss {:5.2f} | bpc {:5.3f}'.format(
epoch, batch, len(train_data) // validseqlen, lr,
elapsed * 1000 / args.log_interval, cur_loss, cur_aux_loss, cur_loss / math.log(2)))
total_loss = 0
total_aux_losses = 0
start_time = time.time()
sys.stdout.flush()
decoded = None
targets = None
final_decoded = None
all_decoded = None
def inference(epoch):
val_loss = evaluate(val_data)
print('-' * 89)
print('| End of epoch {:3d} | valid loss {:5.3f} | valid bpc {:8.3f}'.format(
epoch, val_loss, val_loss / math.log(2)))
test_loss = evaluate(test_data)
print('| End of epoch {:3d} | test loss {:5.3f} | test bpc {:8.3f}'.format(
epoch, test_loss, test_loss / math.log(2)))
print('-' * 89)
return val_loss, test_loss
if args.eval:
print("Eval only mode")
inference(-1)
sys.exit(0)
lr = args.lr
best_val_loss = None
all_val_losses = []
all_test_losses = []
try:
for epoch in range(1, args.epochs + 1):
loss = train(epoch)
val_loss, test_loss = inference(epoch)
if not best_val_loss or val_loss < best_val_loss:
print("Saving model (new best validation) in " + args.save)
save(model, args)
best_val_loss = val_loss
if epoch in args.when:
print("\n" + "*" * 89)
if lr > 1e-5:
print("Annealing learning rate")
lr = lr / 10.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
all_val_losses.append(val_loss)
all_test_losses.append(test_loss)
sys.stdout.flush()
except KeyboardInterrupt:
print('-' * 89)
print("Saving before quit...")
save(model, args)
# Load the best saved model
with open(args.save, 'rb') as f:
model = torch.load(f)
model.save_weights('weights/pretrained_charptb.pkl')
# Run on test data
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.3f} | test bpc {:8.3f}'.format(
test_loss, test_loss / math.log(2)))
print('=' * 89)
| 39.111717 | 109 | 0.585063 | [
"MIT"
] | CookieBox26/trellisnet | char_PTB/char_ptb.py | 14,354 | Python |
import copy
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Tuple, Union
import yaml
from ._utils import (
_DEFAULT_MARKER_,
ValueKind,
_ensure_container,
_get_value,
_is_interpolation,
_is_missing_literal,
_is_missing_value,
_is_none,
_is_special,
_is_union,
_resolve_optional,
get_ref_type,
get_structured_config_data,
get_value_kind,
get_yaml_loader,
is_container_annotation,
is_dict_annotation,
is_list_annotation,
is_primitive_dict,
is_primitive_type,
is_structured_config,
is_tuple_annotation,
)
from .base import Container, ContainerMetadata, DictKeyType, Node, SCMode
from .errors import (
ConfigCycleDetectedException,
ConfigTypeError,
InterpolationResolutionError,
KeyValidationError,
MissingMandatoryValue,
OmegaConfBaseException,
ReadonlyConfigError,
ValidationError,
)
if TYPE_CHECKING:
from .dictconfig import DictConfig # pragma: no cover
class BaseContainer(Container, ABC):
_resolvers: ClassVar[Dict[str, Any]] = {}
def __init__(self, parent: Optional["Container"], metadata: ContainerMetadata):
if not (parent is None or isinstance(parent, Container)):
raise ConfigTypeError("Parent type is not omegaconf.Container")
super().__init__(parent=parent, metadata=metadata)
self.__dict__["_content"] = None
def _resolve_with_default(
self,
key: Union[DictKeyType, int],
value: Node,
default_value: Any = _DEFAULT_MARKER_,
) -> Any:
"""returns the value with the specified key, like obj.key and obj['key']"""
if _is_missing_value(value):
if default_value is not _DEFAULT_MARKER_:
return default_value
raise MissingMandatoryValue("Missing mandatory value: $FULL_KEY")
resolved_node = self._maybe_resolve_interpolation(
parent=self,
key=key,
value=value,
throw_on_resolution_failure=True,
)
return _get_value(resolved_node)
def __str__(self) -> str:
return self.__repr__()
def __repr__(self) -> str:
if self.__dict__["_content"] is None:
return "None"
elif self._is_interpolation() or self._is_missing():
v = self.__dict__["_content"]
return f"'{v}'"
else:
return self.__dict__["_content"].__repr__() # type: ignore
# Support pickle
def __getstate__(self) -> Dict[str, Any]:
dict_copy = copy.copy(self.__dict__)
# no need to serialize the flags cache, it can be re-constructed later
dict_copy.pop("_flags_cache", None)
dict_copy["_metadata"] = copy.copy(dict_copy["_metadata"])
ref_type = self._metadata.ref_type
if is_container_annotation(ref_type):
if is_dict_annotation(ref_type):
dict_copy["_metadata"].ref_type = Dict
elif is_list_annotation(ref_type):
dict_copy["_metadata"].ref_type = List
else:
assert False
if sys.version_info < (3, 7): # pragma: no cover
element_type = self._metadata.element_type
if _is_union(element_type):
raise OmegaConfBaseException(
"Serializing structured configs with `Union` element type requires python >= 3.7"
)
return dict_copy
# Support pickle
def __setstate__(self, d: Dict[str, Any]) -> None:
from omegaconf import DictConfig
from omegaconf._utils import is_generic_dict, is_generic_list
if isinstance(self, DictConfig):
key_type = d["_metadata"].key_type
# backward compatibility to load OmegaConf 2.0 configs
if key_type is None:
key_type = Any
d["_metadata"].key_type = key_type
element_type = d["_metadata"].element_type
# backward compatibility to load OmegaConf 2.0 configs
if element_type is None:
element_type = Any
d["_metadata"].element_type = element_type
ref_type = d["_metadata"].ref_type
if is_container_annotation(ref_type):
if is_generic_dict(ref_type):
d["_metadata"].ref_type = Dict[key_type, element_type] # type: ignore
elif is_generic_list(ref_type):
d["_metadata"].ref_type = List[element_type] # type: ignore
else:
assert False
d["_flags_cache"] = None
self.__dict__.update(d)
@abstractmethod
def __delitem__(self, key: Any) -> None:
...
def __len__(self) -> int:
if self._is_none() or self._is_missing() or self._is_interpolation():
return 0
content = self.__dict__["_content"]
return len(content)
def merge_with_cli(self) -> None:
args_list = sys.argv[1:]
self.merge_with_dotlist(args_list)
def merge_with_dotlist(self, dotlist: List[str]) -> None:
from omegaconf import OmegaConf
def fail() -> None:
raise ValueError("Input list must be a list or a tuple of strings")
if not isinstance(dotlist, (list, tuple)):
fail()
for arg in dotlist:
if not isinstance(arg, str):
fail()
idx = arg.find("=")
if idx == -1:
key = arg
value = None
else:
key = arg[0:idx]
value = arg[idx + 1 :]
value = yaml.load(value, Loader=get_yaml_loader())
OmegaConf.update(self, key, value)
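    # A minimal sketch of the dotlist format handled above (values are parsed with YAML, so
    # numbers, booleans and lists work; an argument without "=" gets the value None):
    #
    #   cfg = OmegaConf.create({})
    #   cfg.merge_with_dotlist(["model.lr=0.1", "model.layers=[64,64]", "debug"])
    #   # -> {"model": {"lr": 0.1, "layers": [64, 64]}, "debug": None}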
def is_empty(self) -> bool:
"""return true if config is empty"""
return len(self.__dict__["_content"]) == 0
@staticmethod
def _to_content(
conf: Container,
resolve: bool,
throw_on_missing: bool,
enum_to_str: bool = False,
structured_config_mode: SCMode = SCMode.DICT,
) -> Union[None, Any, str, Dict[DictKeyType, Any], List[Any]]:
from omegaconf import MISSING, DictConfig, ListConfig
def convert(val: Node) -> Any:
value = val._value()
if enum_to_str and isinstance(value, Enum):
value = f"{value.name}"
return value
def get_node_value(key: Union[DictKeyType, int]) -> Any:
try:
node = conf._get_node(key, throw_on_missing_value=throw_on_missing)
except MissingMandatoryValue as e:
conf._format_and_raise(key=key, value=None, cause=e)
assert isinstance(node, Node)
if resolve:
try:
node = node._dereference_node()
except InterpolationResolutionError as e:
conf._format_and_raise(key=key, value=None, cause=e)
if isinstance(node, Container):
value = BaseContainer._to_content(
node,
resolve=resolve,
throw_on_missing=throw_on_missing,
enum_to_str=enum_to_str,
structured_config_mode=structured_config_mode,
)
else:
value = convert(node)
return value
if conf._is_none():
return None
elif conf._is_missing():
if throw_on_missing:
conf._format_and_raise(
key=None,
value=None,
cause=MissingMandatoryValue("Missing mandatory value"),
)
else:
return MISSING
elif not resolve and conf._is_interpolation():
inter = conf._value()
assert isinstance(inter, str)
return inter
if resolve:
_conf = conf._dereference_node()
assert isinstance(_conf, Container)
conf = _conf
if isinstance(conf, DictConfig):
if (
conf._metadata.object_type not in (dict, None)
and structured_config_mode == SCMode.DICT_CONFIG
):
return conf
if structured_config_mode == SCMode.INSTANTIATE and is_structured_config(
conf._metadata.object_type
):
return conf._to_object()
retdict: Dict[DictKeyType, Any] = {}
for key in conf.keys():
value = get_node_value(key)
if enum_to_str and isinstance(key, Enum):
key = f"{key.name}"
retdict[key] = value
return retdict
elif isinstance(conf, ListConfig):
retlist: List[Any] = []
for index in range(len(conf)):
item = get_node_value(index)
retlist.append(item)
return retlist
assert False
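    # A minimal sketch of what the conversion above produces; OmegaConf.to_container is the
    # public entry point that ends up here:
    #
    #   cfg = OmegaConf.create({"a": 1, "b": "${a}", "c": [1, 2]})
    #   OmegaConf.to_container(cfg, resolve=True)   # -> {"a": 1, "b": 1, "c": [1, 2]}
    #   OmegaConf.to_container(cfg, resolve=False)  # -> {"a": 1, "b": "${a}", "c": [1, 2]}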
@staticmethod
def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None:
"""merge src into dest and return a new copy, does not modified input"""
from omegaconf import AnyNode, DictConfig, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
src_ref_type = get_ref_type(src)
assert src_ref_type is not None
# If source DictConfig is:
# - None => set the destination DictConfig to None
# - an interpolation => set the destination DictConfig to be the same interpolation
if src._is_none() or src._is_interpolation():
dest._set_value(src._value())
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
return
dest._validate_merge(value=src)
def expand(node: Container) -> None:
rt = node._metadata.ref_type
val: Any
if rt is not Any:
if is_dict_annotation(rt):
val = {}
elif is_list_annotation(rt) or is_tuple_annotation(rt):
val = []
else:
val = rt
elif isinstance(node, DictConfig):
val = {}
else:
assert False
node._set_value(val)
if (
src._is_missing()
and not dest._is_missing()
and is_structured_config(src_ref_type)
):
# Replace `src` with a prototype of its corresponding structured config
# whose fields are all missing (to avoid overwriting fields in `dest`).
src = _create_structured_with_missing_fields(
ref_type=src_ref_type, object_type=src_type
)
if (dest._is_interpolation() or dest._is_missing()) and not src._is_missing():
expand(dest)
src_items = src.items_ex(resolve=False) if not src._is_missing() else []
for key, src_value in src_items:
src_node = src._get_node(key, validate_access=False)
dest_node = dest._get_node(key, validate_access=False)
assert src_node is None or isinstance(src_node, Node)
assert dest_node is None or isinstance(dest_node, Node)
if isinstance(dest_node, DictConfig):
dest_node._validate_merge(value=src_node)
missing_src_value = _is_missing_value(src_value)
if (
isinstance(dest_node, Container)
and dest_node._is_none()
and not missing_src_value
and not _is_none(src_value, resolve=True)
):
expand(dest_node)
if dest_node is not None and dest_node._is_interpolation():
target_node = dest_node._maybe_dereference_node()
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
is_optional, et = _resolve_optional(dest._metadata.element_type)
if dest_node is None and is_structured_config(et) and not missing_src_value:
# merging into a new node. Use element_type as a base
dest[key] = DictConfig(
et, parent=dest, ref_type=et, is_optional=is_optional
)
dest_node = dest._get_node(key)
if dest_node is not None:
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest_node._merge_with(src_value)
elif not missing_src_value:
dest.__setitem__(key, src_value)
else:
if isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
assert isinstance(src_node, ValueNode)
# Compare to literal missing, ignoring interpolation
src_node_missing = _is_missing_literal(src_value)
try:
if isinstance(dest_node, AnyNode):
if src_node_missing:
node = copy.copy(src_node)
# if src node is missing, use the value from the dest_node,
                                    # but validate it against the type of the src node before assignment
node._set_value(dest_node._value())
else:
node = src_node
dest.__setitem__(key, node)
else:
if not src_node_missing:
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
# verified to be compatible above in _validate_merge
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
# explicit flags on the source config are replacing the flag values in the destination
flags = src._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
dest._set_flag(flag, value)
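    # A minimal sketch of the dict-merge semantics implemented above, via the public API:
    #
    #   base = OmegaConf.create({"a": 1, "b": {"c": 2}})
    #   override = OmegaConf.create({"b": {"c": 3, "d": 4}})
    #   OmegaConf.merge(base, override)   # -> {"a": 1, "b": {"c": 3, "d": 4}}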
@staticmethod
def _list_merge(dest: Any, src: Any) -> None:
from omegaconf import DictConfig, ListConfig, OmegaConf
assert isinstance(dest, ListConfig)
assert isinstance(src, ListConfig)
if src._is_none():
dest._set_value(None)
elif src._is_missing():
# do not change dest if src is MISSING.
if dest._metadata.element_type is Any:
dest._metadata.element_type = src._metadata.element_type
elif src._is_interpolation():
dest._set_value(src._value())
else:
temp_target = ListConfig(content=[], parent=dest._get_parent())
temp_target.__dict__["_metadata"] = copy.deepcopy(
dest.__dict__["_metadata"]
)
is_optional, et = _resolve_optional(dest._metadata.element_type)
if is_structured_config(et):
prototype = DictConfig(et, ref_type=et, is_optional=is_optional)
for item in src._iter_ex(resolve=False):
if isinstance(item, DictConfig):
item = OmegaConf.merge(prototype, item)
temp_target.append(item)
else:
for item in src._iter_ex(resolve=False):
temp_target.append(item)
dest.__dict__["_content"] = temp_target.__dict__["_content"]
# explicit flags on the source config are replacing the flag values in the destination
flags = src._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
dest._set_flag(flag, value)
def merge_with(
self,
*others: Union[
"BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any
],
) -> None:
try:
self._merge_with(*others)
except Exception as e:
self._format_and_raise(key=None, value=None, cause=e)
def _merge_with(
self,
*others: Union[
"BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any
],
) -> None:
        """merge a list of other Config objects into this one, overriding as needed"""
        from .dictconfig import DictConfig
        from .listconfig import ListConfig
for other in others:
if other is None:
raise ValueError("Cannot merge with a None config")
my_flags = {}
if self._get_flag("allow_objects") is True:
my_flags = {"allow_objects": True}
other = _ensure_container(other, flags=my_flags)
if isinstance(self, DictConfig) and isinstance(other, DictConfig):
BaseContainer._map_merge(self, other)
elif isinstance(self, ListConfig) and isinstance(other, ListConfig):
BaseContainer._list_merge(self, other)
else:
raise TypeError("Cannot merge DictConfig with ListConfig")
# recursively correct the parent hierarchy after the merge
self._re_parent()
# noinspection PyProtectedMember
def _set_item_impl(self, key: Any, value: Any) -> None:
"""
Changes the value of the node key with the desired value. If the node key doesn't
exist it creates a new one.
"""
from .nodes import AnyNode, ValueNode
if isinstance(value, Node):
do_deepcopy = not self._get_flag("no_deepcopy_set_nodes")
if not do_deepcopy and isinstance(value, Container):
# if value is from the same config, perform a deepcopy no matter what.
if self._get_root() is value._get_root():
do_deepcopy = True
if do_deepcopy:
value = copy.deepcopy(value)
value._set_parent(None)
try:
old = value._key()
value._set_key(key)
self._validate_set(key, value)
finally:
value._set_key(old)
else:
self._validate_set(key, value)
if self._get_flag("readonly"):
raise ReadonlyConfigError("Cannot change read-only config container")
input_is_node = isinstance(value, Node)
target_node_ref = self._get_node(key)
input_is_typed_vnode = isinstance(value, ValueNode) and not isinstance(
value, AnyNode
)
target_is_vnode = isinstance(target_node_ref, ValueNode)
def get_target_type_hint(val: Any) -> Any:
if not is_structured_config(val):
type_hint = self._metadata.element_type
else:
target = self._get_node(key)
if target is None:
type_hint = self._metadata.element_type
else:
assert isinstance(target, Node)
type_hint = target._metadata.type_hint
return type_hint
def assign(value_key: Any, val: Node) -> None:
assert val._get_parent() is None
v = val
v._set_parent(self)
v._set_key(value_key)
_deep_update_type_hint(node=v, type_hint=self._metadata.element_type)
self.__dict__["_content"][value_key] = v
if input_is_typed_vnode:
assign(key, value)
else:
# input is not a ValueNode, can be primitive or container
special_value = _is_special(value)
type_hint = get_target_type_hint(value)
# We use the `Node._set_value` method if the target node exists
# 1. the value is special (i.e. MISSING or None or interpolation), or
# 2. the target is a Container and has an explicit ref_type, or
# 3. the target is a typed ValueNode, or
# 4. the target is an AnyNode and the input is a primitive type.
should_set_value = target_node_ref is not None and (
special_value
or (
isinstance(target_node_ref, Container)
and target_node_ref._has_ref_type()
)
or (target_is_vnode and not isinstance(target_node_ref, AnyNode))
or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))
)
if should_set_value:
if special_value and isinstance(value, Node):
value = value._value()
self.__dict__["_content"][key]._set_value(value)
elif input_is_node:
_, ref_type = _resolve_optional(type_hint)
if special_value and (
is_container_annotation(ref_type) or is_structured_config(ref_type)
):
self._wrap_value_and_set(key, value._value(), type_hint)
else:
assign(key, value)
else:
self._wrap_value_and_set(key, value, type_hint)
def _wrap_value_and_set(self, key: Any, val: Any, type_hint: Any) -> None:
from omegaconf.omegaconf import _maybe_wrap
is_optional, ref_type = _resolve_optional(type_hint)
wrapped = _maybe_wrap(
ref_type=ref_type,
key=key,
value=val,
is_optional=is_optional,
parent=self,
)
self.__dict__["_content"][key] = wrapped
@staticmethod
def _item_eq(
c1: Container,
k1: Union[DictKeyType, int],
c2: Container,
k2: Union[DictKeyType, int],
) -> bool:
v1 = c1._get_node(k1)
v2 = c2._get_node(k2)
assert v1 is not None and v2 is not None
assert isinstance(v1, Node)
assert isinstance(v2, Node)
if v1._is_none() and v2._is_none():
return True
if v1._is_missing() and v2._is_missing():
return True
v1_inter = v1._is_interpolation()
v2_inter = v2._is_interpolation()
dv1: Optional[Node] = v1
dv2: Optional[Node] = v2
if v1_inter:
dv1 = v1._maybe_dereference_node()
if v2_inter:
dv2 = v2._maybe_dereference_node()
if v1_inter and v2_inter:
if dv1 is None or dv2 is None:
return v1 == v2
else:
# both are not none, if both are containers compare as container
if isinstance(dv1, Container) and isinstance(dv2, Container):
if dv1 != dv2:
return False
dv1 = _get_value(dv1)
dv2 = _get_value(dv2)
return dv1 == dv2
elif not v1_inter and not v2_inter:
v1 = _get_value(v1)
v2 = _get_value(v2)
ret = v1 == v2
assert isinstance(ret, bool)
return ret
else:
dv1 = _get_value(dv1)
dv2 = _get_value(dv2)
ret = dv1 == dv2
assert isinstance(ret, bool)
return ret
def _is_optional(self) -> bool:
return self.__dict__["_metadata"].optional is True
def _is_interpolation(self) -> bool:
return _is_interpolation(self.__dict__["_content"])
@abstractmethod
def _validate_get(self, key: Any, value: Any = None) -> None:
...
@abstractmethod
def _validate_set(self, key: Any, value: Any) -> None:
...
def _value(self) -> Any:
return self.__dict__["_content"]
def _get_full_key(self, key: Union[DictKeyType, int, slice, None]) -> str:
from .listconfig import ListConfig
from .omegaconf import _select_one
if not isinstance(key, (int, str, Enum, float, bool, slice, bytes, type(None))):
return ""
def _slice_to_str(x: slice) -> str:
if x.step is not None:
return f"{x.start}:{x.stop}:{x.step}"
else:
return f"{x.start}:{x.stop}"
def prepand(full_key: str, parent_type: Any, cur_type: Any, key: Any) -> str:
if isinstance(key, slice):
key = _slice_to_str(key)
elif isinstance(key, Enum):
key = key.name
elif isinstance(key, (int, float, bool)):
key = str(key)
if issubclass(parent_type, ListConfig):
if full_key != "":
if issubclass(cur_type, ListConfig):
full_key = f"[{key}]{full_key}"
else:
full_key = f"[{key}].{full_key}"
else:
full_key = f"[{key}]"
else:
if full_key == "":
full_key = key
else:
if issubclass(cur_type, ListConfig):
full_key = f"{key}{full_key}"
else:
full_key = f"{key}.{full_key}"
return full_key
if key is not None and key != "":
assert isinstance(self, Container)
cur, _ = _select_one(
c=self, key=str(key), throw_on_missing=False, throw_on_type_error=False
)
if cur is None:
cur = self
full_key = prepand("", type(cur), None, key)
if cur._key() is not None:
full_key = prepand(
full_key, type(cur._get_parent()), type(cur), cur._key()
)
else:
full_key = prepand("", type(cur._get_parent()), type(cur), cur._key())
else:
cur = self
if cur._key() is None:
return ""
full_key = self._key()
assert cur is not None
memo = {id(cur)} # remember already visited nodes so as to detect cycles
while cur._get_parent() is not None:
cur = cur._get_parent()
if id(cur) in memo:
raise ConfigCycleDetectedException(
f"Cycle when iterating over parents of key `{key!s}`"
)
memo.add(id(cur))
assert cur is not None
if cur._key() is not None:
full_key = prepand(
full_key, type(cur._get_parent()), type(cur), cur._key()
)
return full_key
def _create_structured_with_missing_fields(
ref_type: type, object_type: Optional[type] = None
) -> "DictConfig":
from . import MISSING, DictConfig
cfg_data = get_structured_config_data(ref_type)
for v in cfg_data.values():
v._set_value(MISSING)
cfg = DictConfig(cfg_data)
cfg._metadata.optional, cfg._metadata.ref_type = _resolve_optional(ref_type)
cfg._metadata.object_type = object_type
return cfg
def _update_types(node: Node, ref_type: Any, object_type: Optional[type]) -> None:
if object_type is not None and not is_primitive_dict(object_type):
node._metadata.object_type = object_type
if node._metadata.ref_type is Any:
_deep_update_type_hint(node, ref_type)
def _deep_update_type_hint(node: Node, type_hint: Any) -> None:
"""Ensure node is compatible with type_hint, mutating if necessary."""
from omegaconf import DictConfig, ListConfig
from ._utils import get_dict_key_value_types, get_list_element_type
if type_hint is Any:
return
_shallow_validate_type_hint(node, type_hint)
new_is_optional, new_ref_type = _resolve_optional(type_hint)
node._metadata.ref_type = new_ref_type
node._metadata.optional = new_is_optional
if is_list_annotation(new_ref_type) and isinstance(node, ListConfig):
new_element_type = get_list_element_type(new_ref_type)
node._metadata.element_type = new_element_type
if not _is_special(node):
for i in range(len(node)):
_deep_update_subnode(node, i, new_element_type)
if is_dict_annotation(new_ref_type) and isinstance(node, DictConfig):
new_key_type, new_element_type = get_dict_key_value_types(new_ref_type)
node._metadata.key_type = new_key_type
node._metadata.element_type = new_element_type
if not _is_special(node):
for key in node:
if new_key_type is not Any and not isinstance(key, new_key_type):
raise KeyValidationError(
f"Key {key!r} ({type(key).__name__}) is incompatible"
+ f" with key type hint '{new_key_type.__name__}'"
)
_deep_update_subnode(node, key, new_element_type)
def _deep_update_subnode(node: BaseContainer, key: Any, value_type_hint: Any) -> None:
"""Get node[key] and ensure it is compatible with value_type_hint, mutating if necessary."""
subnode = node._get_node(key)
assert isinstance(subnode, Node)
if _is_special(subnode):
# Ensure special values are wrapped in a Node subclass that
# is compatible with the type hint.
node._wrap_value_and_set(key, subnode._value(), value_type_hint)
subnode = node._get_node(key)
assert isinstance(subnode, Node)
_deep_update_type_hint(subnode, value_type_hint)
def _shallow_validate_type_hint(node: Node, type_hint: Any) -> None:
"""Error if node's type, content and metadata are not compatible with type_hint."""
from omegaconf import DictConfig, ListConfig, ValueNode
is_optional, ref_type = _resolve_optional(type_hint)
vk = get_value_kind(node)
if node._is_none():
if not is_optional:
value = _get_value(node)
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type.__name__}'"
)
return
elif vk in (ValueKind.MANDATORY_MISSING, ValueKind.INTERPOLATION):
return
elif vk == ValueKind.VALUE:
if is_primitive_type(ref_type) and isinstance(node, ValueNode):
value = node._value()
if not isinstance(value, ref_type):
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type.__name__}'"
)
elif is_structured_config(ref_type) and isinstance(node, DictConfig):
return
elif is_dict_annotation(ref_type) and isinstance(node, DictConfig):
return
elif is_list_annotation(ref_type) and isinstance(node, ListConfig):
return
else:
if isinstance(node, ValueNode):
value = node._value()
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type}'"
)
else:
raise ValidationError(
f"'{type(node).__name__}' is incompatible"
+ f" with type hint '{ref_type}'"
)
else:
assert False
| 36.87931 | 103 | 0.569331 | [
"BSD-3-Clause"
] | gwenzek/omegaconf | omegaconf/basecontainer.py | 32,085 | Python |
# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from pydantic import Field
from braket.device_schema.device_capabilities import DeviceCapabilities
from braket.device_schema.dwave.dwave_provider_properties_v1 import DwaveProviderProperties
from braket.schema_common import BraketSchemaBase, BraketSchemaHeader
class DwaveDeviceCapabilities(DeviceCapabilities, BraketSchemaBase):
"""
    These are the capabilities specific to the D-Wave device
    Attributes:
        provider: Properties specific to the D-Wave provider
Examples:
>>> import json
        >>> input_json = {
... "braketSchemaHeader": {
... "name": "braket.device_schema.dwave.dwave_device_capabilities",
... "version": "1",
... },
... "provider": {
... "braketSchemaHeader": {
... "name": "braket.device_schema.dwave.dwave_provider_properties",
... "version": "1",
... },
... "annealingOffsetStep": 1.45,
... "annealingOffsetStepPhi0": 1.45,
... "annealingOffsetRanges": [[1.45, 1.45], [1.45, 1.45]],
... "annealingDurationRange": [1, 2, 3],
... "couplers": [[1, 2, 3], [1, 2, 3]],
... "defaultAnnealingDuration": 1,
... "defaultProgrammingThermalizationDuration": 1,
... "defaultReadoutThermalizationDuration": 1,
... "extendedJRange": [1, 2, 3],
... "hGainScheduleRange": [1, 2, 3],
... "hRange": [1, 2, 3],
... "jRange": [1, 2, 3],
... "maximumAnnealingSchedulePoints": 1,
... "maximumHGainSchedulePoints": 1,
... "perQubitCouplingRange": [1, 2, 3],
... "programmingThermalizationDurationRange": [1, 2, 3],
... "qubits": [1, 2, 3],
... "qubitCount": 1,
... "quotaConversionRate": 1,
... "readoutThermalizationDurationRange": [1, 2, 3],
... "taskRunDurationRange": [1, 2, 3],
... "topology": {},
... },
... "service": {
... "braketSchemaHeader": {
... "name": "braket.device_schema.device_service_properties",
... "version": "1",
... },
... "executionWindows": [
... {
... "executionDay": "Everyday",
... "windowStartHour": "09:00",
... "windowEndHour": "19:00",
... }
... ],
... "shotsRange": [1, 10],
... "deviceCost": {
... "price": 0.25,
... "unit": "minute"
... },
... "deviceDocumentation": {
... "imageUrl": "image_url",
... "summary": "Summary on the device",
... "externalDocumentationUrl": "exter doc link",
... },
... "deviceLocation": "us-east-1",
... "updatedAt": "2020-06-16T19:28:02.869136"
... },
... "action": {
... "braket.ir.annealing.problem": {
... "actionType": "braket.ir.annealing.problem",
... "version": ["1"],
... }
... },
... "deviceParameters": {DwaveDeviceParameters.schema_json()},
... }
>>> DwaveDeviceCapabilities.parse_raw_schema(json.dumps(input_json))
"""
_PROGRAM_HEADER = BraketSchemaHeader(
name="braket.device_schema.dwave.dwave_device_capabilities", version="1"
)
braketSchemaHeader: BraketSchemaHeader = Field(default=_PROGRAM_HEADER, const=_PROGRAM_HEADER)
provider: DwaveProviderProperties
| 41.866667 | 98 | 0.512966 | [
"Apache-2.0"
] | QPC-database/amazon-braket-schemas-python | src/braket/device_schema/dwave/dwave_device_capabilities_v1.py | 4,396 | Python |
import unittest
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.sentiment.extension import SentimentExtension
from programytest.client import TestClient
class SentimentExtensionTests(unittest.TestCase):
def setUp(self):
self._client = TestClient()
config = BotConfiguration()
config.sentiment_analyser._classname = "programy.sentiment.textblob_sentiment.TextBlobSentimentAnalyser"
config.sentiment_analyser._scores = "programy.sentiment.scores.SentimentScores"
self.client_context = self._client.create_client_context("testuser")
self.client_context._bot = Bot(config=config, client=self._client)
self.client_context._bot.initiate_sentiment_analyser()
def test_invalid_command(self):
extension = SentimentExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "XXX")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT SCOREX")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING LAST")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT SCORES")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
result = extension.execute(self.client_context, "SENTIMENT CURRENT")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT INVALID COMMAND", result)
def test_valid_scores_command(self):
extension = SentimentExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "SENTIMENT ENABLED")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT ENABLED", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING LAST 1")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT FEELING NEUTRAL AND NEUTRAL", result)
result = extension.execute(self.client_context, "SENTIMENT FEELING OVERALL")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT FEELING NEUTRAL AND NEUTRAL", result)
result = extension.execute(self.client_context, "SENTIMENT SCORE I LIKE YOU")
self.assertIsNotNone(result)
self.assertEqual("SENTIMENT SCORES POSITIVITY NEUTRAL SUBJECTIVITY COMPLETELY OBJECTIVE", result)
| 39.25974 | 112 | 0.726431 | [
"MIT"
] | motazsaad/fit-bot-fb-clt | test/programytest/sentiment/test_extension.py | 3,023 | Python |
from pathlib import Path
from subprocess import PIPE, CalledProcessError
from typing import Iterable, List, Tuple, Union
import matplotlib.pyplot as plt
PathLike = Union[Path, str]
conf_opening, conf_closing = "+++++", "-----"
def profile_config_file(
binary_path: PathLike,
config_path: PathLike,
output_config_path: PathLike,
progress_bar: bool = True,
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
) -> None:
r"""Profile an HPVM configuration file with an HPVM binary,
and write the updated configuration file to a given location.
The configuration file must have the baseline as the first configuration.
:param binary_path: Path to binary to be executed in profiling.
:param config_path: Path to config file (HPVM configuration format)
with configs to enumerate for profiling.
:param output_config_path: Path where the output configs are written.
The output config file has the same configs as the input `config_path` file,
but the performance and energy readings are updated.
:param progress_bar: If `True`, show a progress bar for number of configs already profiled.
:param profile_filename: Name of profile file generated by the binary (in current directory).
This defaults to "profile_info.txt" and should not be changed for HPVM binaries.
:param qos_filename: Name of QoS file generated by the binary (in current directory).
It contains a single float number as the QoS of this run.
This defaults to "final_accuracy" and should not be changed for HPVM binaries.
"""
# Read first line ("the float") and configs in config file
header, configs = read_hpvm_configs(Path(config_path))
if not configs:
raise ValueError("Config file with no configs is unsupported.")
# Modifies configs in place.
profile_configs(
binary_path,
configs[1:],
configs[0],
progress_bar,
profile_filename,
qos_filename,
)
write_hpvm_configs(header, configs, Path(output_config_path))
def profile_configs(
binary_path: PathLike,
configs: Iterable["Config"],
baseline_config: "Config",
progress_bar: bool = True,
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
) -> None:
"""Profile a sequence of HPVM configs.
This function modifies argument `configs` in place."""
from tqdm import tqdm
baseline_time, baseline_acc = measure_config(binary_path, baseline_config)
iterable = tqdm(configs, desc="Configs profiled") if progress_bar else configs
for config in iterable:
time, acc = measure_config(binary_path, config, profile_filename, qos_filename)
speedup = baseline_time / time
config.update_profile_results(speedup, acc, baseline_acc)
return configs
def measure_config(
binary_path: PathLike,
config: "Config",
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
):
from subprocess import check_call
from tempfile import NamedTemporaryFile
import os
temp_file = NamedTemporaryFile("w")
write_hpvm_configs("0.0", [config], Path(temp_file.name))
# Run binary_path binary,
# which generates `profile_filename` and `qos_filename` file in cwd.
try:
with open(os.devnull, "w") as f:
check_call([str(binary_path), "-c", str(temp_file.name)], stdout=f)
except CalledProcessError as e:
print("Output from the program:")
print(e.output)
raise e
time = _read_profile_file(Path(profile_filename))
acc = _read_qos_file(Path(qos_filename))
temp_file.close()
return time, acc
def plot_hpvm_configs(
config_path: PathLike,
save_to: PathLike = None,
show_qos_loss: bool = True,
**fig_kwargs,
) -> plt.Figure:
"""
Plot the QoS-speedup information in an HPVM configuration file.
It is recommended to profile the config file first (using `profile_configs`)
to obtain real speedup numbers.
This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it.
:param config_path: Path to the config file (HPVM configuration format).
:param save_to: File to save figure into. Default is None: don't save figure (just return it).
:param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True.
If False, will use (absolute) QoS instead of QoS loss.
:param fig_kwargs: Arguments to pass to `plt.subplots`.
"""
import numpy as np
_, configs = read_hpvm_configs(config_path)
get_qos = lambda c: c.qos_loss if show_qos_loss else c.qos
qos_speedup = np.array([(get_qos(c), c.speedup) for c in configs])
qoses, speedups = qos_speedup.T
fig, ax = plt.subplots(**fig_kwargs)
ax.scatter(qoses, speedups)
ax.set_xlabel("QoS Loss")
ax.set_ylabel("Speedup (X)")
if save_to:
fig.savefig(save_to, dpi=300)
return fig
class Config:
def __init__(
self,
conf_name: str,
speedup: float,
energy: float,
qos: float,
qos_loss: float,
config_body: List[str],
):
self.conf_name = conf_name
self.speedup = speedup
self.energy = energy
self.qos = qos
self.qos_loss = qos_loss
# We don't care about the information in this part, and we don't parse this.
self.config_body = config_body
def update_profile_results(self, speedup: float, qos: float, base_qos: float):
recorded_base_qos = self.qos + self.qos_loss
if abs(recorded_base_qos - base_qos) > 0.025:
raise ValueError(
f"Baseline QoS mismatch. Original: {recorded_base_qos}, measured: {base_qos}"
)
self.speedup = speedup
self.qos = qos
self.qos_loss = base_qos - qos
def __repr__(self) -> str:
header_fields = [
self.conf_name,
self.speedup,
self.energy,
self.qos,
self.qos_loss,
]
header = " ".join(str(field) for field in header_fields)
lines = [conf_opening, header, *self.config_body, conf_closing]
return "\n".join(lines)
__str__ = __repr__
def read_hpvm_configs(config_file: PathLike) -> Tuple[str, List[Config]]:
# def read_hpvm_configs(config_file, config_num, temp_file):
ret_configs = []
with open(config_file) as f:
text = f.read()
# There's 1 float sitting on the first line of config file.
# We don't use it, but want to keep that intact.
header, *configs = text.split(conf_opening)
header = header.strip()
for config_text in configs:
config_text = config_text.replace(conf_closing, "").strip()
config_header, *config_body = config_text.splitlines()
conf_name, *number_fields = config_header.split(" ")
speedup, energy, qos, qos_drop = [float(s) for s in number_fields]
ret_configs.append(
Config(conf_name, speedup, energy, qos, qos_drop, config_body)
)
return header, ret_configs
def write_hpvm_configs(header: str, configs: Iterable[Config], to_file: PathLike):
text_segs = [header] + [str(config) for config in configs]
with open(to_file, "w") as f:
f.write("\n".join(text_segs))
f.flush()
def _read_profile_file(profile_file_path: Path):
with profile_file_path.open() as f:
target_lines = [line.strip() for line in f if "Total Time" in line]
if len(target_lines) != 1:
raise RuntimeError(f"Profile {profile_file_path} malformed")
(target_line,) = target_lines
return float(target_line.split()[3])
def _read_qos_file(qos_file_path: Path):
with qos_file_path.open() as f:
return float(f.read().strip())
| 36.170507 | 98 | 0.673844 | [
"Apache-2.0"
] | vzyrianov/hpvm-autograd | hpvm/projects/hpvm-profiler/hpvm_profiler/__init__.py | 7,849 | Python |
# pylint: disable=too-many-lines
import os
import random
import shutil
import time
import uuid
from retval import RetVal
from pycryptostring import CryptoString
from pymensago.encryption import EncryptionPair
from pymensago.hash import blake2hash
from pymensago.serverconn import ServerConnection
from integration_setup import login_admin, regcode_admin, setup_test, init_server, init_user, \
init_user2, reset_top_dir
from tests.integration.integration_setup import funcname
server_response = {
'title' : 'Mensago Server Response',
'type' : 'object',
'required' : [ 'Code', 'Status', 'Info', 'Data' ],
'properties' : {
'Code' : {
'type' : 'integer'
},
'Status' : {
'type' : 'string'
},
'Info' : {
'type' : 'string'
},
'Data' : {
'type' : 'object'
}
}
}
def make_test_file(path: str, file_size=-1, file_name='') -> RetVal:
'''Generate a test file containing nothing but zeroes. If the file size is negative, a random
size between 1 and 10 Kb will be chosen. If the file name is empty, a random one will be
generated.
Returns:
name: (str) name of the test file generated
size: (int) size of the test file generated
'''
if file_size < 0:
file_size = random.randint(1,10) * 1024
	if not file_name:
file_name = f"{int(time.time())}.{file_size}.{str(uuid.uuid4())}"
try:
fhandle = open(os.path.join(path, file_name), 'w')
except Exception as e:
return RetVal().wrap_exception(e)
fhandle.write('0' * file_size)
fhandle.close()
return RetVal().set_values({ 'name':file_name, 'size':file_size })
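# A minimal usage sketch (the directory is hypothetical): create a ~4 KiB test file and read
# its generated name and size back from the returned RetVal.
#
#   status = make_test_file('/tmp/mensago-test', file_size=4096)
#   assert not status.error()
#   print(status['name'], status['size'])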
def setup_testdir(name) -> str:
'''Creates a test folder for holding files'''
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'testfiles')
if not os.path.exists(topdir):
os.mkdir(topdir)
testdir = os.path.join(topdir, name)
while os.path.exists(testdir):
try:
shutil.rmtree(testdir)
except:
print("Waiting a second for test folder to unlock")
time.sleep(1.0)
os.mkdir(testdir)
return testdir
def test_copy():
'''Tests the COPY command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Set up the directory hierarchy
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
# Subtest #1: Nonexistent source file
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': '/ wsp ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
'DestDir': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_copy: #1 failed to handle nonexistent source file'
# Subtest #2: Nonexistent destination directory
# By making this 1MB + 1byte, the file's mere existence will put us over the limit of the 1MB
# disk quota
status = make_test_file(admin_dir, file_size=0x10_0001)
assert not status.error(), 'test_copy: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_copy: #2 failed to handle nonexistent destination dir'
# Subtest #3: Source path is a directory
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_copy: #3 failed to handle directory as source'
# Subtest #4: Destination is file path
# Normally each file on the system has a unique name, but having a duplicate in this case
# won't matter
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination'
# Subtest #5: Insufficient quota remaining
# The administrator normally isn't subject to a quota, so we'll change that just for this
# one test. *heh*
# We actually have to do an update instead of an insert because the quota checks in earlier
# calls ensure that there is a quota record for admin in the database
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 409, 'test_copy: #5 failed to handle quota limit'
# We need this to be unlimited for later tests
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
# Subtest #6: Actual success
conn.send_message({
'Action': 'COPY',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_copy: #6 failed to succeed'
conn.disconnect()
def test_delete():
'''Test the DELETE command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Bad path
conn.send_message({
'Action': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: failed to handle bad path"
# Subtest #2: Directory doesn't exist
conn.send_message({
'Action': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, f"{funcname()}: #2 failed to handle nonexistent file"
# Subtest #3: Actual success
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), f"{funcname()}: #3 failed to create test file"
filename = status["name"]
conn.send_message({
'Action': 'DELETE',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {filename}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, f"{funcname()}: #3 failed to delete file"
def test_download():
'''This tests the command DOWNLOAD'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
# Subtest #1: Missing parameters
conn.send_message({'Action': 'DOWNLOAD','Data': {}})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_download: #1 failed to handle missing parameter'
# Subtest #2: Non-existent path
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' +
' 1000.1000.22222222-2222-2222-2222-222222222222'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_download: #2 failed to handle non-existent path'
# Subtest #3: Actual success
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"test_download: #3 failed to create test file: {status.info}"
testname = status['name']
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download'
assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
'test_download: #3 server failed to respond with file size'
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Size': '1000'
}
})
rawdata = conn.read()
assert len(rawdata) == 1000, 'test_download: #3 downloaded file had wrong length'
# Set up an 'interrupted' transfer
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"test_download: #4 failed to create test file: {status.info}"
testname = status['name']
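# The two subtests below exercise resumable downloads: the client re-requests the same path
# with an 'Offset' field, and the server is expected to return only the bytes from that
# offset onward.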
# Subtest #4: Resume offset larger than size of data stored server-side
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '2500'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_download: #4 failed to handle offset > file size'
# Subtest #5: Resume interrupted transfer - exact match
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '500'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_download: #5 failed to proceed to file download'
assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \
'test_download: #5 server failed to respond with file size'
conn.send_message({
'Action': 'DOWNLOAD',
'Data': {
'Path': f"/ wsp {dbdata['admin_wid']} {testname}",
'Offset': '500',
'Size': '1000'
}
})
rawdata = conn.read()
assert len(rawdata) == 500, 'test_download: #5 resumed data had wrong length'
assert blake2hash((('0' * 500) + rawdata).encode()) == \
'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', \
'test_download: #5 resumed file hash failure'
conn.disconnect()
def test_getquotainfo():
'''This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the
disk usage'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid']), file_size=1000)
assert not status.error(), f"Failed to create test workspace file: {status.info}"
conn.send_message({ 'Action': 'GETQUOTAINFO', 'Data': {} })
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_getquotainfo: failed to get quota information'
assert response['Data']['DiskUsage'] == '1000', 'test_getquotainfo: disk usage was incorrect'
assert response['Data']['QuotaSize'] == '0', \
"test_getquotainfo: admin quota wasn't unlimited"
conn.disconnect()
def test_list():
'''Tests the LIST command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Nonexistent path
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_list: #1 failed to handle missing path'
# Subtest #2: Path is a file
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_list: #2 failed to create test file"
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_list: #2 failed to handle path as file'
# Subtest #3: Empty directory
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #3 failed to handle empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 0, \
'test_list: #3 failed to have empty response for empty directory'
# Subtest #4: A list of files
for i in range(1,6):
tempname = '.'.join([str(1000 * i), '500', str(uuid.uuid4())])
try:
fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111',
tempname), 'w')
except Exception as e:
assert False, 'test_list: #4 failed to create test files: ' + str(e)
fhandle.write('0' * 500)
fhandle.close()
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #4 failed to handle non-empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 5, \
'test_list: #4 failed to list all files in directory'
# Subtest #5: A list of files with time specifier
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
'Time': '3000'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_list: #5 failed to handle non-empty directory'
assert 'Files' in response['Data'] and len(response['Data']['Files']) == 3, \
'test_list: #5 failed to filter files'
conn.disconnect()
def test_listdirs():
'''Tests the LISTDIRS command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Nonexistent path
conn.send_message({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_listdirs: #1 failed to handle missing path'
# Subtest #2: Path is a file
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_listdirs: #2 failed to create test file"
conn.send_message({
'Action': 'LIST',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_listdirs: #2 failed to handle path as file'
# Subtest #3: Empty directory
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_listdirs: #3 failed to handle empty directory'
assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 0, \
'test_listdirs: #3 failed to have empty response for empty directory'
# Subtest #4: A list of directories
for i in range(2,7):
tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)])
try:
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname))
except Exception as e:
assert False, 'test_listdirs: #4 failed to create test directories: ' + str(e)
make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({
'Action': 'LISTDIRS',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_listdirs: #4 failed to handle non-empty directory'
assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 5, \
'test_listdirs: #4 failed to list all subdirectories'
conn.disconnect()
def test_mkdir():
'''Tests the MKDIR command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Bad directory name
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_mkdir: #1 failed to handle bad path'
# Subtest #2: Actual success - 1 directory
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory'
# Subtest #3: Directory already exists
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 408, 'test_mkdir: #3 failed to handle existing directory'
# Subtest #4: Actual success - nested directories
multipath = ' '.join(['/', dbdata['admin_wid'],
'22222222-2222-2222-2222-222222222222',
'33333333-3333-3333-3333-333333333333',
'44444444-4444-4444-4444-444444444444',
'55555555-5555-5555-5555-555555555555'
])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': multipath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_mkdir: #4 failed to create nested directories'
conn.disconnect()
def test_move():
'''Tests the MOVE command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Set up the directory hierarchy
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
# Subtest #1: Nonexistent source file
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': '/ ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef',
'DestDir': '/ ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_move: #1 failed to handle nonexistent source file'
# Subtest #2: Nonexistent destination directory
status = make_test_file(admin_dir)
assert not status.error(), 'test_move: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_move: #2 failed to handle nonexistent destination dir'
# Subtest #3: Source path is a directory
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_move: #3 failed to handle directory as source'
# Subtest #4: Destination is file path
# Normally each file on the system has a unique name, but having a duplicate in this case
# won't matter
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_move: #4 failed to handle file as destination'
os.remove(os.path.join(inner_dir, status['name']))
# Subtest #5: Actual success
conn.send_message({
'Action': 'MOVE',
'Data': {
'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}",
'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_move: #5 failed to succeed'
conn.disconnect()
def test_replace():
'''Test the REPLACE command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Bad old file path
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
'Size': "1234",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: #1 failed to handle bad old file path"
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
filename = status['name']
# Subtest #2: Bad new file path
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name",
'Size': "1234",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, f"{funcname()}: #2 failed to handle bad new file path"
# Subtest #4: Destination directory doesn't exist
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111",
'NewPath': "/ wsp 11111111-1111-1111-1111-111111111111",
'Size': "4321",
'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, f"{funcname()}: #4 failed to handle nonexistent destination dir"
# Subtest #5: Actual success
status = make_test_file(admin_dir)
assert not status.error(), f"{funcname()}: #3 failed to create test file"
filename = status["name"]
conn.send_message({
'Action': 'REPLACE',
'Data': {
'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}",
'NewPath': f"/ wsp {dbdata['admin_wid']}",
'Size': "1000",
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, f'{funcname()}: #5 failed to proceed to file upload'
conn.write('0' * 1000)
response = conn.read_response(server_response)
assert response['Code'] == 200, f'{funcname()}: #5 failed to replace file'
conn.disconnect()
def test_rmdir():
'''Tests the RMDIR command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Bad directory name
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_rmdir: #1 failed to handle bad path'
# Subtest #2: Directory doesn't exist
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_rmdir: #2 failed to handle nonexistent directory'
# Subtest #3: Call fails because of non-empty directory
multipath = ' '.join(['/ wsp', dbdata['admin_wid'],
'22222222-2222-2222-2222-222222222222',
'33333333-3333-3333-3333-333333333333',
'44444444-4444-4444-4444-444444444444',
'55555555-5555-5555-5555-555555555555'
])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': multipath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_rmdir: #3 failed to create test hierarchy'
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222',
'Recursive': 'False'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 408, 'test_rmdir: #3 failed to handle non-empty directory'
# Subtest #4: Actual success - non-recursively remove an empty directory
conn.send_message({
'Action': 'RMDIR',
'Data': {
'Path': multipath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_rmdir: #4 failed to remove an empty directory'
def test_select():
'''Tests the SELECT command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
# Subtest #1: Nonexistent path
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': '/ 11111111-1111-1111-1111-111111111111'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_select: #1 failed to handle missing path'
# Subtest #2: Path is a file
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'],
dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert not status.error(), "test_select: #2 failed to create test file"
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_select: #2 failed to handle path as file'
# Subtest #3: Actual success
innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222'])
conn.send_message({
'Action': 'MKDIR',
'Data': {
'Path': innerpath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_select: #3 failed to create test directory'
conn.send_message({
'Action': 'SELECT',
'Data': {
'Path': innerpath
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_select: #3 failed to work correctly'
conn.disconnect()
def test_setquota():
'''Tests the SETQUOTA command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
init_user2(dbdata, conn)
# Subtest #1: Bad sizes
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': '0',
'Workspaces': '33333333-3333-3333-3333-333333333333'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad size value'
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': "Real programmers don't eat quiche ;)",
'Workspaces': '33333333-3333-3333-3333-333333333333'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad size data type'
# Subtest #2: Bad workspace list
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': "4096",
'Workspaces': '33333333-3333-3333-3333-333333333333,'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_setquota: failed to handle bad workspace list'
# Subtest #3: Actual success
conn.send_message({
'Action': 'SETQUOTA',
'Data': {
'Size': "4096",
'Workspaces': '33333333-3333-3333-3333-333333333333, ' \
'44444444-4444-4444-4444-444444444444'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_setquota: failed to handle actual success'
conn.disconnect()
def test_upload():
'''Tests the UPLOAD command'''
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
reset_top_dir(dbdata)
# password is 'SandstoneAgendaTricycle'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \
'dcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'),
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
# Subtest #1: Missing parameters
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': '1000',
# Hash parameter is missing
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_upload: #1 failed to handle missing parameter'
# Subtest #2: Non-existent path
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': '1000',
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 404, 'test_upload: #2 failed to handle non-existent path'
# Subtest #3: Size too big
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(0x4000_0000 * 200), # 200GiB isn't all that big :P
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 414, 'test_upload: #3 failed to handle file too big'
# Subtest #4: Insufficient quota remaining
# The administrator normally isn't subject to a quota, so we'll change that just for this
# one test. *heh*
# Direct string substitution like this is normally a recipe for SQL injection, but these
# are trusted test values, so it's acceptable here.
cur = dbconn.cursor()
cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)")
dbconn.commit()
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(0x10_0000 * 30), # 30MiB
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 409, 'test_upload: #4 quota check failed'
# We need this to be unlimited for later tests
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
# Subtest #5: Hash mismatch
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #5 failed to proceed to file upload'
conn.write('0' * 1000)
response = conn.read_response(server_response)
assert response['Code'] == 410, 'test_upload: #5 failed to handle file hash mismatch'
# Subtest #6: Actual success
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload'
conn.write('0' * 1000)
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_upload: #6 failed to complete a valid upload'
# Set up an interrupted transfer
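# Resumable uploads are exercised in subtests #7-#9: the initial UPLOAD reply returns a
# 'TempName' for the partially written server-side file, and a later UPLOAD that supplies
# that TempName together with an 'Offset' continues the transfer from that point.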
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
tempFileName = response['Data']['TempName']
assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload'
assert tempFileName != '', 'test_upload: #6 server failed to return temp file name'
conn.write('0' * 500)
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
login_admin(dbdata, conn)
# Subtest #7: Resume offset larger than size of data stored server-side
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '2000'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 400, 'test_upload: #7 failed to handle offset > file size'
# Subtest #8: Resume interrupted transfer - exact match
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '500'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #8 failed to proceed to file upload'
conn.write('0' * 500)
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_upload: #8 failed to resume with exact offset match'
# Set up one last interrupted transfer
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid']
}
})
response = conn.read_response(server_response)
tempFileName = response['Data']['TempName']
assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload'
assert tempFileName != '', 'test_upload: #6 server failed to return temp file name'
conn.write('0' * 500)
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed"
login_admin(dbdata, conn)
# Subtest #9: Overlapping resume
conn.send_message({
'Action': 'UPLOAD',
'Data': {
'Size': str(1000),
'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp',
'Path': '/ wsp ' + dbdata['admin_wid'],
'TempName': tempFileName,
'Offset': '400'
}
})
response = conn.read_response(server_response)
assert response['Code'] == 100, 'test_upload: #9 failed to proceed to file upload'
conn.write('0' * 600)
response = conn.read_response(server_response)
assert response['Code'] == 200, 'test_upload: #9 failed to resume with overlapping offset'
conn.disconnect()
if __name__ == '__main__':
# test_copy()
# test_delete()
# test_download()
# test_getquotainfo()
# test_list()
# test_listdirs()
# test_mkdir()
# test_move()
test_replace()
# test_rmdir()
# test_setquota()
# test_select()
# test_upload()
| 29.59148 | 99 | 0.693192 | [
"MIT"
] | mensago/mensagod | tests/integration/test_fscmds.py | 42,375 | Python |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on May 8, 2018
@author: talbpaul
Originally from SupervisedLearning.py, split in PR #650 in July 2018
Specific ROM implementation for pickledROM
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .SupervisedLearning import supervisedLearning
#Internal Modules End--------------------------------------------------------------------------------
class pickledROM(supervisedLearning):
"""
Placeholder for ROMs that will be generated by unpickling from file.
"""
def __init__(self,messageHandler,**kwargs):
"""
A constructor that will appropriately initialize a supervised learning object
@ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages
@ In, kwargs, dict, an arbitrary list of kwargs
@ Out, None
"""
self.printTag = 'pickledROM'
self.messageHandler = messageHandler
self._dynamicHandling = False
self.initOptionDict = {}
self.features = ['PlaceHolder']
self.target = 'PlaceHolder'
def __confidenceLocal__(self,featureVals):
"""
This should return an estimation of the quality of the prediction.
@ In, featureVals, 2-D numpy array, [n_samples,n_features]
@ Out, confidence, float, the confidence
"""
pass
def __resetLocal__(self):
"""
Reset ROM. After this method the ROM should be described only by the initial parameter settings
@ In, None
@ Out, None
"""
pass
def __returnCurrentSettingLocal__(self):
"""
Returns a dictionary with the parameters and their current values
@ In, None
@ Out, params, dict, dictionary of parameter names and current values
"""
pass
def __returnInitialParametersLocal__(self):
"""
Returns a dictionary with the parameters and their initial values
@ In, None
@ Out, params, dict, dictionary of parameter names and initial values
"""
params = {}
return params
def __evaluateLocal__(self,featureVals):
"""
Evaluates a point.
@ In, featureVals, list, of values at which to evaluate the ROM
@ Out, returnDict, dict, the evaluated point for each target
"""
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.')
def __trainLocal__(self,featureVals,targetVals):
"""
Trains ROM.
@ In, featureVals, np.ndarray, feature values
@ In, targetVals, np.ndarray, target values
@ Out, None
"""
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.')
| 37.821782 | 132 | 0.620942 | [
"Apache-2.0"
] | alptezbasaran/raven | framework/SupervisedLearning/pickledROM.py | 3,820 | Python |
#******************************************************************************
# Copyright (C) 2013 Kenneth L. Ho
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# None of the names of the copyright holders may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
# Python module for interfacing with `id_dist`.
r"""
======================================================================
Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)
======================================================================
.. moduleauthor:: Kenneth L. Ho <[email protected]>
.. versionadded:: 0.13
.. currentmodule:: scipy.linalg.interpolative
An interpolative decomposition (ID) of a matrix :math:`A \in
\mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a
factorization
.. math::
A \Pi =
\begin{bmatrix}
A \Pi_{1} & A \Pi_{2}
\end{bmatrix} =
A \Pi_{1}
\begin{bmatrix}
I & T
\end{bmatrix},
where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with
:math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} =
A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`,
where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}`
are the *skeleton* and *interpolation matrices*, respectively.
If :math:`A` does not have exact rank :math:`k`, then there exists an
approximation in the form of an ID such that :math:`A = BP + E`, where
:math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k +
1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k
+ 1}` is the best possible error for a rank-:math:`k` approximation
and, in fact, is achieved by the singular value decomposition (SVD)
:math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times
k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns
and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k
\times k}` is diagonal with nonnegative entries. The principal
advantages of using an ID over an SVD are that:
- it is cheaper to construct;
- it preserves the structure of :math:`A`; and
- it is more efficient to compute with in light of the identity submatrix of :math:`P`.
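As a quick numerical illustration of the relation :math:`A = BP` (a minimal sketch that
uses only the routines documented below; the matrix and rank are chosen arbitrarily):

>>> import numpy as np
>>> import scipy.linalg.interpolative as sli
>>> A = np.dot(np.random.rand(20, 4), np.random.rand(4, 30))  # exactly rank 4
>>> idx, proj = sli.interp_decomp(A, 4)
>>> B = A[:, idx[:4]]
>>> P = np.hstack([np.eye(4), proj])[:, np.argsort(idx)]
>>> np.allclose(A, np.dot(B, P))
True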
Routines
========
Main functionality:
.. autosummary::
:toctree: generated/
interp_decomp
reconstruct_matrix_from_id
reconstruct_interp_matrix
reconstruct_skel_matrix
id_to_svd
svd
estimate_spectral_norm
estimate_spectral_norm_diff
estimate_rank
Support functions:
.. autosummary::
:toctree: generated/
seed
rand
References
==========
This module uses the ID software package [1]_ by Martinsson, Rokhlin,
Shkolnisky, and Tygert, which is a Fortran library for computing IDs
using various algorithms, including the rank-revealing QR approach of
[2]_ and the more recent randomized methods described in [3]_, [4]_,
and [5]_. This module exposes its functionality in a way convenient
for Python users. Note that this module does not add any functionality
beyond that of organizing a simpler and more consistent interface.
We advise the user to consult also the `documentation for the ID package
<http://tygert.com/id_doc.4.pdf>`_.
.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a
software package for low-rank approximation of matrices via interpolative
decompositions, version 0.2." http://tygert.com/id_doc.4.pdf.
.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the
compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404,
2005. :doi:`10.1137/030602678`.
.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.
Tygert. "Randomized algorithms for the low-rank approximation of matrices."
*Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.
:doi:`10.1073/pnas.0709640104`.
.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized
algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30
(1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.
.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast
randomized algorithm for the approximation of matrices." *Appl. Comput.
Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.
Tutorial
========
Initializing
------------
The first step is to import :mod:`scipy.linalg.interpolative` by issuing the
command:
>>> import scipy.linalg.interpolative as sli
Now let's build a matrix. For this, we consider a Hilbert matrix, which is well
known to have low rank:
>>> from scipy.linalg import hilbert
>>> n = 1000
>>> A = hilbert(n)
We can also do this explicitly via:
>>> import numpy as np
>>> n = 1000
>>> A = np.empty((n, n), order='F')
>>> for j in range(n):
>>> for i in range(n):
>>> A[i,j] = 1. / (i + j + 1)
Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This
instantiates the matrix in Fortran-contiguous order and is important for
avoiding data copying when passing to the backend.
We then define multiplication routines for the matrix by regarding it as a
:class:`scipy.sparse.linalg.LinearOperator`:
>>> from scipy.sparse.linalg import aslinearoperator
>>> L = aslinearoperator(A)
This automatically sets up methods describing the action of the matrix and its
adjoint on a vector.
Computing an ID
---------------
We have several choices of algorithm to compute an ID. These fall largely
according to two dichotomies:
1. how the matrix is represented, i.e., via its entries or via its action on a
vector; and
2. whether to approximate it to a fixed relative precision or to a fixed rank.
We step through each choice in turn below.
In all cases, the ID is represented by three parameters:
1. a rank ``k``;
2. an index array ``idx``; and
3. interpolation coefficients ``proj``.
The ID is specified by the relation
``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.
From matrix entries
...................
We first consider a matrix given in terms of its entries.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(A, eps)
where ``eps < 1`` is the desired precision.
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(A, k)
where ``k >= 1`` is the desired rank.
Both algorithms use random sampling and are usually faster than the
corresponding older, deterministic algorithms, which can be accessed via the
commands:
>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)
and:
>>> idx, proj = sli.interp_decomp(A, k, rand=False)
respectively.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector as a
:class:`scipy.sparse.linalg.LinearOperator`.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(L, eps)
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(L, k)
These algorithms are randomized.
Reconstructing an ID
--------------------
The ID routines above do not output the skeleton and interpolation matrices
explicitly but instead return the relevant information in a more compact (and
sometimes more useful) form. To build these matrices, write:
>>> B = sli.reconstruct_skel_matrix(A, k, idx)
for the skeleton matrix and:
>>> P = sli.reconstruct_interp_matrix(idx, proj)
for the interpolation matrix. The ID approximation can then be computed as:
>>> C = np.dot(B, P)
This can also be constructed directly using:
>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)
without having to first compute ``P``.
Alternatively, this can be done explicitly as well using:
>>> B = A[:,idx[:k]]
>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]
>>> C = np.dot(B, P)
Computing an SVD
----------------
An ID can be converted to an SVD via the command:
>>> U, S, V = sli.id_to_svd(B, idx, proj)
The SVD approximation is then:
>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))
The SVD can also be computed "fresh" by combining both the ID and conversion
steps into one command. Following the various ID algorithms above, there are
correspondingly various SVD algorithms that one can employ.
From matrix entries
...................
We consider first SVD algorithms for a matrix given in terms of its entries.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(A, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(A, k)
Both algorithms use random sampling; for the deterministic versions, issue the
keyword ``rand=False`` as above.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(L, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(L, k)
Utility routines
----------------
Several utility routines are also available.
To estimate the spectral norm of a matrix, use:
>>> snorm = sli.estimate_spectral_norm(A)
This algorithm is based on the randomized power method and thus requires only
matrix-vector products. The number of iterations to take can be set using the
keyword ``its`` (default: ``its=20``). The matrix is interpreted as a
:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it
as a :class:`numpy.ndarray`, in which case it is trivially converted using
:func:`scipy.sparse.linalg.aslinearoperator`.
The same algorithm can also estimate the spectral norm of the difference of two
matrices ``A1`` and ``A2`` as follows:
>>> diff = sli.estimate_spectral_norm_diff(A1, A2)
This is often useful for checking the accuracy of a matrix approximation.
Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank
of a matrix as well. This can be done with either:
>>> k = sli.estimate_rank(A, eps)
or:
>>> k = sli.estimate_rank(L, eps)
depending on the representation. The parameter ``eps`` controls the definition
of the numerical rank.
Finally, the random number generation required for all randomized routines can
be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed
values to their original values, use:
>>> sli.seed('default')
To specify the seed values, use:
>>> sli.seed(s)
where ``s`` must be an integer or array of 55 floats. If an integer, the array
of floats is obtained from ``numpy.random.RandomState`` seeded with the given
integer.
To simply generate some random numbers, type:
>>> sli.rand(n)
where ``n`` is the number of random numbers to generate.
Remarks
-------
The above functions all automatically detect the appropriate interface and work
with both real and complex data types, passing input arguments to the proper
backend routine.
"""
import scipy.linalg._interpolative_backend as _backend
import numpy as np
import sys
__all__ = [
'estimate_rank',
'estimate_spectral_norm',
'estimate_spectral_norm_diff',
'id_to_svd',
'interp_decomp',
'rand',
'reconstruct_interp_matrix',
'reconstruct_matrix_from_id',
'reconstruct_skel_matrix',
'seed',
'svd',
]
_DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)")
_TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)")
_32BIT_ERROR = ValueError("interpolative decomposition on 32-bit systems "
"with complex128 is buggy")
_IS_32BIT = (sys.maxsize < 2**32)
def _is_real(A):
try:
if A.dtype == np.complex128:
return False
elif A.dtype == np.float64:
return True
else:
raise _DTYPE_ERROR
except AttributeError as e:
raise _TYPE_ERROR from e
def seed(seed=None):
"""
Seed the internal random number generator used in this ID package.
The generator is a lagged Fibonacci method with 55-element internal state.
Parameters
----------
seed : int, sequence, 'default', optional
If 'default', the random seed is reset to a default value.
If `seed` is a sequence containing 55 floating-point numbers
in range [0,1], these are used to set the internal state of
the generator.
If the value is an integer, the internal state is obtained
from `numpy.random.RandomState` (MT19937) with the integer
used as the initial seed.
If `seed` is omitted (None), ``numpy.random.rand`` is used to
initialize the generator.
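Examples
--------
A minimal usage sketch (the generated values themselves are not reproduced here):

>>> import scipy.linalg.interpolative as sli
>>> sli.seed(0)            # deterministic state derived from an integer seed
>>> x = sli.rand(3)        # three standard uniform samples
>>> sli.seed('default')    # restore the default generator state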
"""
# For details, see :func:`_backend.id_srand`, :func:`_backend.id_srandi`,
# and :func:`_backend.id_srando`.
if isinstance(seed, str) and seed == 'default':
_backend.id_srando()
elif hasattr(seed, '__len__'):
state = np.asfortranarray(seed, dtype=float)
if state.shape != (55,):
raise ValueError("invalid input size")
elif state.min() < 0 or state.max() > 1:
raise ValueError("values not in range [0,1]")
_backend.id_srandi(state)
elif seed is None:
_backend.id_srandi(np.random.rand(55))
else:
rnd = np.random.RandomState(seed)
_backend.id_srandi(rnd.rand(55))
def rand(*shape):
"""
Generate standard uniform pseudorandom numbers via a very efficient lagged
Fibonacci method.
This routine is used for all random number generation in this package and
can affect ID and SVD results.
Parameters
----------
*shape
Shape of output array
"""
# For details, see :func:`_backend.id_srand`, and :func:`_backend.id_srando`.
return _backend.id_srand(np.prod(shape)).reshape(shape)
def interp_decomp(A, eps_or_k, rand=True):
"""
Compute ID of a matrix.
An ID of a matrix `A` is a factorization defined by a rank `k`, a column
index array `idx`, and interpolation coefficients `proj` such that::
numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]
The original matrix can then be reconstructed as::
numpy.hstack([A[:,idx[:k]],
numpy.dot(A[:,idx[:k]], proj)]
)[:,numpy.argsort(idx)]
or via the routine :func:`reconstruct_matrix_from_id`. This can
equivalently be written as::
numpy.dot(A[:,idx[:k]],
numpy.hstack([numpy.eye(k), proj])
)[:,np.argsort(idx)]
in terms of the skeleton and interpolation matrices::
B = A[:,idx[:k]]
and::
P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]
respectively. See also :func:`reconstruct_interp_matrix` and
:func:`reconstruct_skel_matrix`.
The ID can be computed to any relative precision or rank (depending on the
value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then
this function has the output signature::
k, idx, proj = interp_decomp(A, eps_or_k)
Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output
signature is::
idx, proj = interp_decomp(A, eps_or_k)
.. This function automatically detects the form of the input parameters
and passes them to the appropriate backend. For details, see
:func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,
:func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,
:func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,
:func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,
:func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,
:func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`
Matrix to be factored
eps_or_k : float or int
Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
approximation.
rand : bool, optional
Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
(randomized algorithms are always used if `A` is of type
:class:`scipy.sparse.linalg.LinearOperator`).
Returns
-------
k : int
Rank required to achieve specified relative precision if
`eps_or_k < 1`.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
"""
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if eps_or_k < 1:
eps = eps_or_k
if rand:
if real:
k, idx, proj = _backend.iddp_aid(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
k, idx, proj = _backend.idzp_aid(eps, A)
else:
if real:
k, idx, proj = _backend.iddp_id(eps, A)
else:
k, idx, proj = _backend.idzp_id(eps, A)
return k, idx - 1, proj
else:
k = int(eps_or_k)
if rand:
if real:
idx, proj = _backend.iddr_aid(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
idx, proj = _backend.idzr_aid(A, k)
else:
if real:
idx, proj = _backend.iddr_id(A, k)
else:
idx, proj = _backend.idzr_id(A, k)
return idx - 1, proj
elif isinstance(A, LinearOperator):
m, n = A.shape
matveca = A.rmatvec
if eps_or_k < 1:
eps = eps_or_k
if real:
k, idx, proj = _backend.iddp_rid(eps, m, n, matveca)
else:
if _IS_32BIT:
raise _32BIT_ERROR
k, idx, proj = _backend.idzp_rid(eps, m, n, matveca)
return k, idx - 1, proj
else:
k = int(eps_or_k)
if real:
idx, proj = _backend.iddr_rid(m, n, matveca, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
idx, proj = _backend.idzr_rid(m, n, matveca, k)
return idx - 1, proj
else:
raise _TYPE_ERROR
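# Illustrative sketch (not part of the original module): the two calling
# conventions of `interp_decomp` documented above, checked against the
# reconstruction formula from its docstring. The 20x30 rank-8 test matrix and
# the tolerance 1e-12 are arbitrary assumptions.
def _example_interp_decomp():
    A = np.dot(np.random.rand(20, 8), np.random.rand(8, 30))  # numerically rank 8
    # Precision mode (eps_or_k < 1): the detected rank is returned as well.
    k, idx, proj = interp_decomp(A, 1e-12)
    # Rank mode (eps_or_k >= 1): the rank is fixed, so only idx and proj return.
    idx_r, proj_r = interp_decomp(A, k)  # same kind of output, rank chosen by caller
    # Reconstruct exactly as in the docstring and compare with A.
    B = A[:, idx[:k]]
    A_approx = np.hstack([B, np.dot(B, proj)])[:, np.argsort(idx)]
    return np.allclose(A, A_approx)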
def reconstruct_matrix_from_id(B, idx, proj):
"""
Reconstruct matrix from its ID.
A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`
and `proj`, respectively, can be reconstructed as::
numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
See also :func:`reconstruct_interp_matrix` and
:func:`reconstruct_skel_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_reconid` and
:func:`_backend.idz_reconid`.
Parameters
----------
B : :class:`numpy.ndarray`
Skeleton matrix.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
:class:`numpy.ndarray`
Reconstructed matrix.
"""
if _is_real(B):
return _backend.idd_reconid(B, idx + 1, proj)
else:
return _backend.idz_reconid(B, idx + 1, proj)
def reconstruct_interp_matrix(idx, proj):
"""
Reconstruct interpolation matrix from ID.
The interpolation matrix can be reconstructed from the ID indices and
coefficients `idx` and `proj`, respectively, as::
P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]
The original matrix can then be reconstructed from its skeleton matrix `B`
via::
numpy.dot(B, P)
See also :func:`reconstruct_matrix_from_id` and
:func:`reconstruct_skel_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_reconint` and
:func:`_backend.idz_reconint`.
Parameters
----------
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
:class:`numpy.ndarray`
Interpolation matrix.
"""
if _is_real(proj):
return _backend.idd_reconint(idx + 1, proj)
else:
return _backend.idz_reconint(idx + 1, proj)
def reconstruct_skel_matrix(A, k, idx):
"""
Reconstruct skeleton matrix from ID.
The skeleton matrix can be reconstructed from the original matrix `A` and its
ID rank and indices `k` and `idx`, respectively, as::
B = A[:,idx[:k]]
The original matrix can then be reconstructed via::
numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
See also :func:`reconstruct_matrix_from_id` and
:func:`reconstruct_interp_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_copycols` and
:func:`_backend.idz_copycols`.
Parameters
----------
A : :class:`numpy.ndarray`
Original matrix.
k : int
Rank of ID.
idx : :class:`numpy.ndarray`
Column index array.
Returns
-------
:class:`numpy.ndarray`
Skeleton matrix.
"""
if _is_real(A):
return _backend.idd_copycols(A, k, idx + 1)
else:
return _backend.idz_copycols(A, k, idx + 1)
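# Illustrative sketch (not part of the original module): chaining the three
# reconstruction helpers above on an ID produced by `interp_decomp`. The test
# matrix and tolerance are arbitrary assumptions.
def _example_reconstruction_helpers():
    A = np.dot(np.random.rand(15, 5), np.random.rand(5, 25))
    k, idx, proj = interp_decomp(A, 1e-12)
    B = reconstruct_skel_matrix(A, k, idx)    # skeleton columns of A
    P = reconstruct_interp_matrix(idx, proj)  # interpolation matrix
    A_from_parts = np.dot(B, P)
    A_from_id = reconstruct_matrix_from_id(B, idx, proj)
    return np.allclose(A, A_from_parts) and np.allclose(A, A_from_id)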
def id_to_svd(B, idx, proj):
"""
Convert ID to SVD.
The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and
coefficients `idx` and `proj`, respectively, is::
U, S, V = id_to_svd(B, idx, proj)
A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
See also :func:`svd`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_id2svd` and
:func:`_backend.idz_id2svd`.
Parameters
----------
B : :class:`numpy.ndarray`
Skeleton matrix.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
U : :class:`numpy.ndarray`
Left singular vectors.
S : :class:`numpy.ndarray`
Singular values.
V : :class:`numpy.ndarray`
Right singular vectors.
"""
if _is_real(B):
U, V, S = _backend.idd_id2svd(B, idx + 1, proj)
else:
U, V, S = _backend.idz_id2svd(B, idx + 1, proj)
return U, S, V
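# Illustrative sketch (not part of the original module): converting an ID into
# an SVD with `id_to_svd` and checking A ~= U * diag(S) * V^H as in the
# docstring. The test matrix is an arbitrary assumption.
def _example_id_to_svd():
    A = np.dot(np.random.rand(12, 4), np.random.rand(4, 18))
    k, idx, proj = interp_decomp(A, 1e-12)
    B = reconstruct_skel_matrix(A, k, idx)
    U, S, V = id_to_svd(B, idx, proj)
    A_approx = np.dot(U, np.dot(np.diag(S), V.conj().T))
    return np.allclose(A, A_approx)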
def estimate_spectral_norm(A, its=20):
"""
Estimate spectral norm of a matrix by the randomized power method.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_snorm` and
:func:`_backend.idz_snorm`.
Parameters
----------
A : :class:`scipy.sparse.linalg.LinearOperator`
Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
`matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
its : int, optional
Number of power method iterations.
Returns
-------
float
Spectral norm estimate.
"""
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
m, n = A.shape
    matvec = lambda x: A.matvec(x)
matveca = lambda x: A.rmatvec(x)
if _is_real(A):
return _backend.idd_snorm(m, n, matveca, matvec, its=its)
else:
return _backend.idz_snorm(m, n, matveca, matvec, its=its)
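# Illustrative sketch (not part of the original module): comparing the
# randomized estimate above with a dense 2-norm. Wrapping the array in a
# LinearOperator mirrors the documented parameter type; the matrix size and
# the loose 10% comparison are arbitrary assumptions.
def _example_estimate_spectral_norm():
    from scipy.sparse.linalg import aslinearoperator
    A = np.random.rand(40, 30)
    est = estimate_spectral_norm(aslinearoperator(A), its=20)
    exact = np.linalg.norm(A, 2)
    return abs(est - exact) <= 0.1 * exact  # usually True; the estimate is randomized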
def estimate_spectral_norm_diff(A, B, its=20):
"""
Estimate spectral norm of the difference of two matrices by the randomized
power method.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and
:func:`_backend.idz_diffsnorm`.
Parameters
----------
A : :class:`scipy.sparse.linalg.LinearOperator`
First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
`matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
B : :class:`scipy.sparse.linalg.LinearOperator`
Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
its : int, optional
Number of power method iterations.
Returns
-------
float
Spectral norm estimate of matrix difference.
"""
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
B = aslinearoperator(B)
m, n = A.shape
    matvec1 = lambda x: A.matvec(x)
    matveca1 = lambda x: A.rmatvec(x)
    matvec2 = lambda x: B.matvec(x)
    matveca2 = lambda x: B.rmatvec(x)
if _is_real(A):
return _backend.idd_diffsnorm(
m, n, matveca1, matveca2, matvec1, matvec2, its=its)
else:
return _backend.idz_diffsnorm(
m, n, matveca1, matveca2, matvec1, matvec2, its=its)
def svd(A, eps_or_k, rand=True):
"""
Compute SVD of a matrix via an ID.
An SVD of a matrix `A` is a factorization::
A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
where `U` and `V` have orthonormal columns and `S` is nonnegative.
The SVD can be computed to any relative precision or rank (depending on the
value of `eps_or_k`).
See also :func:`interp_decomp` and :func:`id_to_svd`.
.. This function automatically detects the form of the input parameters and
passes them to the appropriate backend. For details, see
:func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,
:func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,
:func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,
:func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,
:func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,
:func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
Matrix to be factored, given as either a :class:`numpy.ndarray` or a
:class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
`rmatvec` methods (to apply the matrix and its adjoint).
eps_or_k : float or int
Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
approximation.
rand : bool, optional
Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
(randomized algorithms are always used if `A` is of type
:class:`scipy.sparse.linalg.LinearOperator`).
Returns
-------
U : :class:`numpy.ndarray`
Left singular vectors.
S : :class:`numpy.ndarray`
Singular values.
V : :class:`numpy.ndarray`
Right singular vectors.
"""
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if eps_or_k < 1:
eps = eps_or_k
if rand:
if real:
U, V, S = _backend.iddp_asvd(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzp_asvd(eps, A)
else:
if real:
U, V, S = _backend.iddp_svd(eps, A)
else:
U, V, S = _backend.idzp_svd(eps, A)
else:
k = int(eps_or_k)
if k > min(A.shape):
raise ValueError("Approximation rank %s exceeds min(A.shape) = "
" %s " % (k, min(A.shape)))
if rand:
if real:
U, V, S = _backend.iddr_asvd(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzr_asvd(A, k)
else:
if real:
U, V, S = _backend.iddr_svd(A, k)
else:
U, V, S = _backend.idzr_svd(A, k)
elif isinstance(A, LinearOperator):
m, n = A.shape
matvec = lambda x: A.matvec(x)
matveca = lambda x: A.rmatvec(x)
if eps_or_k < 1:
eps = eps_or_k
if real:
U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec)
else:
k = int(eps_or_k)
if real:
U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k)
else:
raise _TYPE_ERROR
return U, S, V
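# Illustrative sketch (not part of the original module): the ID-based `svd`
# above in rank mode, used to form a rank-k approximation. The matrix sizes
# and the rank are arbitrary assumptions.
def _example_svd():
    A = np.dot(np.random.rand(25, 6), np.random.rand(6, 35))  # numerically rank 6
    k = 6
    U, S, V = svd(A, k)
    A_k = np.dot(U * S, V.conj().T)  # same as U @ diag(S) @ V^H
    return np.allclose(A, A_k)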
def estimate_rank(A, eps):
"""
Estimate matrix rank to a specified relative precision using randomized
methods.
The matrix `A` can be given as either a :class:`numpy.ndarray` or a
:class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
for each case. If `A` is of type :class:`numpy.ndarray`, then the output
rank is typically about 8 higher than the actual numerical rank.
.. This function automatically detects the form of the input parameters and
passes them to the appropriate backend. For details,
see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`,
:func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
Matrix whose rank is to be estimated, given as either a
:class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
with the `rmatvec` method (to apply the matrix adjoint).
eps : float
Relative error for numerical rank definition.
Returns
-------
int
Estimated matrix rank.
"""
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if real:
rank = _backend.idd_estrank(eps, A)
else:
rank = _backend.idz_estrank(eps, A)
if rank == 0:
# special return value for nearly full rank
rank = min(A.shape)
return rank
elif isinstance(A, LinearOperator):
m, n = A.shape
matveca = A.rmatvec
if real:
return _backend.idd_findrank(eps, m, n, matveca)
else:
return _backend.idz_findrank(eps, m, n, matveca)
else:
raise _TYPE_ERROR
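# Illustrative sketch (not part of the original module): estimating the rank
# of a numerically rank-7 matrix. As noted in the docstring, for ndarray input
# the estimate tends to run a few above the true numerical rank, so the check
# below is deliberately one-sided. Sizes and eps are arbitrary assumptions.
def _example_estimate_rank():
    A = np.dot(np.random.rand(60, 7), np.random.rand(7, 50))
    est = estimate_rank(A, 1e-10)
    return est >= 7  # typically within about 8 of the true rank of 7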
| 31.931343 | 92 | 0.628743 | [
"BSD-3-Clause"
] | AtsushiSakai/scipy | scipy/linalg/interpolative.py | 32,091 | Python |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: subscriptionssubscriptionfactory
version_added: '2.9'
short_description: Manage Azure SubscriptionFactory instance.
description:
- 'Create, update and delete instance of Azure SubscriptionFactory.'
options:
enrollment_account_name:
description:
- >-
The name of the enrollment account to which the subscription will be
billed.
required: true
type: str
name:
description:
- The display name of the subscription.
type: str
owners:
description:
- >-
The list of principals that should be granted Owner access on the
subscription. Principals should be of type User, Service Principal or
Security Group.
type: list
suboptions:
object_id:
description:
- Object id of the Principal
required: true
type: str
offer_type:
description:
- >-
The offer type of the subscription. For example, MS-AZR-0017P
(EnterpriseAgreement) and MS-AZR-0148P (EnterpriseAgreement devTest) are
        available. Only valid when creating a subscription in an enrollment
account scope.
type: str
additional_parameters:
description:
- >-
Additional, untyped parameters to support custom subscription creation
scenarios.
type: >-
unknown[DictionaryType
{"$id":"45","$type":"DictionaryType","valueType":{"$id":"46","$type":"PrimaryType","knownPrimaryType":"object","name":{"$id":"47","fixed":false,"raw":"Object"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"48","fixed":false},"deprecated":false}]
subscription_link:
description:
- >-
The link to the new subscription. Use this link to check the status of
subscription creation operation.
type: str
state:
description:
- Assert the state of the SubscriptionFactory.
- >-
        Use C(present) to create or update a SubscriptionFactory and C(absent)
to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: createSubscription
azure.rm.subscriptionssubscriptionfactory:
enrollment_account_name: myEnrollmentAccount
body:
offerType: MS-AZR-0017P
displayName: Test Ea Azure Sub
owners:
- objectId: 973034ff-acb7-409c-b731-e789672c7b31
- objectId: 67439a9e-8519-4016-a630-f5f805eba567
additionalParameters:
customData:
key1: value1
key2: true
'''
RETURN = '''
subscription_link:
description:
- >-
The link to the new subscription. Use this link to check the status of
subscription creation operation.
returned: always
type: str
sample: null
'''
import time
import json
import re
from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# this is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMSubscriptionFactory(AzureRMModuleBaseExt):
def __init__(self):
self.module_arg_spec = dict(
enrollment_account_name=dict(
type='str',
updatable=False,
disposition='enrollmentAccountName',
                required=True
),
name=dict(
type='str',
updatable=False,
disposition='/'
),
owners=dict(
type='list',
disposition='/',
options=dict(
object_id=dict(
type='str',
disposition='objectId',
                        required=True
)
)
),
offer_type=dict(
type='str',
updatable=False,
disposition='/',
choices=['MS-AZR-0017P',
'MS-AZR-0148P']
),
additional_parameters=dict(
type='unknown[DictionaryType {"$id":"45","$type":"DictionaryType","valueType":{"$id":"46","$type":"PrimaryType","knownPrimaryType":"object","name":{"$id":"47","fixed":false,"raw":"Object"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"48","fixed":false},"deprecated":false}]',
updatable=False,
disposition='/'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.enrollment_account_name = None
self.subscription_link = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200, 201, 202]
self.to_do = Actions.NoAction
self.body = {}
self.query_parameters = {}
self.query_parameters['api-version'] = '2018-03-01-preview'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
super(AzureRMSubscriptionFactory, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.body[key] = kwargs[key]
self.inflate_parameters(self.module_arg_spec, self.body, 0)
old_response = None
response = None
self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
base_url=self._cloud_environment.endpoints.resource_manager)
self.url = ('/providers' +
'/Microsoft.Billing' +
'/enrollmentAccounts' +
'/{{ enrollment_account_name }}' +
'/providers' +
'/Microsoft.Subscription' +
'/createSubscription')
        self.url = self.url.replace('{{ enrollment_account_name }}', self.enrollment_account_name)
old_response = self.get_resource()
if not old_response:
self.log("SubscriptionFactory instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log('SubscriptionFactory instance already exists')
if self.state == 'absent':
self.to_do = Actions.Delete
else:
modifiers = {}
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
self.results['modifiers'] = modifiers
self.results['compare'] = []
self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
if not self.default_compare(modifiers, self.body, old_response, '', self.results):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log('Need to Create / Update the SubscriptionFactory instance')
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_resource()
# if not old_response:
self.results['changed'] = True
# else:
# self.results['changed'] = old_response.__ne__(response)
self.log('Creation / Update done')
elif self.to_do == Actions.Delete:
self.log('SubscriptionFactory instance deleted')
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_resource()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_resource():
time.sleep(20)
else:
self.log('SubscriptionFactory instance unchanged')
self.results['changed'] = False
response = old_response
if response:
self.results["subscription_link"] = response["subscription_link"]
return self.results
def create_update_resource(self):
# self.log('Creating / Updating the SubscriptionFactory instance {0}'.format(self.))
try:
if self.to_do == Actions.Create:
response = self.mgmt_client.query(self.url,
'PUT',
self.query_parameters,
self.header_parameters,
self.body,
self.status_code,
600,
30)
else:
response = self.mgmt_client.query(self.url,
'PUT',
self.query_parameters,
self.header_parameters,
self.body,
self.status_code,
600,
30)
except CloudError as exc:
self.log('Error attempting to create the SubscriptionFactory instance.')
self.fail('Error creating the SubscriptionFactory instance: {0}'.format(str(exc)))
try:
response = json.loads(response.text)
except Exception:
response = {'text': response.text}
pass
return response
def delete_resource(self):
# self.log('Deleting the SubscriptionFactory instance {0}'.format(self.))
try:
response = self.mgmt_client.query(self.url,
'DELETE',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
except CloudError as e:
self.log('Error attempting to delete the SubscriptionFactory instance.')
self.fail('Error deleting the SubscriptionFactory instance: {0}'.format(str(e)))
return True
def get_resource(self):
# self.log('Checking if the SubscriptionFactory instance {0} is present'.format(self.))
found = False
try:
response = self.mgmt_client.query(self.url,
'GET',
self.query_parameters,
self.header_parameters,
None,
self.status_code,
600,
30)
found = True
self.log("Response : {0}".format(response))
# self.log("SubscriptionFactory instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the SubscriptionFactory instance.')
if found is True:
return response
return False
def main():
AzureRMSubscriptionFactory()
if __name__ == '__main__':
main()
| 36.099723 | 319 | 0.518339 | [
"MIT"
] | audevbot/autorest.cli.debug | generated/ansible-collection/subscriptionssubscriptionfactory.py | 13,032 | Python |
from __future__ import print_function
import gevent
import gevent.core
import os
import time
filename = 'tmp.test__core_stat.%s' % os.getpid()
hub = gevent.get_hub()
DELAY = 0.5
EV_USE_INOTIFY = getattr(gevent.core, 'EV_USE_INOTIFY', None)
try:
open(filename, 'wb', buffering=0).close()
assert os.path.exists(filename), filename
def write():
f = open(filename, 'wb', buffering=0)
f.write(b'x')
f.close()
start = time.time()
greenlet = gevent.spawn_later(DELAY, write)
# If we don't specify an interval, we default to zero.
# libev interprets that as meaning to use its default interval,
# which is about 5 seconds. If we go below it's minimum check
# threshold, it bumps it up to the minimum.
watcher = hub.loop.stat(filename, interval=-1)
if hasattr(watcher, 'path'):
assert watcher.path == filename
assert watcher.interval == -1
with gevent.Timeout(5 + DELAY + 0.5):
hub.wait(watcher)
reaction = time.time() - start - DELAY
print('Watcher %s reacted after %.4f seconds (write)' % (watcher, reaction))
if reaction >= DELAY and EV_USE_INOTIFY:
print('WARNING: inotify failed (write)')
assert reaction >= 0.0, 'Watcher %s reacted too early (write): %.3fs' % (watcher, reaction)
assert watcher.attr is not None, watcher.attr
assert watcher.prev is not None, watcher.prev
# The watcher interval changed after it started; -1 is illegal
assert watcher.interval != -1
greenlet.join()
gevent.spawn_later(DELAY, os.unlink, filename)
start = time.time()
with gevent.Timeout(5 + DELAY + 0.5):
hub.wait(watcher)
reaction = time.time() - start - DELAY
print('Watcher %s reacted after %.4f seconds (unlink)' % (watcher, reaction))
if reaction >= DELAY and EV_USE_INOTIFY:
print('WARNING: inotify failed (unlink)')
assert reaction >= 0.0, 'Watcher %s reacted too early (unlink): %.3fs' % (watcher, reaction)
assert watcher.attr is None, watcher.attr
assert watcher.prev is not None, watcher.prev
finally:
if os.path.exists(filename):
os.unlink(filename)
| 31.691176 | 96 | 0.664501 | [
"MIT"
] | pubnub/gevent | greentest/test__core_stat.py | 2,155 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.urls import include, url
except ImportError:
from django.conf.urls import include, url # noqa: F401
| 19.3 | 59 | 0.715026 | [
"MIT"
] | Nikolina2112/django-rest-framework-jwt | src/rest_framework_jwt/compat.py | 193 | Python |
#!/usr/bin/env python
import urllib,urllib2
import json
import csv
import time
from datetime import date, timedelta
class Admin:
'''A class of tools for administering AGO Orgs or Portals'''
def __init__(self, username, portal=None, password=None):
from . import User
self.user = User(username, portal, password)
def __users__(self, start=0):
'''Retrieve a single page of users.'''
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/users?' + parameters).read()
users = json.loads(response)
return users
def __roles__(self,start=0):
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/roles?' + parameters).read()
roles = json.loads(response)
return roles
def __groups__(self,start=0):
parameters = urllib.urlencode({'token' : self.user.token,
'q':'orgid:'+ self._getOrgID(),
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups?' + parameters).read()
groups = json.loads(response)
return groups
def getRoles(self):
'''
Returns a list of roles defined in the organization.
This is helpful for custom roles because the User's role property simply returns the ID of the role.
THIS DOES NOT INCLUDE THE STANDARD ARCGIS ONLINE ROLES OF ['org_admin', 'org_publisher', 'org_author', 'org_viewer']
'''
allRoles = []
roles = self.__roles__()
for role in roles['roles']:
allRoles.append(role)
while roles['nextStart'] > 0:
roles=self.__roles__(roles['nextStart'])
for role in roles['roles']:
allRoles.append(role)
return allRoles
def getGroups(self):
'''
Returns a list of groups defined in the organization.
'''
allGroups = []
groups = self.__groups__()
for group in groups['results']:
allGroups.append(group)
        while groups['nextStart'] > 0:
            groups = self.__groups__(groups['nextStart'])
            for group in groups['results']:
                allGroups.append(group)
return allGroups
def findGroup(self,title):
'''
Gets a group object by its title.
'''
parameters = urllib.urlencode({'token' : self.user.token,
'q':'title:'+title,
'f' : 'json'})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups?' + parameters).read()
groupUsers = json.loads(response)
if "results" in groupUsers and len(groupUsers["results"]) > 0:
return groupUsers["results"][0]
else:
return None
def getUsersInGroup(self,groupID):
'''
Returns a list of users in a group
'''
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json'})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups/'+groupID+'/users?' + parameters).read()
groupUsers = json.loads(response)
return groupUsers
def getUsers(self, roles=None, daysToCheck=10000):
'''
Returns a list of all users in the organization (requires admin access).
Optionally provide a list of roles to filter the results (e.g. ['org_publisher']).
Optionally provide a number to include only accounts created in the last x number of days.
'''
#if not roles:
# roles = ['org_admin', 'org_publisher', 'org_user']
#roles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer'] # new roles to support Dec 2013 update
#the role property of a user is either one of the standard roles or a custom role ID. Loop through and build a list of ids from the queried roles.
if roles:
standardRoles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer']
queryRoleIDs=[]
#if it's a standard role, go ahead and add it.
for roleName in roles:
if roleName in standardRoles:
queryRoleIDs.append(roleName)
            #if it's not a standard role, we'll have to look it up to return the ID.
allRoles = self.getRoles()
for role in allRoles:
for roleName in roles:
if roleName == role["name"]:
queryRoleIDs.append(role["id"])
allUsers = []
users = self.__users__()
for user in users['users']:
if roles:
if not user['role'] in queryRoleIDs:
continue
if date.fromtimestamp(float(user['created'])/1000) > date.today()-timedelta(days=daysToCheck):
allUsers.append(user)
while users['nextStart'] > 0:
users = self.__users__(users['nextStart'])
for user in users['users']:
if roles:
if not user['role'] in queryRoleIDs:
continue
if date.fromtimestamp(float(user['created'])/1000) > date.today()-timedelta(days=daysToCheck):
allUsers.append(user)
return allUsers
def createGroup(self,title,snippet=None,description=None,tags=None,access="org",isViewOnly=False,viewOnly=False,inviteOnly=True,thumbnail=None):
'''
Creates a new group
'''
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/createGroup'
parameters ={'token' : self.user.token,
'f' : 'json',
'title' : title,
'description':description,
'snippet':snippet,
'tags':tags,
'access':access,
'isInvitationOnly':inviteOnly,
'isViewOnly':viewOnly,
'thumbnail':thumbnail}
parameters = urllib.urlencode(parameters)
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def createUser(self,username,password,firstName,lastName,email,description,role,provider):
'''
Creates a new user WITHOUT sending an invitation
'''
invitations = [{"username":str(username),
"password":str(password),
"firstname":str(firstName),
"lastname":str(lastName),
"fullname":str(firstName) + " " + str(lastName),
"email":str(email),
"role":str(role)}]
parameters ={'token' : self.user.token,
'f' : 'json',
'subject':'Welcome to the portal',
'html':"blah",
'invitationList':{'invitations':invitations}}
parameters = urllib.urlencode(parameters)
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/invite'
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def addUsersToGroups(self, users, groups):
'''
REQUIRES ADMIN ACCESS
Add organization users to multiple groups and return a list of the status
'''
# Provide one or more usernames in a list.
# e.g. ['user_1', 'user_2']
# Provide one or more group IDs in a list.
# e.g. ['d93aabd856f8459a8905a5bd434d4d4a', 'f84c841a3dfc4591b1ff83281ea5025f']
toolSummary = []
# Assign users to the specified group(s).
parameters = urllib.urlencode({'token': self.user.token, 'f': 'json'})
for group in groups:
# Add Users - REQUIRES POST method (undocumented operation as of 2013-11-12).
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups/' + group + '/addUsers?', 'users=' + ','.join(users) + "&" + parameters).read()
# Users not added will be reported back with each group.
toolSummary.append({group: json.loads(response)})
return toolSummary
def reassignAllUser1ItemsToUser2(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Transfers ownership of all items in userFrom/User1's account to userTo/User2's account, keeping same folder names.
- Does not check for existing folders in userTo's account.
- Does not delete content from userFrom's account.
'''
# request user content for userFrom
# response contains list of items in root folder and list of all folders
parameters = urllib.urlencode({'token': self.user.token, 'f': 'json'})
request = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '?' + parameters
userContent = json.loads(urllib.urlopen(request).read())
# create same folders in userTo's account like those in userFrom's account
for folder in userContent['folders']:
parameters2 = urllib.urlencode({'title' : folder['title'], 'token': self.user.token, 'f': 'json'})
request2 = self.user.portalUrl + '/sharing/rest/content/users/' + userTo + '/createFolder?'
response2 = urllib.urlopen(request2, parameters2).read() # requires POST
# keep track of items and folders
numberOfItems = 0
numberOfFolders = 1
# change ownership of items in ROOT folder
for item in userContent['items']:
parameters3 = urllib.urlencode({'targetUsername' : userTo, 'targetFoldername' : '/', 'token': self.user.token, 'f': 'json'})
request3 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/items/' + item['id'] + '/reassign?'
response3 = urllib.urlopen(request3, parameters3).read() # requires POST
if 'success' in response3:
numberOfItems += 1
### change ownership of items in SUBFOLDERS (nested loop)
# request content in current folder
for folder in userContent['folders']:
parameters4 = urllib.urlencode({'token': self.user.token, 'f': 'json'})
request4 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/' + folder['id'] + '?' + parameters4
folderContent = json.loads(urllib.urlopen(request4).read())
numberOfFolders += 1
# change ownership of items in CURRENT folder to userTo and put in correct folder
for item in folderContent['items']:
parameters5 = urllib.urlencode({'targetUsername' : userTo, 'targetFoldername' : folder['title'], 'token': self.user.token, 'f': 'pjson'})
request5 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/' + folder['id'] + '/items/' + item['id'] + '/reassign?'
response5 = urllib.urlopen(request5, parameters5).read() # requires POST
numberOfItems += 1
# summarize results
print ' ' + str(numberOfItems) + ' ITEMS in ' + str(numberOfFolders) + ' FOLDERS (incl. Home folder) copied'
print ' from USER ' + userFrom + ' to USER ' + userTo
return
def reassignGroupOwnership(self,groupId,userTo):
parameters ={'token' : self.user.token,
'f' : 'json',
'targetUsername':userTo}
parameters = urllib.urlencode(parameters)
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/groups/'+groupId+'/reassign'
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def reassignAllGroupOwnership(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all groups between a pair of accounts.
'''
groups = 0
groupsReassigned = 0
# Get list of userFrom's groups
print 'Requesting ' + userFrom + "'s group info from ArcGIS Online...",
parameters = urllib.urlencode({'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/users/' + userFrom + '?' + parameters
response = urllib.urlopen(request).read()
userFromContent = json.loads(response)
print 'RECEIVED!'
# Determine if userFrom is group owner and, if so, transfer ownership to userTo
print 'Checking groups...',
for group in userFromContent['groups']:
print '.',
groups += 1
if group['owner'] == userFrom:
parameters = urllib.urlencode({'targetUsername' : userTo, 'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/groups/' + group['id'] + '/reassign?'
response = urllib.urlopen(request, parameters).read() # requires POST
if 'success' in response:
groupsReassigned += 1
# Report results
print
print ' CHECKED ' + str(groups) + ' groups ASSOCIATED with ' + userFrom + '.'
print ' REASSIGNED ' + str(groupsReassigned) + ' groups OWNED by ' + userFrom + ' to ' + userTo + '.'
return
def addUser2ToAllUser1Groups(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Adds userTo/User2 to all groups that userFrom/User1 is a member
'''
groups = 0
groupsOwned = 0
groupsAdded = 0
# Get list of userFrom's groups
parameters = urllib.urlencode({'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/users/' + userFrom + '?' + parameters
response = urllib.urlopen(request).read()
userFromContent = json.loads(response)
# Add userTo to each group that userFrom's is a member, but not an owner
for group in userFromContent['groups']:
groups += 1
if group['owner'] == userFrom:
groupsOwned += 1
else:
parameters = urllib.urlencode({'users' : userTo, 'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/groups/' + group['id'] + '/addUsers?'
response = urllib.urlopen(request, parameters).read() # requires POST
if '[]' in response: # This currently undocumented operation does not correctly return "success"
groupsAdded += 1
print ' CHECKED ' + str(groups) + ' groups associated with ' + userFrom + ':'
print ' ' + userFrom + ' OWNS ' + str(groupsOwned) + ' groups (' + userTo + ' NOT added).'
print ' ' + userTo + ' is already a MEMBER of ' + str(groups-groupsOwned-groupsAdded) + ' groups.'
print ' ' + userTo + ' was ADDED to ' + str(groupsAdded) + ' groups.'
return
def migrateAccount(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all content items and groups from userFrom to userTo.
Also adds userTo to all groups which userFrom is a member.
'''
print 'Copying all items from ' + userFrom + ' to ' + userTo + '...'
        self.reassignAllUser1ItemsToUser2(userFrom, userTo)
print
print 'Reassigning groups owned by ' + userFrom + ' to ' + userTo + '...'
        self.reassignAllGroupOwnership(userFrom, userTo)
print
print 'Adding ' + userTo + ' as a member of ' + userFrom + "'s groups..."
        self.addUser2ToAllUser1Groups(userFrom, userTo)
return
def migrateAccounts(self, pathUserMappingCSV):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all content items and groups between pairs of accounts specified in a CSV file.
Also adds userTo to all groups which userFrom is a member.
This function batches migrateAccount using a CSV to feed in the accounts to migrate from/to,
the CSV should have two columns (no column headers/labels): col1=userFrom, col2=userTo)
'''
with open(pathUserMappingCSV, 'rb') as userMappingCSV:
userMapping = csv.reader(userMappingCSV)
for user in userMapping:
userFrom = user[0]
userTo = user[1]
print '=========='
print 'Copying all items from ' + userFrom + ' to ' + userTo + '...'
                self.reassignAllUser1ItemsToUser2(userFrom, userTo)
print
print 'Reassigning groups owned by ' + userFrom + ' to ' + userTo + '...'
                self.reassignAllGroupOwnership(userFrom, userTo)
print
print 'Adding ' + userTo + ' as a member of ' + userFrom + "'s groups..."
                self.addUser2ToAllUser1Groups(userFrom, userTo)
print '=========='
return
def updateServiceItemsThumbnail(self, folder=None):
'''
Fetches catalog of items in portal. If there is no thumbnail, assigns the default.
'''
if(folder!=None):
catalog = self.AGOLUserCatalog(folder,False)
else:
catalog=self.AGOLCatalog(None)
for r in catalog:
if(r.thumbnail==None):
parameters = urllib.urlencode({'thumbnailURL' : 'http://static.arcgis.com/images/desktopapp.png', 'token' : self.user.token, 'f' : 'json'})
requestToUpdate = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '/items/' +r.id + '/update'
try:
print ("updating " + r.title + " with thumbnail.")
response = urllib.urlopen(requestToUpdate, parameters ).read()
jresult = json.loads(response)
                except Exception:
                    pass  # leave the item unchanged if the thumbnail update fails
return None
def registerItems (self, mapservices, folder=''):
'''
Given a set of AGOL items, register them to the portal,
optionally to a specific folder.
'''
self.servicesToRegister=mapservices
if folder==None:
folder=''
icount=0
i=0
for ms in self.servicesToRegister.service_list:
i = i +1
sURL=ms.url
sTitle=ms.title
if ms.thumbnail==None:
sThumbnail ='http://static.arcgis.com/images/desktopapp.png'
elif ms.id !=None:
sThumbnail ="http://www.arcgis.com/sharing/content/items/" + ms.id + "/info/" + ms.thumbnail
else:
sThumbnail='http://static.arcgis.com/images/desktopapp.png'
#todo, handle map service exports
sTags = 'mapping' if ms.tags==None else ms.tags
sType= 'Map Service' if ms.type==None else ms.type
sDescription = '' if ms.description==None else ms.description
sSnippet = '' if ms.snippet ==None else ms.snippet
sExtent = '' if ms.extent==None else ms.extent
sSpatialReference='' if ms.spatialReference==None else ms.spatialReference
sAccessInfo='' if ms.accessInformation==None else ms.accessInformation
sLicenseInfo='' if ms.licenseInfo==None else ms.licenseInfo
sCulture='' if ms.culture == None else ms.culture
parameters = urllib.urlencode({'URL' : sURL,
'title' : sTitle,
'thumbnailURL' : sThumbnail,
'tags' : sTags,
'description' : sDescription,
'snippet': sSnippet,
'extent':sExtent,
'spatialReference':sSpatialReference,
'accessInformation': sAccessInfo,
'licenseInfo': sLicenseInfo,
'culture': sCulture,
'type' : sType,
'token' : self.user.token,
'f' : 'json'})
#todo- use export map on map service items for thumbnail
requestToAdd = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + folder + '/addItem'
try:
if(sType.find('Service')>=0 or sType.find('Web Mapping Application')>=0):
response = urllib.urlopen(requestToAdd, parameters ).read()
jresult = json.loads(response)
print str(i) + ") " + ms.title + ": success= " + str(jresult["success"]) + "," + ms.url + ", " + "(" + jresult["id"] + ")"
if jresult["success"]:
icount=icount+1
except:
print str(i) + ") " + ms.title + ':error!'
print str(icount) + " item(s) added."
def getFolderID(self, folderName):
'''
Return the ID of the folder with the given name.
'''
folders = self._getUserFolders()
for f in folders:
if str(f['title']) == folderName:
return str(f['id'])
return ''
def _getUserFolders(self):
'''
Return all folder objects.
'''
        requestToAdd = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '?f=json&token=' + self.user.token
response = urllib.urlopen(requestToAdd).read()
jresult = json.loads(response)
return jresult["folders"]
def deleteGroup(self,groupid):
'''
Deletes group
'''
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/groups/'+groupid+'/delete'
parameters ={'token' : self.user.token,
'f' : 'json'}
parameters = urllib.urlencode(parameters)
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def clearGroup(self, groupid):
'''
Unshare all content from the specified group.
CAUTION
'''
groupcatalog = self.AGOLGroupCatalog(groupid)
sItems=''
for f in groupcatalog:
requestToDelete = self.user.portalUrl + '/sharing/rest/content/items/' + f.id + "/unshare?groups=" + groupid
parameters = urllib.urlencode({
'token' : self.user.token,
'f' : 'json'})
print "Unsharing " + f.title
response = urllib.urlopen(requestToDelete,parameters).read()
jresult = json.loads(response)
print "Complete."
return None
def clearFolder(self, folderid):
'''
Delete all content from the specified folder.
CAUTION
'''
foldercatalog = self.AGOLUserCatalog(folderid)
sItems=''
for f in foldercatalog:
sItems+= f.id + ","
if len(sItems)>0: sItems=sItems[:-1]
requestToDelete = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + "/deleteItems"
parameters = urllib.urlencode({'items':sItems,
'token' : self.user.token,
'f' : 'json'})
print "Deleting " + str(len(foldercatalog)) + " items..."
response = urllib.urlopen(requestToDelete,parameters).read()
jresult = json.loads(response)
print "Complete."
return None
def AGOLGroupCatalog(self, groupid):
'''
        Return the catalog of items in the designated group.
'''
sCatalogURL=self.user.portalUrl + "/sharing/rest/search?q=%20group%3A" + groupid + "%20-type:%22Code%20Attachment%22%20-type:%22Featured%20Items%22%20-type:%22Symbol%20Set%22%20-type:%22Color%20Set%22%20-type:%22Windows%20Viewer%20Add%20In%22%20-type:%22Windows%20Viewer%20Configuration%22%20%20-type:%22Code%20Attachment%22%20-type:%22Featured%20Items%22%20-type:%22Symbol%20Set%22%20-type:%22Color%20Set%22%20-type:%22Windows%20Viewer%20Add%20In%22%20-type:%22Windows%20Viewer%20Configuration%22%20&num=100&sortField=title&sortOrder=asc"
return self.AGOLCatalog(None,None,sCatalogURL)
def AGOLUserCatalog(self, folder, includeSize=False):
'''
Return the catalog of CURRENT USER's items from portal, optionally from only a folder.
'''
sCatalogURL = self.user.portalUrl + "/sharing/rest/content/users/" + self.user.username + folder
return self.AGOLCatalog(None,None,sCatalogURL)
def AGOLCatalog(self, query=None, includeSize=False, sCatalogURL=None):
'''
Return all items from all users in a portal, optionally matching a
specified query.
optionally make the additional requests for SIZE.
sCatalogURL can be specified to use a specific folder
'''
resultCount = 0
searchURL = ""
viewURL = ""
orgID = ""
self.sFullSearch = ""
self.bIncludeSize=includeSize
self.orgID = self._getOrgID()
self.catalogURL=sCatalogURL #for cataloging folders
if self.user.portalUrl != None:
self.searchURL = self.user.portalUrl + "/sharing/rest"
self.viewURL = self.user.portalUrl + "/home/item.html?id="
self.query = query
pList=[]
allResults = []
sQuery=self._getCatalogQuery(1,100)#get first batch
print("fetching records 1-100...")
response = urllib.urlopen(sQuery).read()
jresult=json.loads(response)
nextRecord = jresult['nextStart']
totalRecords = jresult['total']
num = jresult['num']
start =jresult['start']
#if this is a folder catalog, use items, not results
sItemsProperty = 'results'
if self.catalogURL!=None and str(self.catalogURL).find("/sharing/rest/content/users/")>0: sItemsProperty='items'
pList = AGOLItems( jresult[sItemsProperty])
for r in pList.AGOLItems_list:
r.itemURL = self.viewURL + r.id
r.created = time.strftime("%Y-%m-%d",time.gmtime(r.created/1000))
r.modified = time.strftime("%Y-%m-%d",time.gmtime(r.modified/1000))
if r.size== -1:
r.size=0
r.size = self._getSize(r)
            r.myRowID = len(allResults) + 1
allResults.append(r)
if (nextRecord>0):
while(nextRecord>0):
sQuery = self._getCatalogQuery(nextRecord, 100)
print("fetching records " + str(nextRecord) + "-" + str(nextRecord+100) + "...")
response = urllib.urlopen(sQuery).read()
jresult=json.loads(response)
nextRecord = jresult['nextStart']
totalRecords = jresult['total']
num = jresult['num']
start =jresult['start']
pList = AGOLItems( jresult['results'])
for r in pList.AGOLItems_list:
r.itemURL = self.viewURL + r.id
r.created = time.strftime("%Y-%m-%d",time.gmtime(r.created/1000))
r.modified = time.strftime("%Y-%m-%d",time.gmtime(r.modified/1000))
if r.size== -1:
r.size=0
r.size = self._getSize(r)
                    r.myRowID = len(allResults) + 1
allResults.append(r)
return allResults
def _getSize(self, r):
'''
Issue query for item size.
'''
if(self.bIncludeSize != True):
return 0
print ("fetching size for " + r.title + " (" + r.type + ")")
result=0
        sURL = self.searchURL + "/content/items/" + str(r.id) + "?f=json&token=" + self.user.token
response = urllib.urlopen(sURL).read()
result = json.loads(response)['size']
if(result>0):
result = result/1024
else:
result=0
return result
def _getOrgID(self):
'''
Return the organization's ID.
'''
sURL = self.user.portalUrl + "/sharing/rest/portals/self?f=json&token=" + self.user.token
response = urllib.urlopen(sURL).read()
return str(json.loads(response)['id'])
def _getCatalogQuery(self, start, num):
'''
Format a content query from specified start and number of records.
'''
sQuery=None
if self.query != None:
sQuery = self.query
else:
sQuery = self.sFullSearch
if(self.catalogURL==None):
sCatalogQuery = self.searchURL + "/search?q=" + sQuery
if self.orgID != None:
sCatalogQuery += " orgid:" + self.orgID
else:
#check to ensure ? vs &
if(str(self.catalogURL).find('?')<0):
char="?"
else:
char="&"
sCatalogQuery = self.catalogURL + char + "ts=1"
sCatalogQuery += "&f=json&num="+ str(num) + "&start=" + str(start)
sCatalogQuery += "&token=" + self.user.token
return sCatalogQuery
def updateUserRoles(self, users):
self.usersToUpdate=users
requestToUpdate= self.user.portalUrl + '/sharing/rest/portals/self/updateuserrole'
for u in self.usersToUpdate.user_list:
parameters = urllib.urlencode({'user':u.Username,
'role':u.Role,
'token' : self.user.token,
'f' : 'json'})
print "Updating Role for " + u.Username + " to " + u.Role + "..."
response = urllib.urlopen(requestToUpdate,parameters).read()
jresult = json.loads(response)
success= str(jresult["success"])
print "Success: " + success
print "Complete."
return None
#collection of AGOLItem
class AGOLItems:
def __init__ (self, item_list):
self.AGOLItems_list=[]
for item in item_list:
self.AGOLItems_list.append(AGOLItem(item))
#AGOL item
class AGOLItem:
def __init__(self, item_attributes):
for k, v in item_attributes.items():
setattr(self, k, v)
#collection of Map Services
class MapServices:
def __init__ (self, import_list):
self.service_list=[]
for service in import_list:
self.service_list.append(MapService(service))
#Map Service
class MapService:
def __init__(self, service_attributes):
for k, v in service_attributes.items():
setattr(self, k, v)
#Collection of Usernames and roles
class UsersAttributes:
def __init__ (self, import_list):
self.user_list=[]
for user in import_list:
self.user_list.append(UserAttributes(user))
class UserAttributes:
def __init__(self, user_attributes):
for k, v in user_attributes.items():
setattr(self, k, v)
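# Illustrative sketch (not part of the original module): one way the Admin class
# above might be driven from a script. The portal URL, credentials, role name and
# group title below are placeholder assumptions, and the calls require an account
# with administrative privileges.
def _example_admin_usage():
    admin = Admin('portal_admin', 'https://myorg.maps.arcgis.com', 's3cret')  # hypothetical credentials
    recent_publishers = admin.getUsers(roles=['org_publisher'], daysToCheck=30)
    group = admin.findGroup('Open Data')  # hypothetical group title
    if group is not None and len(recent_publishers) > 0:
        usernames = [u['username'] for u in recent_publishers]
        return admin.addUsersToGroups(usernames, [group['id']])
    return None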
| 41.250636 | 547 | 0.559603 | [
"Apache-2.0"
] | SpatialStrout/ago-tools | admin.py | 32,423 | Python |
import re
from typing import TypeVar
import questionary
EnumType = TypeVar("EnumType")
# Convert camelCase names to snake_case
def camel_to_snake(text: str) -> str:
return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower()
# Convert snake_case names to camelCase
def snake_to_camel(text: str) -> str:
return text.split('_')[0] + "".join(x.title() for x in text.split('_')[1:])
# Convert camelCase names to PascalCase
def camel_to_pascal(text: str) -> str:
return text[0].upper() + text[1:]
def question(choices: EnumType) -> questionary.Question:
prompt = camel_to_snake(choices.__name__).replace("_", " ") # type: ignore
return questionary.select(f"Select the {prompt}: ", choices=list(choices))
def binary_question(option: str) -> questionary.Question:
return questionary.confirm(f"Do you want {option}?", default=False)
def text_question(default: str) -> questionary.Question:
    return questionary.text("What is the name of the database you want to create? ", default=default)
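# Illustrative sketch (not part of the original module): what the case converters
# above produce. The sample identifiers are arbitrary assumptions.
def _example_case_conversions() -> None:
    assert camel_to_snake("FastApiBuilder") == "fast_api_builder"
    assert snake_to_camel("fast_api_builder") == "fastApiBuilder"
    assert camel_to_pascal("fastApiBuilder") == "FastApiBuilder"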
| 28 | 94 | 0.691558 | [
"MIT"
] | fmw666/fastapi-builder | fastapi_builder/helpers.py | 980 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NetworkVirtualApplianceArgs', 'NetworkVirtualAppliance']
@pulumi.input_type
class NetworkVirtualApplianceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cloud_init_configuration: Optional[pulumi.Input[str]] = None,
cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
nva_sku: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None):
"""
The set of arguments for constructing a NetworkVirtualAppliance resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
:param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
:param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
:param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if boot_strap_configuration_blobs is not None:
pulumi.set(__self__, "boot_strap_configuration_blobs", boot_strap_configuration_blobs)
if cloud_init_configuration is not None:
pulumi.set(__self__, "cloud_init_configuration", cloud_init_configuration)
if cloud_init_configuration_blobs is not None:
pulumi.set(__self__, "cloud_init_configuration_blobs", cloud_init_configuration_blobs)
if id is not None:
pulumi.set(__self__, "id", id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if network_virtual_appliance_name is not None:
pulumi.set(__self__, "network_virtual_appliance_name", network_virtual_appliance_name)
if nva_sku is not None:
pulumi.set(__self__, "nva_sku", nva_sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_appliance_asn is not None:
pulumi.set(__self__, "virtual_appliance_asn", virtual_appliance_asn)
if virtual_hub is not None:
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="bootStrapConfigurationBlobs")
def boot_strap_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
BootStrapConfigurationBlobs storage URLs.
"""
return pulumi.get(self, "boot_strap_configuration_blobs")
@boot_strap_configuration_blobs.setter
def boot_strap_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "boot_strap_configuration_blobs", value)
@property
@pulumi.getter(name="cloudInitConfiguration")
def cloud_init_configuration(self) -> Optional[pulumi.Input[str]]:
"""
CloudInitConfiguration string in plain text.
"""
return pulumi.get(self, "cloud_init_configuration")
@cloud_init_configuration.setter
def cloud_init_configuration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cloud_init_configuration", value)
@property
@pulumi.getter(name="cloudInitConfigurationBlobs")
def cloud_init_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
CloudInitConfigurationBlob storage URLs.
"""
return pulumi.get(self, "cloud_init_configuration_blobs")
@cloud_init_configuration_blobs.setter
def cloud_init_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "cloud_init_configuration_blobs", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
"""
The service principal that has read access to cloud-init and config blob.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="networkVirtualApplianceName")
def network_virtual_appliance_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of Network Virtual Appliance.
"""
return pulumi.get(self, "network_virtual_appliance_name")
@network_virtual_appliance_name.setter
def network_virtual_appliance_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_virtual_appliance_name", value)
@property
@pulumi.getter(name="nvaSku")
def nva_sku(self) -> Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]:
"""
Network Virtual Appliance SKU.
"""
return pulumi.get(self, "nva_sku")
@nva_sku.setter
def nva_sku(self, value: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]):
pulumi.set(self, "nva_sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualApplianceAsn")
def virtual_appliance_asn(self) -> Optional[pulumi.Input[float]]:
"""
VirtualAppliance ASN.
"""
return pulumi.get(self, "virtual_appliance_asn")
@virtual_appliance_asn.setter
def virtual_appliance_asn(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "virtual_appliance_asn", value)
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The Virtual Hub where Network Virtual Appliance is being deployed.
"""
return pulumi.get(self, "virtual_hub")
@virtual_hub.setter
def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "virtual_hub", value)
class NetworkVirtualAppliance(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cloud_init_configuration: Optional[pulumi.Input[str]] = None,
cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
__props__=None):
"""
NetworkVirtualAppliance Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
:param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob.
:param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] network_virtual_appliance_name: The name of the Network Virtual Appliance.
:param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where the Network Virtual Appliance is being deployed.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NetworkVirtualApplianceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
NetworkVirtualAppliance Resource.
:param str resource_name: The name of the resource.
:param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NetworkVirtualApplianceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
cloud_init_configuration: Optional[pulumi.Input[str]] = None,
cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
network_virtual_appliance_name: Optional[pulumi.Input[str]] = None,
nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_appliance_asn: Optional[pulumi.Input[float]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
__props__.__dict__["boot_strap_configuration_blobs"] = boot_strap_configuration_blobs
__props__.__dict__["cloud_init_configuration"] = cloud_init_configuration
__props__.__dict__["cloud_init_configuration_blobs"] = cloud_init_configuration_blobs
__props__.__dict__["id"] = id
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["network_virtual_appliance_name"] = network_virtual_appliance_name
__props__.__dict__["nva_sku"] = nva_sku
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_appliance_asn"] = virtual_appliance_asn
__props__.__dict__["virtual_hub"] = virtual_hub
__props__.__dict__["address_prefix"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["inbound_security_rules"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_appliance_nics"] = None
__props__.__dict__["virtual_appliance_sites"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20201101:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210301:NetworkVirtualAppliance")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NetworkVirtualAppliance, __self__).__init__(
'azure-native:network/v20201101:NetworkVirtualAppliance',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkVirtualAppliance':
"""
Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
__props__.__dict__["address_prefix"] = None
__props__.__dict__["boot_strap_configuration_blobs"] = None
__props__.__dict__["cloud_init_configuration"] = None
__props__.__dict__["cloud_init_configuration_blobs"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["inbound_security_rules"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["nva_sku"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_appliance_asn"] = None
__props__.__dict__["virtual_appliance_nics"] = None
__props__.__dict__["virtual_appliance_sites"] = None
__props__.__dict__["virtual_hub"] = None
return NetworkVirtualAppliance(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> pulumi.Output[str]:
"""
Address Prefix.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter(name="bootStrapConfigurationBlobs")
def boot_strap_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
BootStrapConfigurationBlobs storage URLs.
"""
return pulumi.get(self, "boot_strap_configuration_blobs")
@property
@pulumi.getter(name="cloudInitConfiguration")
def cloud_init_configuration(self) -> pulumi.Output[Optional[str]]:
"""
CloudInitConfiguration string in plain text.
"""
return pulumi.get(self, "cloud_init_configuration")
@property
@pulumi.getter(name="cloudInitConfigurationBlobs")
def cloud_init_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
CloudInitConfigurationBlob storage URLs.
"""
return pulumi.get(self, "cloud_init_configuration_blobs")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
"""
The service principal that has read access to cloud-init and config blob.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="inboundSecurityRules")
def inbound_security_rules(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
"""
List of references to InboundSecurityRules.
"""
return pulumi.get(self, "inbound_security_rules")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nvaSku")
def nva_sku(self) -> pulumi.Output[Optional['outputs.VirtualApplianceSkuPropertiesResponse']]:
"""
Network Virtual Appliance SKU.
"""
return pulumi.get(self, "nva_sku")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualApplianceAsn")
def virtual_appliance_asn(self) -> pulumi.Output[Optional[float]]:
"""
VirtualAppliance ASN.
"""
return pulumi.get(self, "virtual_appliance_asn")
@property
@pulumi.getter(name="virtualApplianceNics")
def virtual_appliance_nics(self) -> pulumi.Output[Sequence['outputs.VirtualApplianceNicPropertiesResponse']]:
"""
List of Virtual Appliance Network Interfaces.
"""
return pulumi.get(self, "virtual_appliance_nics")
@property
@pulumi.getter(name="virtualApplianceSites")
def virtual_appliance_sites(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
"""
List of references to VirtualApplianceSite.
"""
return pulumi.get(self, "virtual_appliance_sites")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
        The Virtual Hub where the Network Virtual Appliance is being deployed.
"""
return pulumi.get(self, "virtual_hub")
| 47.744 | 1,682 | 0.680379 | ["Apache-2.0"] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | 23,872 | Python |
"""Integration tests for Glesys"""
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import IntegrationTestsV1
# Hook into the testing framework by inheriting unittest.TestCase and reuse
# the tests that *each and every* provider implementation must pass, by
# inheriting from IntegrationTestsV1.
# TODO: migrate to IntegrationTestsV2 and its extended test suite
class GlesysProviderTests(TestCase, IntegrationTestsV1):
"""TestCase for Glesys"""
provider_name = 'glesys'
domain = "capsulecd.com"
def _filter_headers(self):
return ['Authorization']
# TODO: enable the skipped tests
@pytest.mark.skip(reason="new test, missing recording")
def test_provider_when_calling_update_record_should_modify_record_name_specified(self):
return
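# ---------------------------------------------------------------------------
# Editorial note, not part of the original test module. The inherited
# IntegrationTestsV1 suite replays pre-recorded HTTP cassettes, so the skipped
# test above stays skipped until a recording exists. While iterating on this
# provider, a typical invocation (from a repository checkout) would be:
#
#     pytest lexicon/tests/providers/test_glesys.py
#
# `_filter_headers` keeps the GleSYS Authorization header out of any newly
# recorded cassettes.
# ---------------------------------------------------------------------------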
| 34.583333 | 91 | 0.774699 | ["MIT"] | HelixEducation/lexicon | lexicon/tests/providers/test_glesys.py | 830 | Python |
from datetime import timedelta
from random import randint
from ichnaea.data.tasks import (
monitor_api_key_limits,
monitor_api_users,
monitor_queue_size,
)
from ichnaea import util
class TestMonitor(object):
def test_monitor_api_keys_empty(self, celery, stats):
monitor_api_key_limits.delay().get()
stats.check(gauge=[('api.limit', 0)])
def test_monitor_api_keys_one(self, celery, redis, stats):
today = util.utcnow().strftime('%Y%m%d')
rate_key = 'apilimit:no_key_1:v1.geolocate:' + today
redis.incr(rate_key, 13)
monitor_api_key_limits.delay().get()
stats.check(gauge=[
('api.limit', ['key:no_key_1', 'path:v1.geolocate']),
])
def test_monitor_api_keys_multiple(self, celery, redis, stats):
now = util.utcnow()
today = now.strftime('%Y%m%d')
yesterday = (now - timedelta(hours=24)).strftime('%Y%m%d')
data = {
'test': {'v1.search': 11, 'v1.geolocate': 13},
'no_key_1': {'v1.search': 12},
'no_key_2': {'v1.geolocate': 15},
}
for key, paths in data.items():
for path, value in paths.items():
rate_key = 'apilimit:%s:%s:%s' % (key, path, today)
redis.incr(rate_key, value)
rate_key = 'apilimit:%s:%s:%s' % (key, path, yesterday)
redis.incr(rate_key, value - 10)
# add some other items into Redis
redis.lpush('default', 1, 2)
redis.set('cache_something', '{}')
monitor_api_key_limits.delay().get()
stats.check(gauge=[
('api.limit', ['key:test', 'path:v1.geolocate']),
('api.limit', ['key:test', 'path:v1.search']),
('api.limit', ['key:no_key_1', 'path:v1.search']),
('api.limit', ['key:no_key_2', 'path:v1.geolocate']),
])
def test_monitor_queue_size(self, celery, redis, stats):
data = {
'export_queue_internal': 3,
'export_queue_backup:abcd-ef-1234': 7,
}
for name in celery.all_queues:
data[name] = randint(1, 10)
for k, v in data.items():
redis.lpush(k, *range(v))
monitor_queue_size.delay().get()
stats.check(
gauge=[('queue', 1, v, ['queue:' + k]) for k, v in data.items()])
class TestMonitorAPIUsers(object):
@property
def today(self):
return util.utcnow().date()
@property
def today_str(self):
return self.today.strftime('%Y-%m-%d')
def test_empty(self, celery, stats):
monitor_api_users.delay().get()
stats.check(gauge=[('submit.user', 0), ('locate.user', 0)])
def test_one_day(self, celery, geoip_data, redis, stats):
bhutan_ip = geoip_data['Bhutan']['ip']
london_ip = geoip_data['London']['ip']
redis.pfadd(
'apiuser:submit:test:' + self.today_str, bhutan_ip, london_ip)
redis.pfadd(
'apiuser:submit:valid_key:' + self.today_str, bhutan_ip)
redis.pfadd(
'apiuser:locate:valid_key:' + self.today_str, bhutan_ip)
monitor_api_users.delay().get()
stats.check(gauge=[
('submit.user', 1, 2, ['key:test', 'interval:1d']),
('submit.user', 1, 2, ['key:test', 'interval:7d']),
('submit.user', 1, 1, ['key:valid_key', 'interval:1d']),
('submit.user', 1, 1, ['key:valid_key', 'interval:7d']),
('locate.user', 1, 1, ['key:valid_key', 'interval:1d']),
('locate.user', 1, 1, ['key:valid_key', 'interval:7d']),
])
def test_many_days(self, celery, geoip_data, redis, stats):
bhutan_ip = geoip_data['Bhutan']['ip']
london_ip = geoip_data['London']['ip']
days_6 = (self.today - timedelta(days=6)).strftime('%Y-%m-%d')
days_7 = (self.today - timedelta(days=7)).strftime('%Y-%m-%d')
redis.pfadd(
'apiuser:submit:test:' + self.today_str, '127.0.0.1', bhutan_ip)
# add the same IPs + one new one again
redis.pfadd(
'apiuser:submit:test:' + days_6, '127.0.0.1', bhutan_ip, london_ip)
# add one entry which is too old
redis.pfadd(
'apiuser:submit:test:' + days_7, bhutan_ip)
monitor_api_users.delay().get()
stats.check(gauge=[
('submit.user', 1, 2, ['key:test', 'interval:1d']),
# we count unique IPs over the entire 7 day period,
# so it's just 3 uniques
('submit.user', 1, 3, ['key:test', 'interval:7d']),
])
# the too old key was deleted manually
assert not redis.exists('apiuser:submit:test:' + days_7)
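# ---------------------------------------------------------------------------
# Editorial sketch, not part of the original test module: the Redis key layout
# these monitor tasks consume, shown with plain redis-py calls. The key
# patterns mirror the tests above; the client and addresses are placeholders.
# ---------------------------------------------------------------------------
def _example_key_layout(redis_client, day='2018-01-01'):
    # Rate-limit counters are plain integers per API key, endpoint and day
    # (note the compact YYYYMMDD date form used by the apilimit keys).
    redis_client.incr('apilimit:valid_key:v1.geolocate:' + day.replace('-', ''), 13)
    # Unique-user tracking uses a HyperLogLog per API key and day, so unique
    # IP counts over several days are a cheap pfcount across multiple keys.
    redis_client.pfadd('apiuser:locate:valid_key:' + day, '10.0.0.1', '10.0.0.2')
    return redis_client.pfcount('apiuser:locate:valid_key:' + day)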
| 36.223077 | 79 | 0.561478 | ["Apache-2.0"] | BBOXX/ichnaea | ichnaea/data/tests/test_monitor.py | 4,709 | Python |
"""
# =============================================================================
# Creates the stiffness matrix as requested, using the material properties
# provided in the TPD file (for v2020 files).
#
# Author: William Hunter, Tarcísio L. de Oliveira
# Copyright (C) 2008, 2015, William Hunter.
# Copyright (C) 2020, 2021, Tarcísio L. de Oliveira
# =============================================================================
"""
from __future__ import division
import os
from sympy import symbols, Matrix, diff, integrate, zeros
from numpy import abs, array
from ..utils import get_logger
logger = get_logger(__name__)
def create_K(_L, _E, _nu, _k, _t):
# Initialize variables
_a, _b, _c = _L, _L, _L # element dimensions (half-lengths)
_G = _E / (2 * (1 + _nu)) # modulus of rigidity
_g = _E / ((1 + _nu) * (1 - 2 * _nu))
# SymPy symbols:
x, y, z = symbols('x y z')
N1, N2, N3, N4 = symbols('N1 N2 N3 N4')
N5, N6, N7, N8 = symbols('N5 N6 N7 N8')
xlist = [x, x, x, x, x, x, x, x]
ylist = [y, y, y, y, y, y, y, y]
zlist = [z, z, z, z, z, z, z, z]
# Shape functions:
N1 = (_a - x) * (_b - y) * (_c - z) / (8 * _a * _b * _c)
N2 = (_a + x) * (_b - y) * (_c - z) / (8 * _a * _b * _c)
N3 = (_a + x) * (_b + y) * (_c - z) / (8 * _a * _b * _c)
N4 = (_a - x) * (_b + y) * (_c - z) / (8 * _a * _b * _c)
N5 = (_a - x) * (_b - y) * (_c + z) / (8 * _a * _b * _c)
N6 = (_a + x) * (_b - y) * (_c + z) / (8 * _a * _b * _c)
N7 = (_a + x) * (_b + y) * (_c + z) / (8 * _a * _b * _c)
N8 = (_a - x) * (_b + y) * (_c + z) / (8 * _a * _b * _c)
# Create strain-displacement matrix B:
B0 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], xlist))
B1 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], ylist))
B2 = tuple(map(diff, [N1, N2, N3, N4, N5, N6, N7, N8], zlist))
B = Matrix([B0, B1, B2])
# Create conductivity matrix:
C = Matrix([[_k, 0, 0],
[0, _k, 0],
[0, 0, _k]])
dK = B.T * C * B
# Integration:
logger.info('SymPy is integrating: K for H8T...')
K = dK.integrate((x, -_a, _a),(y, -_b, _b),(z, -_c, _c))
# Convert SymPy Matrix to NumPy array:
K = array(K, dtype='double')
C = array(C, dtype='double')
    # Zero out negligible entries (absolute value below 1e-6):
K[abs(K) < 1e-6] = 0
# Return result:
logger.info('Created stiffness matrix.')
return K, B, C
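# ---------------------------------------------------------------------------
# Editorial sanity-check sketch, not part of the original module. K is the
# integral of B^T * C * B over the element; because the shape functions sum to
# one, each row of K must sum to zero (a uniform temperature field drives no
# heat flux). The material numbers are placeholders. Run it with
# `python -m topy.data.H8T_K` so the relative import above resolves.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    # half-length 0.5; E and nu only feed the unused _G/_g locals here;
    # conductivity k = 50; the trailing thickness argument is not used in 3D.
    K_chk, B_chk, C_chk = create_K(0.5, 210e9, 0.3, 50.0, 1.0)
    assert K_chk.shape == (8, 8)
    assert np.allclose(K_chk.sum(axis=1), 0.0, atol=1e-6)
    print('H8T conductivity matrix row sums ~ 0:', K_chk.sum(axis=1))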
# EOF H8T_K.py
| 32.733333 | 79 | 0.479837 | ["MIT"] | TarcLO/topy | topy/data/H8T_K.py | 2,459 | Python |