repo_name (stringlengths 7–94) | repo_path (stringlengths 4–237) | repo_head_hexsha (stringlengths 40–40) | content (stringlengths 10–680k) | apis (stringlengths 2–840k)
---|---|---|---|---|
xhchrn/open_lth | datasets/imagenet.py | 6b3d04a12a2f868ce851bd09b330ea57957c1de6 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import concurrent.futures
import numpy as np
import os
from PIL import Image
import torchvision
from datasets import base
from platforms.platform import get_platform
def _get_samples(root, y_name, y_num):
y_dir = os.path.join(root, y_name)
if not get_platform().isdir(y_dir): return []
output = [(os.path.join(y_dir, f), y_num) for f in get_platform().listdir(y_dir) if f.lower().endswith('jpeg')]
return output
class Dataset(base.ImageDataset):
"""ImageNet"""
def __init__(self, loc: str, image_transforms):
# Load the data.
classes = sorted(get_platform().listdir(loc))
samples = []
if get_platform().num_workers > 0:
executor = concurrent.futures.ThreadPoolExecutor(max_workers=get_platform().num_workers)
futures = [executor.submit(_get_samples, loc, y_name, y_num) for y_num, y_name in enumerate(classes)]
for d in concurrent.futures.wait(futures)[0]: samples += d.result()
else:
for y_num, y_name in enumerate(classes):
samples += _get_samples(loc, y_name, y_num)
examples, labels = zip(*samples)
super(Dataset, self).__init__(
np.array(examples), np.array(labels), image_transforms,
[torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
@staticmethod
def num_train_examples(): return 1281167
@staticmethod
def num_test_examples(): return 50000
@staticmethod
def num_classes(): return 1000
@staticmethod
def _augment_transforms():
return [
torchvision.transforms.RandomResizedCrop(224, scale=(0.1, 1.0), ratio=(0.8, 1.25)),
torchvision.transforms.RandomHorizontalFlip()
]
@staticmethod
def _transforms():
return [torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224)]
@staticmethod
def get_train_set(use_augmentation, resize):
transforms = Dataset._augment_transforms() if use_augmentation else Dataset._transforms()
return Dataset(os.path.join(get_platform().imagenet_root, 'train'), transforms)
@staticmethod
def get_test_set(resize):
return Dataset(os.path.join(get_platform().imagenet_root, 'val'), Dataset._transforms())
@staticmethod
def example_to_image(example):
with get_platform().open(example, 'rb') as fp:
return Image.open(fp).convert('RGB')
DataLoader = base.DataLoader
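# Illustrative usage sketch (not part of the original file); it assumes that
# get_platform().imagenet_root points at a standard ImageNet train/val layout:
#
#     train_set = Dataset.get_train_set(use_augmentation=True, resize=None)
#     test_set = Dataset.get_test_set(resize=None)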
| [((17, 12, 17, 38), 'os.path.join', 'os.path.join', ({(17, 25, 17, 29): 'root', (17, 31, 17, 37): 'y_name'}, {}), '(root, y_name)', False, 'import os\n'), ((19, 15, 19, 37), 'os.path.join', 'os.path.join', ({(19, 28, 19, 33): 'y_dir', (19, 35, 19, 36): 'f'}, {}), '(y_dir, f)', False, 'import os\n'), ((41, 12, 41, 30), 'numpy.array', 'np.array', ({(41, 21, 41, 29): 'examples'}, {}), '(examples)', True, 'import numpy as np\n'), ((41, 32, 41, 48), 'numpy.array', 'np.array', ({(41, 41, 41, 47): 'labels'}, {}), '(labels)', True, 'import numpy as np\n'), ((56, 12, 56, 94), 'torchvision.transforms.RandomResizedCrop', 'torchvision.transforms.RandomResizedCrop', (), '', False, 'import torchvision\n'), ((57, 12, 57, 57), 'torchvision.transforms.RandomHorizontalFlip', 'torchvision.transforms.RandomHorizontalFlip', ({}, {}), '()', False, 'import torchvision\n'), ((62, 16, 62, 50), 'torchvision.transforms.Resize', 'torchvision.transforms.Resize', ({(62, 46, 62, 49): '(256)'}, {}), '(256)', False, 'import torchvision\n'), ((62, 52, 62, 90), 'torchvision.transforms.CenterCrop', 'torchvision.transforms.CenterCrop', ({(62, 86, 62, 89): '(224)'}, {}), '(224)', False, 'import torchvision\n'), ((18, 11, 18, 25), 'platforms.platform.get_platform', 'get_platform', ({}, {}), '()', False, 'from platforms.platform import get_platform\n'), ((31, 11, 31, 25), 'platforms.platform.get_platform', 'get_platform', ({}, {}), '()', False, 'from platforms.platform import get_platform\n'), ((42, 13, 42, 91), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', ({(42, 46, 42, 67): '[0.485, 0.456, 0.406]', (42, 69, 42, 90): '[0.229, 0.224, 0.225]'}, {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])', False, 'import torchvision\n'), ((19, 55, 19, 69), 'platforms.platform.get_platform', 'get_platform', ({}, {}), '()', False, 'from platforms.platform import get_platform\n'), ((28, 25, 28, 39), 'platforms.platform.get_platform', 'get_platform', ({}, {}), '()', False, 'from platforms.platform import get_platform\n'), ((67, 36, 67, 50), 'platforms.platform.get_platform', 'get_platform', ({}, {}), '()', False, 'from platforms.platform import get_platform\n'), ((71, 36, 71, 50), 'platforms.platform.get_platform', 'get_platform', ({}, {}), '()', False, 'from platforms.platform import get_platform\n'), ((75, 13, 75, 27), 'platforms.platform.get_platform', 'get_platform', ({}, {}), '()', False, 'from platforms.platform import get_platform\n'), ((76, 19, 76, 33), 'PIL.Image.open', 'Image.open', ({(76, 30, 76, 32): 'fp'}, {}), '(fp)', False, 'from PIL import Image\n'), ((32, 73, 32, 87), 'platforms.platform.get_platform', 'get_platform', ({}, {}), '()', False, 'from platforms.platform import get_platform\n')] |
ZelKnow/sm4 | sm4.py | 2bb232f46a5033b2d89ce097e004e53eb13d90d8 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : sm4.py
@Description : Implementation of the SM4 encryption algorithm
@Date : 2021/10/28 15:59:51
@Author : ZelKnow
@Github : https://github.com/ZelKnow
"""
__author__ = "ZelKnow"
from argparse import ArgumentParser, ArgumentError
from binascii import hexlify, unhexlify
from utils import S_BOX, BLOCK_BYTE, FK, CK, BLOCK_HEX
from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding
ENCRYPT = 0  # encrypt
DECRYPT = 1  # decrypt
class CryptSM4(object):
def __init__(self):
self.rk = []
def T(self, A, L_func):
"""合成置换函数T
T(.) = L(\tau(.))
Args:
A (int): 输入数据
L_func (function): 线性变换L
Returns:
int: 输出数据
"""
B = [S_BOX[(A >> i) & (0x000000ff)] for i in range(0, 32, 8)]
B = [B[i] << (i * 8) for i in range(4)]
C = L_func(sum(B))
return C
def L(self, input):
"""线性变换L,用于轮函数中
L(B) = B ^ (B <<< 2) ^ (B <<< 10) ^ (B <<< 18) ^ (B <<< 24)
Args:
input (int): 输入数据
Returns:
int: 输出数据
"""
return input ^ rotl(input, 2) ^ rotl(input, 10) ^ rotl(
input, 18) ^ rotl(input, 24)
def L_prime(self, input):
"""线性变换L',用于密钥扩展算法
L'(B) = B ^ (B <<< 13) ^ (B <<< 23)
Args:
input (int): 输入数据
Returns:
int: 输出数据
"""
return input ^ rotl(input, 13) ^ rotl(input, 23)
def check_key_iv(self, key_iv):
"""检验key或iv的合法性并转换成字节串
Args:
key_iv (int, str or bytes): key或iv
Raises:
TypeError: 密钥或初始化向量类型错误
ValueError: 密钥或初始化向量长度过长
Returns:
bytes: key或iv
"""
if isinstance(key_iv, str):
key_iv = key_iv.encode(encoding='UTF8')
elif isinstance(key_iv, int):
print(len(num2hex(key_iv, width=32)))
key_iv = unhexlify(num2hex(key_iv, width=32))
elif not isinstance(key_iv, bytes):
            raise TypeError("Invalid type for key or initialization vector")
if len(key_iv) > BLOCK_BYTE:
            raise ValueError('Key or initialization vector length cannot exceed {}'.format(BLOCK_BYTE))
return unhexlify('00') * (BLOCK_BYTE - len(key_iv)) + key_iv
def set_key(self, key):
"""设置key
Args:
key (int, str or bytes): 密钥
"""
key = self.check_key_iv(key)
        input = bytes_to_list(hexlify(key), BLOCK_HEX // 4)
input = [int(i, 16) for i in input]
        K = [input[i] ^ FK[i] for i in range(4)]  # holds the round keys
        for i in range(32):  # key expansion algorithm
K.append(K[i] ^ self.T(K[i + 1] ^ K[i + 2] ^ K[i + 3]
^ CK[i], self.L_prime))
self.rk = K[4:]
def F(self, X, i):
"""轮函数F
F = X_0 ^ T(X_1 ^ X_2 ^ X_3 ^ rk)
其中输入为(X_0, X_1, X_2, X_3),轮密钥为rk
Args:
X (list): 输入
i (int): 轮密钥的下标
Returns:
int: 输出
"""
return X[0] ^ self.T(X[1] ^ X[2] ^ X[3] ^ self.rk[i], self.L)
def _crypt(self, x, mode=ENCRYPT):
"""加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
input = [(x >> i) & (0xffffffff) for i in reversed(range(0, 128, 32))]
# 加解密时使用的轮密钥顺序不同
for i in range(32) if mode == ENCRYPT else reversed(range(32)):
input.append(self.F(input[-4:], i)) # 32次迭代运算
output = input[-4:]
output = [output[i] << (i * 32) for i in range(4)] # 反序变换
return sum(output)
def encrypt(self, x):
"""加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt(x, ENCRYPT)
def decrypt(self, x):
"""解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
return self._crypt(x, DECRYPT)
def _crypt_ECB(self, input, mode=ENCRYPT):
"""ECB加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
input_list = bytes_to_list(input, BLOCK_BYTE) # 将输入拆分成block
input_list = [int(hexlify(i), 16) for i in input_list]
output_list = [self._crypt(x, mode) for x in input_list] # 分别加解密
output_list = [
unhexlify(num2hex(o, width=BLOCK_HEX)) for o in output_list
] # 转成字节流
return list_to_bytes(output_list) # 合并
def encrypt_ECB(self, plain_text):
"""ECB加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt_ECB(padding(plain_text), ENCRYPT)
def decrypt_ECB(self, cipher_text):
"""ECB解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
try:
cipher_text = unhexlify(cipher_text)
except:
pass
return unpadding(self._crypt_ECB(cipher_text, DECRYPT))
def _crypt_CBC(self, input, iv, mode=ENCRYPT):
"""CBC加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
iv = int(hexlify(self.check_key_iv(iv)), 16) # 初始化向量
input_list = bytes_to_list(input, BLOCK_BYTE) # 拆分成block
input_list = [int(hexlify(i), 16) for i in input_list]
output_list = []
for x in input_list:
if mode == ENCRYPT:
output_list.append(self._crypt(x ^ iv, mode))
iv = output_list[-1]
else:
output_list.append(self._crypt(x, mode) ^ iv)
iv = x
output_list = [
unhexlify(num2hex(o, width=BLOCK_HEX)) for o in output_list
]
return list_to_bytes(output_list)
def encrypt_CBC(self, plain_text, iv):
"""CBC加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt_CBC(padding(plain_text), iv, ENCRYPT)
def decrypt_CBC(self, cipher_text, iv):
"""CBC解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
try:
cipher_text = unhexlify(cipher_text)
except:
pass
return unpadding(self._crypt_CBC(cipher_text, iv, DECRYPT))
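# Illustrative usage sketch (not part of the original module); the key below is an
# arbitrary 16-byte placeholder and the exact output format depends on the bundled
# utils helpers:
#
#     crypt_sm4 = CryptSM4()
#     crypt_sm4.set_key('0123456789abcdef')
#     cipher = crypt_sm4.encrypt_ECB(b'attack at dawn')
#     plain = crypt_sm4.decrypt_ECB(cipher)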
if __name__ == '__main__':
    parser = ArgumentParser(description="SM4 encryption/decryption")
    parser.add_argument('crypt', choices=['encrypt', 'decrypt'], help='encrypt or decrypt')
    parser.add_argument('mode', choices=['ecb', 'cbc'], help='encryption mode')
    parser.add_argument('source', help='target to encrypt/decrypt')
    parser.add_argument('key', help='encryption key')
    parser.add_argument('--iv', help='initialization vector, used in CBC mode')
    parser.add_argument('--source_type',
                        choices=['input', 'bin_file', 'image'],
                        help='type of the encryption target',
                        default='input')
    parser.add_argument('--output', help='output file name; print to stdout if not specified')
args = parser.parse_args()
c = CryptSM4()
c.set_key(args.key)
if args.mode == 'cbc' and args.iv is None:
        raise ArgumentError("Please provide the initialization vector")
if args.source_type == 'input':
input = args.source
if input[:2].lower() == '0x':
input = int(input[2:], 16)
elif args.source_type == 'bin_file':
with open(args.source, 'rb') as f:
input = f.read()
else:
from PIL import Image
import numpy as np
source = Image.open(args.source)
img = np.array(source.convert('RGBA'))
shape = img.shape
size = img.size
input = unhexlify(''.join([num2hex(i, width=2)
for i in img.flatten()]))
if args.crypt == 'encrypt':
output = c.encrypt_ECB(input) if args.mode == 'ecb' else c.encrypt_CBC(
input, args.iv)
else:
output = c.decrypt_ECB(input) if args.mode == 'ecb' else c.decrypt_CBC(
input, args.iv)
if args.source_type == 'image':
output = hexlify(output).decode()
output = output[:size * 2]
output = [[int(output[i + j:i + j + 2], 16) for j in range(0, 8, 2)]
for i in range(0, len(output), 8)]
output = np.array(output)
output = Image.fromarray(output.reshape(shape).astype('uint8'))
output.save(args.output)
elif args.output:
with open(args.output, "wb") as f:
f.write(output)
else:
try:
print(output.decode())
except:
print(hexlify(output).decode())
| [((258, 13, 258, 55), 'argparse.ArgumentParser', 'ArgumentParser', (), '', False, 'from argparse import ArgumentParser, ArgumentError\n'), ((169, 21, 169, 53), 'utils.bytes_to_list', 'bytes_to_list', ({(169, 35, 169, 40): 'input', (169, 42, 169, 52): 'BLOCK_BYTE'}, {}), '(input, BLOCK_BYTE)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((175, 15, 175, 41), 'utils.list_to_bytes', 'list_to_bytes', ({(175, 29, 175, 40): 'output_list'}, {}), '(output_list)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((215, 21, 215, 53), 'utils.bytes_to_list', 'bytes_to_list', ({(215, 35, 215, 40): 'input', (215, 42, 215, 52): 'BLOCK_BYTE'}, {}), '(input, BLOCK_BYTE)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((228, 15, 228, 41), 'utils.list_to_bytes', 'list_to_bytes', ({(228, 29, 228, 40): 'output_list'}, {}), '(output_list)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((274, 14, 274, 61), 'argparse.ArgumentError', 'ArgumentError', ({(274, 28, 274, 60): '"""请输入初始化向量的值"""'}, {}), "('请输入初始化向量的值')", False, 'from argparse import ArgumentParser, ArgumentError\n'), ((305, 17, 305, 33), 'numpy.array', 'np.array', ({(305, 26, 305, 32): 'output'}, {}), '(output)', True, 'import numpy as np\n'), ((52, 25, 52, 40), 'utils.rotl', 'rotl', ({(52, 30, 52, 35): 'input', (52, 37, 52, 39): '(24)'}, {}), '(input, 24)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((64, 41, 64, 56), 'utils.rotl', 'rotl', ({(64, 46, 64, 51): 'input', (64, 53, 64, 55): '(23)'}, {}), '(input, 23)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((97, 30, 97, 42), 'binascii.hexlify', 'hexlify', ({(97, 38, 97, 41): 'key'}, {}), '(key)', False, 'from binascii import hexlify, unhexlify\n'), ((186, 31, 186, 50), 'utils.padding', 'padding', ({(186, 39, 186, 49): 'plain_text'}, {}), '(plain_text)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((198, 26, 198, 48), 'binascii.unhexlify', 'unhexlify', ({(198, 36, 198, 47): 'cipher_text'}, {}), '(cipher_text)', False, 'from binascii import hexlify, unhexlify\n'), ((239, 31, 239, 50), 'utils.padding', 'padding', ({(239, 39, 239, 49): 'plain_text'}, {}), '(plain_text)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((251, 26, 251, 48), 'binascii.unhexlify', 'unhexlify', ({(251, 36, 251, 47): 'cipher_text'}, {}), '(cipher_text)', False, 'from binascii import hexlify, unhexlify\n'), ((286, 17, 286, 40), 'PIL.Image.open', 'Image.open', ({(286, 28, 286, 39): 'args.source'}, {}), '(args.source)', False, 'from PIL import Image\n'), ((51, 58, 52, 22), 'utils.rotl', 'rotl', ({(52, 12, 52, 17): 'input', (52, 19, 52, 21): '(18)'}, {}), '(input, 18)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((64, 23, 64, 38), 'utils.rotl', 'rotl', ({(64, 28, 64, 33): 'input', (64, 35, 64, 37): '(13)'}, {}), '(input, 13)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((88, 15, 88, 30), 'binascii.unhexlify', 'unhexlify', ({(88, 25, 88, 29): '"""00"""'}, {}), "('00')", False, 'from binascii import hexlify, unhexlify\n'), ((170, 26, 170, 36), 'binascii.hexlify', 'hexlify', ({(170, 34, 170, 35): 'i'}, {}), '(i)', False, 'from binascii import 
hexlify, unhexlify\n'), ((173, 22, 173, 49), 'utils.num2hex', 'num2hex', (), '', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((216, 26, 216, 36), 'binascii.hexlify', 'hexlify', ({(216, 34, 216, 35): 'i'}, {}), '(i)', False, 'from binascii import hexlify, unhexlify\n'), ((226, 22, 226, 49), 'utils.num2hex', 'num2hex', (), '', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((301, 17, 301, 32), 'binascii.hexlify', 'hexlify', ({(301, 25, 301, 31): 'output'}, {}), '(output)', False, 'from binascii import hexlify, unhexlify\n'), ((51, 40, 51, 55), 'utils.rotl', 'rotl', ({(51, 45, 51, 50): 'input', (51, 52, 51, 54): '(10)'}, {}), '(input, 10)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((83, 31, 83, 56), 'utils.num2hex', 'num2hex', (), '', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((51, 23, 51, 37), 'utils.rotl', 'rotl', ({(51, 28, 51, 33): 'input', (51, 35, 51, 36): '(2)'}, {}), '(input, 2)', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((82, 22, 82, 47), 'utils.num2hex', 'num2hex', (), '', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((290, 35, 290, 54), 'utils.num2hex', 'num2hex', (), '', False, 'from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding\n'), ((315, 18, 315, 33), 'binascii.hexlify', 'hexlify', ({(315, 26, 315, 32): 'output'}, {}), '(output)', False, 'from binascii import hexlify, unhexlify\n')] |
saadmk11/sendotp-python | sendotp/sendotp.py | b0cd5c3da969d00a753d9614c5bea0e2978859c9 | import json
import requests
from random import randint
class sendotp:
def __init__(self, key, msg):
self.baseUrl = "http://control.msg91.com"
self.authkey = key
try:
msg
except NameError:
self.msg = "Your otp is {{otp}}. Please do not share it with anybody"
else:
self.msg = msg
def actionURLBuilder(self, actionurl):
# print self.baseUrl + '/api/' +str(actionurl)
print (actionurl)
return self.baseUrl + '/api/' + str(actionurl)
def generateOtp(self):
return randint(1000, 9999)
def send(self, contactNumber, senderId, otp):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'message': self.msg.replace("{{otp}}", str(otp)),
'sender': senderId,
'otp': otp
}
print (self.call('sendotp.php', values))
return otp
def retry(self, contactNumber, retrytype='voice'):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'retrytype': retrytype
}
print (values)
response = self.call('retryotp.php', values)
return;
def verify(self, contactNumber, otp):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'otp': otp
}
response = self.call('verifyRequestOTP.php', values)
return response;
def call(self, actionurl, args):
url = self.actionURLBuilder(actionurl)
print (url)
payload = (args)
response = requests.post(url, data=payload, verify=False)
print (response.text)
return response.status_code
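# Illustrative usage sketch (not part of the original file); the auth key, sender id
# and phone number below are placeholders:
#
#     client = sendotp('YOUR_MSG91_AUTH_KEY', 'Your otp is {{otp}}. Please do not share it with anybody')
#     otp = client.send('919999999999', 'SENDER', client.generateOtp())
#     status = client.verify('919999999999', otp)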
| [((26, 15, 26, 34), 'random.randint', 'randint', ({(26, 23, 26, 27): '(1000)', (26, 29, 26, 33): '(9999)'}, {}), '(1000, 9999)', False, 'from random import randint\n'), ((63, 19, 63, 65), 'requests.post', 'requests.post', (), '', False, 'import requests\n')] |
tjeubaoit/algorithm | leetcode/1021-remove-outermost-parentheses.py | a1f2a30e0f736cc3d8b45ed845f724b9a4ed2e9a | class Solution:
def removeOuterParentheses(self, s: str) -> str:
ans = []
ct = 0
for ch in s:
if ch == '(':
ct += 1
if ct != 1:
ans.append(ch)
else:
ct -= 1
if ct != 0:
ans.append(ch)
return ''.join(ans)
if __name__ == '__main__':
# s = '(()())(())'
# s = '(()())(())(()(()))'
s = '()()'
ret = Solution().removeOuterParentheses(s)
print(ret)
| [] |
SamuelNunesDev/starting_point_in_python | venv/Scripts/ex049.py | 9a9e39cabb5f3526ee0037012e3943898c1d9dfa | n = int(input('Digite um número para ver sua tabuada: '))
for c in range(0, 11):
print(f'{n} * {c} = {n * c}')
| [] |
inprod/Js2Py | js2py/evaljs.py | 0af8cb100b7840e23358d220c685507163f2344e | # coding=utf-8
from .translators import translate_js, DEFAULT_HEADER
from .es6 import js6_to_js5
import sys
import time
import json
import six
import os
import hashlib
import codecs
__all__ = [
'EvalJs', 'translate_js', 'import_js', 'eval_js', 'translate_file',
'eval_js6', 'translate_js6', 'run_file', 'disable_pyimport',
'get_file_contents', 'write_file_contents'
]
DEBUG = False
def disable_pyimport():
import pyjsparser.parser
pyjsparser.parser.ENABLE_PYIMPORT = False
def path_as_local(path):
if os.path.isabs(path):
return path
# relative to cwd
return os.path.join(os.getcwd(), path)
def import_js(path, lib_name, globals):
"""Imports from javascript source file.
globals is your globals()"""
with codecs.open(path_as_local(path), "r", "utf-8") as f:
js = f.read()
e = EvalJs()
e.execute(js)
var = e.context['var']
globals[lib_name] = var.to_python()
def get_file_contents(path_or_file):
if hasattr(path_or_file, 'read'):
js = path_or_file.read()
else:
with codecs.open(path_as_local(path_or_file), "r", "utf-8") as f:
js = f.read()
return js
def write_file_contents(path_or_file, contents):
if hasattr(path_or_file, 'write'):
path_or_file.write(contents)
else:
with open(path_as_local(path_or_file), 'w') as f:
f.write(contents)
def translate_file(input_path, output_path):
'''
Translates input JS file to python and saves the it to the output path.
It appends some convenience code at the end so that it is easy to import JS objects.
For example we have a file 'example.js' with: var a = function(x) {return x}
translate_file('example.js', 'example.py')
Now example.py can be easily importend and used:
>>> from example import example
>>> example.a(30)
30
'''
js = get_file_contents(input_path)
py_code = translate_js(js)
lib_name = os.path.basename(output_path).split('.')[0]
head = '__all__ = [%s]\n\n# Don\'t look below, you will not understand this Python code :) I don\'t.\n\n' % repr(
lib_name)
tail = '\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name
out = head + py_code + tail
write_file_contents(output_path, out)
def run_file(path_or_file, context=None):
    ''' Context must be an EvalJs object. Runs the given path as a JS program. Returns (eval_value, context).
'''
if context is None:
context = EvalJs()
if not isinstance(context, EvalJs):
raise TypeError('context must be the instance of EvalJs')
eval_value = context.eval(get_file_contents(path_or_file))
return eval_value, context
def eval_js(js):
"""Just like javascript eval. Translates javascript to python,
executes and returns python object.
js is javascript source code
EXAMPLE:
>>> import js2py
>>> add = js2py.eval_js('function add(a, b) {return a + b}')
>>> add(1, 2) + 3
6
>>> add('1', 2, 3)
u'12'
>>> add.constructor
function Function() { [python code] }
NOTE: For Js Number, String, Boolean and other base types returns appropriate python BUILTIN type.
For Js functions and objects, returns Python wrapper - basically behaves like normal python object.
If you really want to convert object to python dict you can use to_dict method.
"""
e = EvalJs()
return e.eval(js)
def eval_js6(js):
"""Just like eval_js but with experimental support for js6 via babel."""
return eval_js(js6_to_js5(js))
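# For example (illustrative sketch; assumes the bundled babel-based js6_to_js5
# translation works in your environment):
#
#     eval_js6('const add = (a, b) => a + b; add(40, 2)')  # -> 42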
def translate_js6(js):
"""Just like translate_js but with experimental support for js6 via babel."""
return translate_js(js6_to_js5(js))
class EvalJs(object):
"""This class supports continuous execution of javascript under same context.
>>> ctx = EvalJs()
>>> ctx.execute('var a = 10;function f(x) {return x*x};')
>>> ctx.f(9)
81
>>> ctx.a
10
context is a python dict or object that contains python variables that should be available to JavaScript
For example:
>>> ctx = EvalJs({'a': 30})
>>> ctx.execute('var x = a')
>>> ctx.x
30
You can enable JS require function via enable_require. With this feature enabled you can use js modules
from npm, for example:
>>> ctx = EvalJs(enable_require=True)
>>> ctx.execute("var esprima = require('esprima');")
>>> ctx.execute("esprima.parse('var a = 1')")
You can run interactive javascript console with console method!"""
def __init__(self, context={}, enable_require=False):
self.__dict__['_context'] = {}
exec (DEFAULT_HEADER, self._context)
self.__dict__['_var'] = self._context['var'].to_python()
if enable_require:
def _js_require_impl(npm_module_name):
from .node_import import require
from .base import to_python
return require(to_python(npm_module_name), context=self._context)
setattr(self._var, 'require', _js_require_impl)
if not isinstance(context, dict):
try:
context = context.__dict__
except:
raise TypeError(
'context has to be either a dict or have __dict__ attr')
for k, v in six.iteritems(context):
setattr(self._var, k, v)
def execute(self, js=None, use_compilation_plan=False):
"""executes javascript js in current context
During initial execute() the converted js is cached for re-use. That means next time you
run the same javascript snippet you save many instructions needed to parse and convert the
js code to python code.
        This cache causes minor overhead (a cache dict is updated) but the Js=>Py conversion process
is typically expensive compared to actually running the generated python code.
Note that the cache is just a dict, it has no expiration or cleanup so when running this
in automated situations with vast amounts of snippets it might increase memory usage.
"""
try:
cache = self.__dict__['cache']
except KeyError:
cache = self.__dict__['cache'] = {}
hashkey = hashlib.md5(js.encode('utf-8')).digest()
try:
compiled = cache[hashkey]
except KeyError:
code = translate_js(
js, '', use_compilation_plan=use_compilation_plan)
compiled = cache[hashkey] = compile(code, '<EvalJS snippet>',
'exec')
exec (compiled, self._context)
def eval(self, expression, use_compilation_plan=False):
"""evaluates expression in current context and returns its value"""
code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
self.execute(code, use_compilation_plan=use_compilation_plan)
return self['PyJsEvalResult']
def eval_js6(self, expression, use_compilation_plan=False):
"""same as eval, except that the JS code gets translated from es6 to es5 before being executed."""
es5_expression = js6_to_js5(expression)
return self.eval(es5_expression, use_compilation_plan)
def execute_debug(self, js):
"""executes javascript js in current context
as opposed to the (faster) self.execute method, you can use your regular debugger
to set breakpoints and inspect the generated python code
"""
code = translate_js(js, '')
# make sure you have a temp folder:
filename = 'temp' + os.sep + '_' + hashlib.md5(
code.encode("utf-8")).hexdigest() + '.py'
try:
with open(filename, mode='w') as f:
f.write(code)
with open(filename, "r") as f:
pyCode = compile(f.read(), filename, 'exec')
exec(pyCode, self._context)
except Exception as err:
raise err
finally:
os.remove(filename)
try:
os.remove(filename + 'c')
except:
pass
def eval_debug(self, expression):
"""evaluates expression in current context and returns its value
as opposed to the (faster) self.execute method, you can use your regular debugger
to set breakpoints and inspect the generated python code
"""
code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
self.execute_debug(code)
return self['PyJsEvalResult']
@property
def context(self):
return self._context
def __getattr__(self, var):
return getattr(self._var, var)
def __getitem__(self, var):
return getattr(self._var, var)
def __setattr__(self, var, val):
return setattr(self._var, var, val)
def __setitem__(self, var, val):
return setattr(self._var, var, val)
def console(self):
"""starts to interact (starts interactive console) Something like code.InteractiveConsole"""
while True:
if six.PY2:
code = raw_input('>>> ')
else:
code = input('>>>')
try:
print(self.eval(code))
except KeyboardInterrupt:
break
except Exception as e:
import traceback
if DEBUG:
sys.stderr.write(traceback.format_exc())
else:
sys.stderr.write('EXCEPTION: ' + str(e) + '\n')
time.sleep(0.01)
#print x
if __name__ == '__main__':
#with open('C:\Users\Piotrek\Desktop\esprima.js', 'rb') as f:
# x = f.read()
e = EvalJs()
e.execute('square(x)')
#e.execute(x)
e.console()
| [((26, 7, 26, 26), 'os.path.isabs', 'os.path.isabs', ({(26, 21, 26, 25): 'path'}, {}), '(path)', False, 'import os\n'), ((29, 24, 29, 35), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((171, 20, 171, 42), 'six.iteritems', 'six.iteritems', ({(171, 34, 171, 41): 'context'}, {}), '(context)', False, 'import six\n'), ((203, 45, 203, 67), 'json.dumps', 'json.dumps', ({(203, 56, 203, 66): 'expression'}, {}), '(expression)', False, 'import json\n'), ((231, 12, 231, 31), 'os.remove', 'os.remove', ({(231, 22, 231, 30): 'filename'}, {}), '(filename)', False, 'import os\n'), ((242, 45, 242, 67), 'json.dumps', 'json.dumps', ({(242, 56, 242, 66): 'expression'}, {}), '(expression)', False, 'import json\n'), ((76, 15, 76, 44), 'os.path.basename', 'os.path.basename', ({(76, 32, 76, 43): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((233, 16, 233, 41), 'os.remove', 'os.remove', ({(233, 26, 233, 40): "(filename + 'c')"}, {}), "(filename + 'c')", False, 'import os\n'), ((279, 16, 279, 32), 'time.sleep', 'time.sleep', ({(279, 27, 279, 31): '(0.01)'}, {}), '(0.01)', False, 'import time\n'), ((276, 37, 276, 59), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n')] |
mvduin/py-uio | setup.py | 1ad5eb6e1cfeae722535fd6ed7e485a0afd84683 | #!/usr/bin/python3
from setuptools import setup, find_packages
setup(
package_dir = { '': 'src' },
packages = find_packages( where='src' ),
)
| [((7, 15, 7, 43), 'setuptools.find_packages', 'find_packages', (), '', False, 'from setuptools import setup, find_packages\n')] |
FabriSC/Alioth-SC | tools/verity_utils.py | bbe9723401b351c2a34b09a30978373d456d20a2 | #!/usr/bin/env python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os.path
import shlex
import struct
import common
import sparse_img
from rangelib import RangeSet
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
class BuildVerityImageError(Exception):
"""An Exception raised during verity image building."""
def __init__(self, message):
Exception.__init__(self, message)
def GetVerityFECSize(image_size):
cmd = ["fec", "-s", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityTreeSize(image_size):
cmd = ["build_verity_tree", "-s", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityMetadataSize(image_size):
cmd = ["build_verity_metadata", "size", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVeritySize(image_size, fec_supported):
verity_tree_size = GetVerityTreeSize(image_size)
verity_metadata_size = GetVerityMetadataSize(image_size)
verity_size = verity_tree_size + verity_metadata_size
if fec_supported:
fec_size = GetVerityFECSize(image_size + verity_size)
return verity_size + fec_size
return verity_size
def GetSimgSize(image_file):
simg = sparse_img.SparseImage(image_file, build_map=False)
return simg.blocksize * simg.total_blocks
def ZeroPadSimg(image_file, pad_size):
blocks = pad_size // BLOCK_SIZE
logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
simg.AppendFillChunk(0, blocks)
def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
padding_size):
cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
verity_path, verity_fec_path]
common.RunAndCheckOutput(cmd)
def BuildVerityTree(sparse_image_path, verity_image_path):
cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
verity_image_path]
output = common.RunAndCheckOutput(cmd)
root, salt = output.split()
return root, salt
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key, signer_args,
verity_disable):
cmd = ["build_verity_metadata", "build", str(image_size),
verity_metadata_path, root_hash, salt, block_device, signer_path, key]
if signer_args:
cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
if verity_disable:
cmd.append("--verity_disable")
common.RunAndCheckOutput(cmd)
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
"""Appends the unsparse image to the given sparse image.
Args:
sparse_image_path: the path to the (sparse) image
unsparse_image_path: the path to the (unsparse) image
Raises:
BuildVerityImageError: On error.
"""
cmd = ["append2simg", sparse_image_path, unsparse_image_path]
try:
common.RunAndCheckOutput(cmd)
except:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def Append(target, file_to_append, error_message):
"""Appends file_to_append to target.
Raises:
BuildVerityImageError: On error.
"""
try:
with open(target, 'ab') as out_file, \
open(file_to_append, 'rb') as input_file:
for line in input_file:
out_file.write(line)
except IOError:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def CreateVerityImageBuilder(prop_dict):
"""Returns a verity image builder based on the given build properties.
Args:
prop_dict: A dict that contains the build properties. In particular, it will
look for verity-related property values.
Returns:
A VerityImageBuilder instance for Verified Boot 1.0 or Verified Boot 2.0; or
None if the given build doesn't support Verified Boot.
"""
partition_size = prop_dict.get("partition_size")
# partition_size could be None at this point, if using dynamic partitions.
if partition_size:
partition_size = int(partition_size)
# Verified Boot 1.0
verity_supported = prop_dict.get("verity") == "true"
is_verity_partition = "verity_block_device" in prop_dict
if verity_supported and is_verity_partition:
if OPTIONS.verity_signer_path is not None:
signer_path = OPTIONS.verity_signer_path
else:
signer_path = prop_dict["verity_signer_cmd"]
return Version1VerityImageBuilder(
partition_size,
prop_dict["verity_block_device"],
prop_dict.get("verity_fec") == "true",
signer_path,
prop_dict["verity_key"] + ".pk8",
OPTIONS.verity_signer_args,
"verity_disable" in prop_dict)
# Verified Boot 2.0
if (prop_dict.get("avb_hash_enable") == "true" or
prop_dict.get("avb_hashtree_enable") == "true"):
# key_path and algorithm are only available when chain partition is used.
key_path = prop_dict.get("avb_key_path")
algorithm = prop_dict.get("avb_algorithm")
# Image uses hash footer.
if prop_dict.get("avb_hash_enable") == "true":
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASH_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hash_footer_args"])
# Image uses hashtree footer.
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hashtree_footer_args"])
return None
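# Illustrative sketch (not from the original file): a minimal prop_dict that would
# select the Verified Boot 2.0 hash-footer builder above; all concrete values are
# placeholders, not real build properties.
#
#     prop_dict = {
#         "partition_name": "boot",
#         "partition_size": str(64 * 1024 * 1024),
#         "avb_hash_enable": "true",
#         "avb_avbtool": "avbtool",
#         "avb_key_path": "path/to/testkey_rsa4096.pem",
#         "avb_algorithm": "SHA256_RSA4096",
#         "avb_salt": None,
#         "avb_add_hash_footer_args": "",
#     }
#     builder = CreateVerityImageBuilder(prop_dict)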
class VerityImageBuilder(object):
"""A builder that generates an image with verity metadata for Verified Boot.
A VerityImageBuilder instance handles the works for building an image with
verity metadata for supporting Android Verified Boot. This class defines the
common interface between Verified Boot 1.0 and Verified Boot 2.0. A matching
builder will be returned based on the given build properties.
More info on the verity image generation can be found at the following link.
https://source.android.com/security/verifiedboot/dm-verity#implementation
"""
def CalculateMaxImageSize(self, partition_size):
"""Calculates the filesystem image size for the given partition size."""
raise NotImplementedError
def CalculateDynamicPartitionSize(self, image_size):
"""Calculates and sets the partition size for a dynamic partition."""
raise NotImplementedError
def PadSparseImage(self, out_file):
"""Adds padding to the generated sparse image."""
raise NotImplementedError
def Build(self, out_file):
"""Builds the verity image and writes it to the given file."""
raise NotImplementedError
class Version1VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 1.0."""
def __init__(self, partition_size, block_dev, fec_supported, signer_path,
signer_key, signer_args, verity_disable):
self.version = 1
self.partition_size = partition_size
self.block_device = block_dev
self.fec_supported = fec_supported
self.signer_path = signer_path
self.signer_key = signer_key
self.signer_args = signer_args
self.verity_disable = verity_disable
self.image_size = None
self.verity_size = None
def CalculateDynamicPartitionSize(self, image_size):
# This needs to be implemented. Note that returning the given image size as
# the partition size doesn't make sense, as it will fail later.
raise NotImplementedError
def CalculateMaxImageSize(self, partition_size=None):
"""Calculates the max image size by accounting for the verity metadata.
Args:
partition_size: The partition size, which defaults to self.partition_size
if unspecified.
Returns:
The size of the image adjusted for verity metadata.
"""
if partition_size is None:
partition_size = self.partition_size
assert partition_size > 0, \
"Invalid partition size: {}".format(partition_size)
hi = partition_size
if hi % BLOCK_SIZE != 0:
hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
# verity tree and fec sizes depend on the partition size, which
# means this estimate is always going to be unnecessarily small
verity_size = GetVeritySize(hi, self.fec_supported)
lo = partition_size - verity_size
result = lo
# do a binary search for the optimal size
while lo < hi:
i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
v = GetVeritySize(i, self.fec_supported)
if i + v <= partition_size:
if result < i:
result = i
verity_size = v
lo = i + BLOCK_SIZE
else:
hi = i
self.image_size = result
self.verity_size = verity_size
logger.info(
"Calculated image size for verity: partition_size %d, image_size %d, "
"verity_size %d", partition_size, result, verity_size)
return result
def Build(self, out_file):
"""Creates an image that is verifiable using dm-verity.
Args:
out_file: the output image.
    Raises:
AssertionError: On invalid partition sizes.
BuildVerityImageError: On other errors.
"""
image_size = int(self.image_size)
tempdir_name = common.MakeTempDir(suffix="_verity_images")
# Get partial image paths.
verity_image_path = os.path.join(tempdir_name, "verity.img")
verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
# Build the verity tree and get the root hash and salt.
root_hash, salt = BuildVerityTree(out_file, verity_image_path)
# Build the metadata blocks.
BuildVerityMetadata(
image_size, verity_metadata_path, root_hash, salt, self.block_device,
self.signer_path, self.signer_key, self.signer_args,
self.verity_disable)
padding_size = self.partition_size - self.image_size - self.verity_size
assert padding_size >= 0
# Build the full verified image.
Append(
verity_image_path, verity_metadata_path,
"Failed to append verity metadata")
if self.fec_supported:
# Build FEC for the entire partition, including metadata.
verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
BuildVerityFEC(
out_file, verity_image_path, verity_fec_path, padding_size)
Append(verity_image_path, verity_fec_path, "Failed to append FEC")
Append2Simg(
out_file, verity_image_path, "Failed to append verity data")
def PadSparseImage(self, out_file):
sparse_image_size = GetSimgSize(out_file)
if sparse_image_size > self.image_size:
raise BuildVerityImageError(
"Error: image size of {} is larger than partition size of "
"{}".format(sparse_image_size, self.image_size))
ZeroPadSimg(out_file, self.image_size - sparse_image_size)
class VerifiedBootVersion2VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 2.0."""
AVB_HASH_FOOTER = 1
AVB_HASHTREE_FOOTER = 2
def __init__(self, partition_name, partition_size, footer_type, avbtool,
key_path, algorithm, salt, signing_args):
self.version = 2
self.partition_name = partition_name
self.partition_size = partition_size
self.footer_type = footer_type
self.avbtool = avbtool
self.algorithm = algorithm
self.key_path = key_path
self.salt = salt
self.signing_args = signing_args
self.image_size = None
def CalculateMinPartitionSize(self, image_size, size_calculator=None):
"""Calculates min partition size for a given image size.
This is used when determining the partition size for a dynamic partition,
which should be cover the given image size (for filesystem files) as well as
the verity metadata size.
Args:
image_size: The size of the image in question.
size_calculator: The function to calculate max image size
for a given partition size.
Returns:
The minimum partition size required to accommodate the image size.
"""
if size_calculator is None:
size_calculator = self.CalculateMaxImageSize
# Use image size as partition size to approximate final partition size.
image_ratio = size_calculator(image_size) / float(image_size)
# Prepare a binary search for the optimal partition size.
lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
# Ensure lo is small enough: max_image_size should <= image_size.
delta = BLOCK_SIZE
max_image_size = size_calculator(lo)
while max_image_size > image_size:
image_ratio = max_image_size / float(lo)
lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
delta *= 2
max_image_size = size_calculator(lo)
hi = lo + BLOCK_SIZE
# Ensure hi is large enough: max_image_size should >= image_size.
delta = BLOCK_SIZE
max_image_size = size_calculator(hi)
while max_image_size < image_size:
image_ratio = max_image_size / float(hi)
hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
delta *= 2
max_image_size = size_calculator(hi)
partition_size = hi
# Start to binary search.
while lo < hi:
mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
max_image_size = size_calculator(mid)
if max_image_size >= image_size: # if mid can accommodate image_size
if mid < partition_size: # if a smaller partition size is found
partition_size = mid
hi = mid
else:
lo = mid + BLOCK_SIZE
logger.info(
"CalculateMinPartitionSize(%d): partition_size %d.", image_size,
partition_size)
return partition_size
def CalculateDynamicPartitionSize(self, image_size):
self.partition_size = self.CalculateMinPartitionSize(image_size)
return self.partition_size
def CalculateMaxImageSize(self, partition_size=None):
"""Calculates max image size for a given partition size.
Args:
partition_size: The partition size, which defaults to self.partition_size
if unspecified.
Returns:
The maximum image size.
Raises:
BuildVerityImageError: On error or getting invalid image size.
"""
if partition_size is None:
partition_size = self.partition_size
assert partition_size > 0, \
"Invalid partition size: {}".format(partition_size)
add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
else "add_hashtree_footer")
cmd = [self.avbtool, add_footer, "--partition_size",
str(partition_size), "--calc_max_image_size"]
cmd.extend(shlex.split(self.signing_args))
proc = common.Run(cmd)
output, _ = proc.communicate()
if proc.returncode != 0:
raise BuildVerityImageError(
"Failed to calculate max image size:\n{}".format(output))
image_size = int(output)
if image_size <= 0:
raise BuildVerityImageError(
"Invalid max image size: {}".format(output))
self.image_size = image_size
return image_size
def PadSparseImage(self, out_file):
# No-op as the padding is taken care of by avbtool.
pass
def Build(self, out_file):
"""Adds dm-verity hashtree and AVB metadata to an image.
Args:
out_file: Path to image to modify.
"""
add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
else "add_hashtree_footer")
cmd = [self.avbtool, add_footer,
"--partition_size", str(self.partition_size),
"--partition_name", self.partition_name,
"--image", out_file]
if self.key_path and self.algorithm:
cmd.extend(["--key", self.key_path, "--algorithm", self.algorithm])
if self.salt:
cmd.extend(["--salt", self.salt])
cmd.extend(shlex.split(self.signing_args))
proc = common.Run(cmd)
output, _ = proc.communicate()
if proc.returncode != 0:
raise BuildVerityImageError("Failed to add AVB footer: {}".format(output))
class HashtreeInfoGenerationError(Exception):
"""An Exception raised during hashtree info generation."""
def __init__(self, message):
Exception.__init__(self, message)
class HashtreeInfo(object):
def __init__(self):
self.hashtree_range = None
self.filesystem_range = None
self.hash_algorithm = None
self.salt = None
self.root_hash = None
def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
generator = None
if (info_dict.get("verity") == "true" and
info_dict.get("{}_verity_block_device".format(partition_name))):
partition_size = info_dict["{}_size".format(partition_name)]
fec_supported = info_dict.get("verity_fec") == "true"
generator = VerifiedBootVersion1HashtreeInfoGenerator(
partition_size, block_size, fec_supported)
return generator
class HashtreeInfoGenerator(object):
def Generate(self, image):
raise NotImplementedError
def DecomposeSparseImage(self, image):
raise NotImplementedError
def ValidateHashtree(self):
raise NotImplementedError
class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
"""A class that parses the metadata of hashtree for a given partition."""
def __init__(self, partition_size, block_size, fec_supported):
"""Initialize VerityTreeInfo with the sparse image and input property.
Arguments:
partition_size: The whole size in bytes of a partition, including the
filesystem size, padding size, and verity size.
block_size: Expected size in bytes of each block for the sparse image.
fec_supported: True if the verity section contains fec data.
"""
self.block_size = block_size
self.partition_size = partition_size
self.fec_supported = fec_supported
self.image = None
self.filesystem_size = None
self.hashtree_size = None
self.metadata_size = None
prop_dict = {
'partition_size': str(partition_size),
'verity': 'true',
'verity_fec': 'true' if fec_supported else None,
# 'verity_block_device' needs to be present to indicate a verity-enabled
# partition.
'verity_block_device': '',
# We don't need the following properties that are needed for signing the
# verity metadata.
'verity_key': '',
'verity_signer_cmd': None,
}
self.verity_image_builder = CreateVerityImageBuilder(prop_dict)
self.hashtree_info = HashtreeInfo()
def DecomposeSparseImage(self, image):
"""Calculate the verity size based on the size of the input image.
Since we already know the structure of a verity enabled image to be:
[filesystem, verity_hashtree, verity_metadata, fec_data]. We can then
calculate the size and offset of each section.
"""
self.image = image
assert self.block_size == image.blocksize
assert self.partition_size == image.total_blocks * self.block_size, \
"partition size {} doesn't match with the calculated image size." \
" total_blocks: {}".format(self.partition_size, image.total_blocks)
adjusted_size = self.verity_image_builder.CalculateMaxImageSize()
assert adjusted_size % self.block_size == 0
verity_tree_size = GetVerityTreeSize(adjusted_size)
assert verity_tree_size % self.block_size == 0
metadata_size = GetVerityMetadataSize(adjusted_size)
assert metadata_size % self.block_size == 0
self.filesystem_size = adjusted_size
self.hashtree_size = verity_tree_size
self.metadata_size = metadata_size
self.hashtree_info.filesystem_range = RangeSet(
data=[0, adjusted_size // self.block_size])
self.hashtree_info.hashtree_range = RangeSet(
data=[adjusted_size // self.block_size,
(adjusted_size + verity_tree_size) // self.block_size])
def _ParseHashtreeMetadata(self):
"""Parses the hash_algorithm, root_hash, salt from the metadata block."""
metadata_start = self.filesystem_size + self.hashtree_size
metadata_range = RangeSet(
data=[metadata_start // self.block_size,
(metadata_start + self.metadata_size) // self.block_size])
meta_data = b''.join(self.image.ReadRangeSet(metadata_range))
# More info about the metadata structure available in:
# system/extras/verity/build_verity_metadata.py
META_HEADER_SIZE = 268
header_bin = meta_data[0:META_HEADER_SIZE]
header = struct.unpack("II256sI", header_bin)
# header: magic_number, version, signature, table_len
assert header[0] == 0xb001b001, header[0]
table_len = header[3]
verity_table = meta_data[META_HEADER_SIZE: META_HEADER_SIZE + table_len]
table_entries = verity_table.rstrip().split()
# Expected verity table format: "1 block_device block_device block_size
# block_size data_blocks data_blocks hash_algorithm root_hash salt"
assert len(table_entries) == 10, "Unexpected verity table size {}".format(
len(table_entries))
assert (int(table_entries[3]) == self.block_size and
int(table_entries[4]) == self.block_size)
assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
int(table_entries[6]) * self.block_size == self.filesystem_size)
self.hashtree_info.hash_algorithm = table_entries[7].decode()
self.hashtree_info.root_hash = table_entries[8].decode()
self.hashtree_info.salt = table_entries[9].decode()
def ValidateHashtree(self):
"""Checks that we can reconstruct the verity hash tree."""
# Writes the filesystem section to a temp file; and calls the executable
# build_verity_tree to construct the hash tree.
adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
with open(adjusted_partition, "wb") as fd:
self.image.WriteRangeDataToFd(self.hashtree_info.filesystem_range, fd)
generated_verity_tree = common.MakeTempFile(prefix="verity")
root_hash, salt = BuildVerityTree(adjusted_partition, generated_verity_tree)
# The salt should be always identical, as we use fixed value.
assert salt == self.hashtree_info.salt, \
"Calculated salt {} doesn't match the one in metadata {}".format(
salt, self.hashtree_info.salt)
if root_hash != self.hashtree_info.root_hash:
logger.warning(
"Calculated root hash %s doesn't match the one in metadata %s",
root_hash, self.hashtree_info.root_hash)
return False
# Reads the generated hash tree and checks if it has the exact same bytes
# as the one in the sparse image.
with open(generated_verity_tree, 'rb') as fd:
return fd.read() == b''.join(self.image.ReadRangeSet(
self.hashtree_info.hashtree_range))
def Generate(self, image):
"""Parses and validates the hashtree info in a sparse image.
Returns:
hashtree_info: The information needed to reconstruct the hashtree.
Raises:
HashtreeInfoGenerationError: If we fail to generate the exact bytes of
the hashtree.
"""
self.DecomposeSparseImage(image)
self._ParseHashtreeMetadata()
if not self.ValidateHashtree():
raise HashtreeInfoGenerationError("Failed to reconstruct the verity tree")
return self.hashtree_info
def CreateCustomImageBuilder(info_dict, partition_name, partition_size,
key_path, algorithm, signing_args):
builder = None
if info_dict.get("avb_enable") == "true":
builder = VerifiedBootVersion2VerityImageBuilder(
partition_name,
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
info_dict.get("avb_avbtool"),
key_path,
algorithm,
# Salt is None because custom images have no fingerprint property to be
# used as the salt.
None,
signing_args)
return builder
| [((28, 9, 28, 36), 'logging.getLogger', 'logging.getLogger', ({(28, 27, 28, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((44, 11, 44, 55), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (), '', False, 'import common\n'), ((50, 11, 50, 55), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (), '', False, 'import common\n'), ((56, 11, 56, 55), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (), '', False, 'import common\n'), ((71, 9, 71, 60), 'sparse_img.SparseImage', 'sparse_img.SparseImage', (), '', False, 'import sparse_img\n'), ((78, 9, 78, 72), 'sparse_img.SparseImage', 'sparse_img.SparseImage', (), '', False, 'import sparse_img\n'), ((86, 2, 86, 31), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', ({(86, 27, 86, 30): 'cmd'}, {}), '(cmd)', False, 'import common\n'), ((92, 11, 92, 40), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', ({(92, 36, 92, 39): 'cmd'}, {}), '(cmd)', False, 'import common\n'), ((106, 2, 106, 31), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', ({(106, 27, 106, 30): 'cmd'}, {}), '(cmd)', False, 'import common\n'), ((121, 4, 121, 33), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', ({(121, 29, 121, 32): 'cmd'}, {}), '(cmd)', False, 'import common\n'), ((315, 19, 315, 62), 'common.MakeTempDir', 'common.MakeTempDir', (), '', False, 'import common\n'), ((467, 11, 467, 26), 'common.Run', 'common.Run', ({(467, 22, 467, 25): 'cmd'}, {}), '(cmd)', False, 'import common\n'), ((501, 11, 501, 26), 'common.Run', 'common.Run', ({(501, 22, 501, 25): 'cmd'}, {}), '(cmd)', False, 'import common\n'), ((611, 42, 612, 51), 'rangelib.RangeSet', 'RangeSet', (), '', False, 'from rangelib import RangeSet\n'), ((613, 40, 615, 69), 'rangelib.RangeSet', 'RangeSet', (), '', False, 'from rangelib import RangeSet\n'), ((621, 21, 623, 72), 'rangelib.RangeSet', 'RangeSet', (), '', False, 'from rangelib import RangeSet\n'), ((630, 13, 630, 49), 'struct.unpack', 'struct.unpack', ({(630, 27, 630, 36): '"""II256sI"""', (630, 38, 630, 48): 'header_bin'}, {}), "('II256sI', header_bin)", False, 'import struct\n'), ((656, 25, 656, 73), 'common.MakeTempFile', 'common.MakeTempFile', (), '', False, 'import common\n'), ((660, 28, 660, 64), 'common.MakeTempFile', 'common.MakeTempFile', (), '', False, 'import common\n'), ((465, 15, 465, 45), 'shlex.split', 'shlex.split', ({(465, 27, 465, 44): 'self.signing_args'}, {}), '(self.signing_args)', False, 'import shlex\n'), ((499, 15, 499, 45), 'shlex.split', 'shlex.split', ({(499, 27, 499, 44): 'self.signing_args'}, {}), '(self.signing_args)', False, 'import shlex\n')] |
tkf/orgviz | orgviz/dones.py | 81a436265daa1fb8294a0186f50df76d9599ae38 | #!/usr/bin/env python
"""org archive to html table converter"""
import os
import datetime
import itertools
from .utils.date import minutestr, total_minutes
def rootname_from_archive_olpath(node):
"""
Find rootname from ARCHIVE_OLPATH property.
Return None if not found.
"""
olpath = node.get_property('ARCHIVE_OLPATH')
if olpath:
olpathlist = olpath.split('/', 1)
if len(olpathlist) > 1:
(rootname, dummy) = olpathlist
else:
rootname = olpath
return rootname
return None
def find_rootname(node):
"""
Find rootname given node
"""
rootname = rootname_from_archive_olpath(node)
if not rootname:
n = node
p = node.parent
while not p.is_root():
n = p
p = p.parent
# n is root node
rootname = rootname_from_archive_olpath(n) or n.heading
return rootname
def key_row_from_node(node):
"""
    Return a two-tuple (key, row) whose elements are
    a key object for sorting the table and a dictionary which has the following
keywords: heading, closed, scheduled, effort, clocksum, rootname.
"""
heading = node.heading
# find rootname
rootname = find_rootname(node)
if heading == rootname:
rootname = ""
# calc clocksum if CLOCK exists
clocksum = ''
clocklist = node.clock
if clocklist:
clocksum = sum([total_minutes(c.duration) for c in clocklist])
closed = node.closed
scheduled = node.scheduled
effort = node.get_property('Effort')
row = dict(
heading=heading,
closed=closed and closed.start.strftime('%a %d %b %H:%M'),
scheduled=scheduled and scheduled.start.strftime('%a %d %b %H:%M'),
effort=effort and minutestr(effort),
clocksum=clocksum and minutestr(clocksum),
rootname=rootname,
)
return (closed.start if closed else None, row)
def unique_name_from_paths(pathlist):
namelist = []
for path in pathlist:
name = os.path.basename(path)
if name in namelist:
name_orig = name
i = 1
            while name in namelist:
name = "%s <%d>" % (name_orig, i)
i += 1
namelist.append(name)
return namelist
def sameday(dt1, dt2):
return (isinstance(dt1, datetime.date) and
isinstance(dt2, datetime.date) and
dt1.year == dt2.year and
dt1.month == dt2.month and
dt1.day == dt2.day)
def table_add_oddday(key_table):
"""
Add oddday key in each rows of key_table *IN PLACE*.
Note that key should be a ``datetime.date`` object.
"""
previous = None
odd = True
for (key, row) in key_table:
this = key
if not sameday(this, previous):
odd = not odd
row['oddday'] = odd
previous = this
def get_data(orgnodes_list, orgpath_list, done, num=100):
"""
Get data for rendering jinja2 template. Data is dictionary like this:
table: list of `row`
        list of rows generated by ``key_row_from_node``
orgpathname_list: list of `orgpathname`
orgpathname: dict
contains `orgpath` and `orgname`.
`orgname` is short and unique name for `orgpath`.
title: str
a title
"""
key_table = []
orgname_list = unique_name_from_paths(orgpath_list)
for (nodelist, orgname) in zip(orgnodes_list, orgname_list):
for node in nodelist:
if node.todo == done:
(key, row) = key_row_from_node(node)
if key:
row['orgname'] = orgname
key_table.append((key, row))
orgpathname_list = [
dict(orgpath=orgpath, orgname=orgname)
for (orgpath, orgname) in zip(orgpath_list, orgname_list)]
key_table.sort(reverse=True)
table_add_oddday(key_table)
table = list(itertools.islice((row for (key, row) in key_table), num))
return dict(table=table, orgpathname_list=orgpathname_list,
title='Recently archived tasks')
| [((76, 15, 76, 37), 'os.path.basename', 'os.path.basename', ({(76, 32, 76, 36): 'path'}, {}), '(path)', False, 'import os\n'), ((138, 17, 138, 73), 'itertools.islice', 'itertools.islice', ({(138, 34, 138, 67): '(row for key, row in key_table)', (138, 69, 138, 72): 'num'}, {}), '((row for key, row in key_table), num)', False, 'import itertools\n')] |
ravikumarvc/incubator-tvm | tests/python/unittest/test_lang_tag.py | 9826947ffce0ed40e9d47a0db2abb033e394279e | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import tvm
@tvm.tag_scope(tag="conv")
def compute_conv(data, weight):
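    # data is laid out NCHW and weight OIHW; the result is a "valid" convolution
    # (no padding, stride 1) whose op is tagged "conv" by the enclosing
    # tvm.tag_scope decorator.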
N, IC, H, W = data.shape
OC, IC, KH, KW = weight.shape
OH = H - KH + 1
OW = W - KW + 1
ic = tvm.reduce_axis((0, IC), name='ic')
dh = tvm.reduce_axis((0, KH), name='dh')
dw = tvm.reduce_axis((0, KW), name='dw')
return tvm.compute((N, OC, OH, OW), lambda i, oc, h, w: \
tvm.sum(data[i, ic, h+dh, w+dw] * weight[oc, ic, dh, dw],
axis=[ic, dh, dw]))
def test_with():
n = tvm.size_var('n')
m = tvm.size_var('m')
l = tvm.size_var('l')
A = tvm.placeholder((n, l), name='A')
B = tvm.placeholder((m, l), name='B')
with tvm.tag_scope(tag="gemm"):
k = tvm.reduce_axis((0, l), name='k')
C = tvm.compute((n, m), lambda i, j: tvm.sum(A[i, k] * B[j, k], axis=k),
attrs={"hello" : 1, "arr": [10, 12]})
assert C.op.tag == 'gemm'
assert "hello" in C.op.attrs
assert "xx" not in C.op.attrs
assert C.op.attrs["hello"].value == 1
CC = tvm.load_json(tvm.save_json(C))
assert CC.op.attrs["hello"].value == 1
assert CC.op.attrs["arr"][0].value == 10
# str format happened to be json compatible
assert json.loads(str(CC.op.attrs))["arr"][1] == 12
def test_decorator():
n = tvm.size_var('n')
c = tvm.size_var('c')
h = tvm.size_var('h')
w = tvm.size_var('w')
kh = tvm.size_var('kh')
kw = tvm.size_var('kw')
A = tvm.placeholder((n, c, h, w), name='A')
B = tvm.placeholder((c, c, kh, kw), name='B')
C = compute_conv(A, B)
assert C.op.tag == 'conv'
assert len(C.op.attrs) == 0
def test_nested():
n = tvm.size_var('n')
c = tvm.size_var('c')
h = tvm.size_var('h')
w = tvm.size_var('w')
kh = tvm.size_var('kh')
kw = tvm.size_var('kw')
A = tvm.placeholder((n, c, h, w), name='A')
B = tvm.placeholder((c, c, kh, kw), name='B')
try:
with tvm.tag_scope(tag='conv'):
C = compute_conv(A, B)
assert False
except ValueError:
pass
if __name__ == "__main__":
test_with()
test_decorator()
test_nested()
| [((20, 1, 20, 26), 'tvm.tag_scope', 'tvm.tag_scope', (), '', False, 'import tvm\n'), ((27, 9, 27, 44), 'tvm.reduce_axis', 'tvm.reduce_axis', (), '', False, 'import tvm\n'), ((28, 9, 28, 44), 'tvm.reduce_axis', 'tvm.reduce_axis', (), '', False, 'import tvm\n'), ((29, 9, 29, 44), 'tvm.reduce_axis', 'tvm.reduce_axis', (), '', False, 'import tvm\n'), ((36, 8, 36, 25), 'tvm.size_var', 'tvm.size_var', ({(36, 21, 36, 24): '"""n"""'}, {}), "('n')", False, 'import tvm\n'), ((37, 8, 37, 25), 'tvm.size_var', 'tvm.size_var', ({(37, 21, 37, 24): '"""m"""'}, {}), "('m')", False, 'import tvm\n'), ((38, 8, 38, 25), 'tvm.size_var', 'tvm.size_var', ({(38, 21, 38, 24): '"""l"""'}, {}), "('l')", False, 'import tvm\n'), ((40, 8, 40, 41), 'tvm.placeholder', 'tvm.placeholder', (), '', False, 'import tvm\n'), ((41, 8, 41, 41), 'tvm.placeholder', 'tvm.placeholder', (), '', False, 'import tvm\n'), ((59, 8, 59, 25), 'tvm.size_var', 'tvm.size_var', ({(59, 21, 59, 24): '"""n"""'}, {}), "('n')", False, 'import tvm\n'), ((60, 8, 60, 25), 'tvm.size_var', 'tvm.size_var', ({(60, 21, 60, 24): '"""c"""'}, {}), "('c')", False, 'import tvm\n'), ((61, 8, 61, 25), 'tvm.size_var', 'tvm.size_var', ({(61, 21, 61, 24): '"""h"""'}, {}), "('h')", False, 'import tvm\n'), ((62, 8, 62, 25), 'tvm.size_var', 'tvm.size_var', ({(62, 21, 62, 24): '"""w"""'}, {}), "('w')", False, 'import tvm\n'), ((63, 9, 63, 27), 'tvm.size_var', 'tvm.size_var', ({(63, 22, 63, 26): '"""kh"""'}, {}), "('kh')", False, 'import tvm\n'), ((64, 9, 64, 27), 'tvm.size_var', 'tvm.size_var', ({(64, 22, 64, 26): '"""kw"""'}, {}), "('kw')", False, 'import tvm\n'), ((66, 8, 66, 47), 'tvm.placeholder', 'tvm.placeholder', (), '', False, 'import tvm\n'), ((67, 8, 67, 49), 'tvm.placeholder', 'tvm.placeholder', (), '', False, 'import tvm\n'), ((73, 8, 73, 25), 'tvm.size_var', 'tvm.size_var', ({(73, 21, 73, 24): '"""n"""'}, {}), "('n')", False, 'import tvm\n'), ((74, 8, 74, 25), 'tvm.size_var', 'tvm.size_var', ({(74, 21, 74, 24): '"""c"""'}, {}), "('c')", False, 'import tvm\n'), ((75, 8, 75, 25), 'tvm.size_var', 'tvm.size_var', ({(75, 21, 75, 24): '"""h"""'}, {}), "('h')", False, 'import tvm\n'), ((76, 8, 76, 25), 'tvm.size_var', 'tvm.size_var', ({(76, 21, 76, 24): '"""w"""'}, {}), "('w')", False, 'import tvm\n'), ((77, 9, 77, 27), 'tvm.size_var', 'tvm.size_var', ({(77, 22, 77, 26): '"""kh"""'}, {}), "('kh')", False, 'import tvm\n'), ((78, 9, 78, 27), 'tvm.size_var', 'tvm.size_var', ({(78, 22, 78, 26): '"""kw"""'}, {}), "('kw')", False, 'import tvm\n'), ((80, 8, 80, 47), 'tvm.placeholder', 'tvm.placeholder', (), '', False, 'import tvm\n'), ((81, 8, 81, 49), 'tvm.placeholder', 'tvm.placeholder', (), '', False, 'import tvm\n'), ((42, 9, 42, 34), 'tvm.tag_scope', 'tvm.tag_scope', (), '', False, 'import tvm\n'), ((43, 12, 43, 45), 'tvm.reduce_axis', 'tvm.reduce_axis', (), '', False, 'import tvm\n'), ((51, 23, 51, 39), 'tvm.save_json', 'tvm.save_json', ({(51, 37, 51, 38): 'C'}, {}), '(C)', False, 'import tvm\n'), ((32, 8, 33, 34), 'tvm.sum', 'tvm.sum', (), '', False, 'import tvm\n'), ((83, 13, 83, 38), 'tvm.tag_scope', 'tvm.tag_scope', (), '', False, 'import tvm\n'), ((44, 45, 44, 79), 'tvm.sum', 'tvm.sum', (), '', False, 'import tvm\n')] |
scwolof/doepy | doepy/case_studies/discrete_time/MSFB2014.py | acb2cad95428de2c14b28563cff1aa30679e1f39 | """
MIT License
Copyright (c) 2019 Simon Olofsson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from scipy.integrate import odeint
from ..continuous_time import MSFB2014
"""
A. Mesbah, S. Streif, R. Findeisen and R. D. Braatz (2014)
"Active fault diagnosis for nonlinear systems with probabilistic uncertainties"
IFAC Proceedings (2014): 7079-7084
"""
class Model (MSFB2014.Model):
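    # Discrete-time wrapper: __call__ integrates the continuous-time ODE from
    # the MSFB2014 continuous-time module over [0, self.dt] and returns the
    # state at the end of the interval (self.dt is assumed to be provided by
    # the parent MSFB2014.Model).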
def __init__ (self, name):
super().__init__(name)
def __call__ (self, x, u, p):
f = lambda x,t: self._ode_func(x,u,p)
t = np.linspace(0, self.dt, 51)
X = odeint(f, x, t)
return X[-1]
class M1 (Model):
"""
Nominal scenario (no fault)
"""
def __init__ (self):
super().__init__('M1')
self._ode_func = MSFB2014.M1()
self.p0 = self._ode_func.p0
class M2 (Model):
"""
Multiplicative actuator fault in inlet pump
"""
def __init__ (self):
super().__init__('M2')
self._ode_func = MSFB2014.M2()
self.p0 = self._ode_func.p0
class M3 (Model):
"""
Circular leak in tank
"""
def __init__ (self):
super().__init__('M3')
self._ode_func = MSFB2014.M3()
self.p0 = self._ode_func.p0
class DataGen (M2):
def __init__ (self):
super().__init__()
self.true_param = np.array([ 0.97, 0.82, 0.96, 0.67 ])
def __call__ (self, x, u):
return super().__call__(x, u, self.true_param)
def get ():
return DataGen(), [M1(), M2(), M3()] | [((42, 6, 42, 33), 'numpy.linspace', 'np.linspace', ({(42, 18, 42, 19): '0', (42, 21, 42, 28): 'self.dt', (42, 30, 42, 32): '51'}, {}), '(0, self.dt, 51)', True, 'import numpy as np\n'), ((43, 6, 43, 21), 'scipy.integrate.odeint', 'odeint', ({(43, 13, 43, 14): 'f', (43, 16, 43, 17): 'x', (43, 19, 43, 20): 't'}, {}), '(f, x, t)', False, 'from scipy.integrate import odeint\n'), ((80, 20, 80, 56), 'numpy.array', 'np.array', ({(80, 29, 80, 55): '[0.97, 0.82, 0.96, 0.67]'}, {}), '([0.97, 0.82, 0.96, 0.67])', True, 'import numpy as np\n')] |
mukobi/Pozyx-Gabe | house_code/tutorials_altered/3D_positioning_and_orientation.py | a8b444c2013b1df5043cd25106b72562409b5130 | #!/usr/bin/env python
"""
The pozyx ranging demo (c) Pozyx Labs
please check out https://www.pozyx.io/Documentation/Tutorials/getting_started/Python
This demo requires one (or two) pozyx shields. It demonstrates the 3D orientation and the functionality
to remotely read register data from a pozyx device. Connect one of the Pozyx devices with USB and run this script.
This demo reads the following sensor data:
- pressure
- acceleration
- magnetic field strength
- angular velocity
- the heading, roll and pitch
- the quaternion rotation describing the 3D orientation of the device. This can be used to transform from the body coordinate system to the world coordinate system.
- the linear acceleration (the acceleration excluding gravity)
- the gravitational vector
The data can be viewed in the Processing sketch orientation_3D.pde
"""
from time import time
from time import sleep
from pypozyx import *
from pypozyx.definitions.bitmasks import POZYX_INT_MASK_IMU
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.udp_client import SimpleUDPClient
from modules.user_input_config_functions import UserInputConfigFunctions as UserInput
from modules.file_writing import SensorAndPositionFileWriting as FileWriting
from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging
import time as t
class Orientation3D(object):
"""Reads out all sensor data from either a local or remote Pozyx"""
def __init__(self, pozyx, osc_udp_client, anchors, algorithm=POZYX_POS_ALG_UWB_ONLY,
dimension=POZYX_3D, height=1000, remote_id=None):
self.pozyx = pozyx
self.osc_udp_client = osc_udp_client
self.anchors = anchors
self.algorithm = algorithm
self.dimension = dimension
self.height = height
self.remote_id = remote_id
def setup(self):
"""There is no specific setup functionality"""
self.current_time = time()
"""Sets up the Pozyx for positioning by calibrating its anchor list."""
print("------------POZYX POSITIONING V1.0 -------------")
print("NOTES: ")
print("- No parameters required.")
print()
print("- System will auto start configuration")
print()
print("- System will auto start positioning")
print("------------POZYX POSITIONING V1.0 --------------")
print()
print("START Ranging: ")
self.pozyx.clearDevices(self.remote_id)
self.setAnchorsManual()
self.printPublishConfigurationResult()
def loop(self):
"""Gets new IMU sensor data"""
# check sensor data status
sensor_data = SensorData()
calibration_status = SingleRegister()
if self.remote_id is not None or self.pozyx.checkForFlag(POZYX_INT_MASK_IMU, 0.01) == POZYX_SUCCESS:
status = self.pozyx.getAllSensorData(sensor_data, self.remote_id)
status &= self.pozyx.getCalibrationStatus(calibration_status, self.remote_id)
if status == POZYX_SUCCESS:
# check position status
position = Coordinates()
status = self.pozyx.doPositioning(
position, self.dimension, self.height, self.algorithm, remote_id=self.remote_id)
if status == POZYX_SUCCESS:
# self.print_publish_position(position)
self.publishSensorData(sensor_data, calibration_status)
return sensor_data, position
else:
pass
# self.print_publish_error_code("positioning")
return "Error, no data to print for this line"
def publishSensorData(self, sensor_data, calibration_status):
"""Makes the OSC sensor data package and publishes it"""
self.msg_builder = OscMessageBuilder("/sensordata")
self.msg_builder.add_arg(int(1000 * (time() - self.current_time)))
current_time = time()
self.addSensorData(sensor_data)
self.addCalibrationStatus(calibration_status)
self.osc_udp_client.send(self.msg_builder.build())
def addSensorData(self, sensor_data):
"""Adds the sensor data to the OSC message"""
self.msg_builder.add_arg(sensor_data.pressure)
self.addComponentsOSC(sensor_data.acceleration)
self.addComponentsOSC(sensor_data.magnetic)
self.addComponentsOSC(sensor_data.angular_vel)
self.addComponentsOSC(sensor_data.euler_angles)
self.addComponentsOSC(sensor_data.quaternion)
self.addComponentsOSC(sensor_data.linear_acceleration)
self.addComponentsOSC(sensor_data.gravity_vector)
def addComponentsOSC(self, component):
"""Adds a sensor data component to the OSC message"""
for data in component.data:
self.msg_builder.add_arg(float(data))
def addCalibrationStatus(self, calibration_status):
"""Adds the calibration status data to the OSC message"""
self.msg_builder.add_arg(calibration_status[0] & 0x03)
self.msg_builder.add_arg((calibration_status[0] & 0x0C) >> 2)
self.msg_builder.add_arg((calibration_status[0] & 0x30) >> 4)
self.msg_builder.add_arg((calibration_status[0] & 0xC0) >> 6)
def setAnchorsManual(self):
"""Adds the manually measured anchors to the Pozyx's device list one for one."""
status = self.pozyx.clearDevices(self.remote_id)
for anchor in self.anchors:
status &= self.pozyx.addDevice(anchor, self.remote_id)
        if len(self.anchors) > 4:
            status &= self.pozyx.setSelectionOfAnchors(POZYX_ANCHOR_SEL_AUTO, len(self.anchors))
return status
def printPublishConfigurationResult(self):
"""Prints and potentially publishes the anchor configuration result in a human-readable way."""
list_size = SingleRegister()
status = self.pozyx.getDeviceListSize(list_size, self.remote_id)
print("List size: {0}".format(list_size[0]))
if list_size[0] != len(self.anchors):
self.printPublishErrorCode("configuration")
return
device_list = DeviceList(list_size=list_size[0])
status = self.pozyx.getDeviceIds(device_list, self.remote_id)
print("Calibration result:")
print("Anchors found: {0}".format(list_size[0]))
print("Anchor IDs: ", device_list)
for i in range(list_size[0]):
anchor_coordinates = Coordinates()
status = self.pozyx.getDeviceCoordinates(
device_list[i], anchor_coordinates, self.remote_id)
print("ANCHOR,0x%0.4x, %s" % (device_list[i], str(anchor_coordinates)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message(
"/anchor", [device_list[i], int(anchor_coordinates.x), int(anchor_coordinates.y), int(anchor_coordinates.z)])
sleep(0.025)
def printPublishErrorCode(self, operation):
"""Prints the Pozyx's error and possibly sends it as a OSC packet"""
error_code = SingleRegister()
network_id = self.remote_id
if network_id is None:
self.pozyx.getErrorCode(error_code)
print("ERROR %s, local error code %s" % (operation, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message("/error", [operation, 0, error_code[0]])
return
status = self.pozyx.getErrorCode(error_code, self.remote_id)
if status == POZYX_SUCCESS:
print("ERROR %s on ID %s, error code %s" %
(operation, "0x%0.4x" % network_id, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message(
"/error", [operation, network_id, error_code[0]])
else:
self.pozyx.getErrorCode(error_code)
print("ERROR %s, couldn't retrieve remote error code, local error code %s" %
(operation, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message("/error", [operation, 0, -1])
# should only happen when not being able to communicate with a remote Pozyx.
if __name__ == '__main__':
# shortcut to not have to find out the port yourself
serial_port = get_serial_ports()[0].device
remote_id = 0x6110 # remote device network ID
remote = True # whether to use a remote device
# if not remote:
# remote_id = None
index = 0
previous_cycle_time = 0
current_cycle_time = 0
attributes_to_log = ["acceleration"]
to_use_file = False
filename = None
"""User input configuration section, comment out to use above settings"""
remote = UserInput.use_remote()
remote_id = UserInput.get_remote_id(remote)
to_use_file = UserInput.use_file()
filename = UserInput.get_filename(to_use_file)
attributes_to_log = UserInput.get_multiple_attributes_to_log()
use_processing = True
ip = "127.0.0.1"
network_port = 8888
anchors = [DeviceCoordinates(0x6863, 1, Coordinates(0, 0, 2000)),
DeviceCoordinates(0x615a, 1, Coordinates(0, 18288, 1000)),
DeviceCoordinates(0x607c, 1, Coordinates(18288, 0, 1000)),
DeviceCoordinates(0x6134, 1, Coordinates(18288, 18288, 2000))]
# algorithm = POZYX_POS_ALG_UWB_ONLY # positioning algorithm to use
algorithm = POZYX_POS_ALG_TRACKING # tracking positioning algorithm
dimension = POZYX_3D # positioning dimension
height = 1000 # height of device, required in 2.5D positioning
pozyx = PozyxSerial(serial_port)
osc_udp_client = SimpleUDPClient(ip, network_port)
o = Orientation3D(pozyx, osc_udp_client, anchors, algorithm, dimension, height, remote_id)
o.setup()
logfile = None
if to_use_file:
logfile = open(filename, 'a')
FileWriting.write_sensor_and_position_header_to_file(logfile)
start = ConsoleLogging.get_time()
try:
while True:
# updates elapsed time and time difference
elapsed = ConsoleLogging.get_elapsed_time(ConsoleLogging, start)
previous_cycle_time = current_cycle_time
current_cycle_time = elapsed
time_difference = current_cycle_time - previous_cycle_time
            # store o.loop() results: either a tuple or an error message
loop_results = o.loop()
if type(loop_results) == tuple:
one_cycle_sensor_data, one_cycle_position = loop_results
formatted_data_dictionary = ConsoleLogging.format_sensor_data(
one_cycle_sensor_data, attributes_to_log)
if type(formatted_data_dictionary) == dict:
formatted_data_dictionary["Position"] = [
"x:", one_cycle_position.x, "y:", one_cycle_position.y, "z:", one_cycle_position.z]
ConsoleLogging.log_sensor_data_to_console(index, elapsed, formatted_data_dictionary)
if to_use_file:
FileWriting.write_sensor_and_position_data_to_file(
index, elapsed, time_difference,
logfile, one_cycle_sensor_data, one_cycle_position)
            # if loop() didn't return a tuple, it returned an error string
else:
error_string = loop_results
ConsoleLogging.print_data_error_message(index, elapsed, error_string)
index += 1 # increment data index
    # this allows Windows users to exit the while loop by pressing ctrl+c
except KeyboardInterrupt:
pass
if to_use_file:
logfile.close()
| [((201, 13, 201, 35), 'modules.user_input_config_functions.UserInputConfigFunctions.use_remote', 'UserInput.use_remote', ({}, {}), '()', True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((202, 16, 202, 47), 'modules.user_input_config_functions.UserInputConfigFunctions.get_remote_id', 'UserInput.get_remote_id', ({(202, 40, 202, 46): 'remote'}, {}), '(remote)', True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((203, 18, 203, 38), 'modules.user_input_config_functions.UserInputConfigFunctions.use_file', 'UserInput.use_file', ({}, {}), '()', True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((204, 15, 204, 50), 'modules.user_input_config_functions.UserInputConfigFunctions.get_filename', 'UserInput.get_filename', ({(204, 38, 204, 49): 'to_use_file'}, {}), '(to_use_file)', True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((205, 24, 205, 66), 'modules.user_input_config_functions.UserInputConfigFunctions.get_multiple_attributes_to_log', 'UserInput.get_multiple_attributes_to_log', ({}, {}), '()', True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((222, 21, 222, 54), 'pythonosc.udp_client.SimpleUDPClient', 'SimpleUDPClient', ({(222, 37, 222, 39): 'ip', (222, 41, 222, 53): 'network_port'}, {}), '(ip, network_port)', False, 'from pythonosc.udp_client import SimpleUDPClient\n'), ((231, 12, 231, 37), 'modules.console_logging_functions.ConsoleLoggingFunctions.get_time', 'ConsoleLogging.get_time', ({}, {}), '()', True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((50, 28, 50, 34), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((91, 27, 91, 59), 'pythonosc.osc_message_builder.OscMessageBuilder', 'OscMessageBuilder', ({(91, 45, 91, 58): '"""/sensordata"""'}, {}), "('/sensordata')", False, 'from pythonosc.osc_message_builder import OscMessageBuilder\n'), ((93, 23, 93, 29), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((229, 8, 229, 69), 'modules.file_writing.SensorAndPositionFileWriting.write_sensor_and_position_header_to_file', 'FileWriting.write_sensor_and_position_header_to_file', ({(229, 61, 229, 68): 'logfile'}, {}), '(logfile)', True, 'from modules.file_writing import SensorAndPositionFileWriting as FileWriting\n'), ((235, 22, 235, 76), 'modules.console_logging_functions.ConsoleLoggingFunctions.get_elapsed_time', 'ConsoleLogging.get_elapsed_time', ({(235, 54, 235, 68): 'ConsoleLogging', (235, 70, 235, 75): 'start'}, {}), '(ConsoleLogging, start)', True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((153, 16, 153, 28), 'time.sleep', 'sleep', ({(153, 22, 153, 27): '(0.025)'}, {}), '(0.025)', False, 'from time import sleep\n'), ((246, 44, 247, 61), 'modules.console_logging_functions.ConsoleLoggingFunctions.format_sensor_data', 'ConsoleLogging.format_sensor_data', ({(247, 20, 247, 41): 'one_cycle_sensor_data', (247, 43, 247, 60): 'attributes_to_log'}, {}), '(one_cycle_sensor_data, attributes_to_log)', True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((251, 16, 251, 100), 'modules.console_logging_functions.ConsoleLoggingFunctions.log_sensor_data_to_console', 'ConsoleLogging.log_sensor_data_to_console', ({(251, 58, 251, 63): 'index', (251, 65, 251, 72): 'elapsed', (251, 
74, 251, 99): 'formatted_data_dictionary'}, {}), '(index, elapsed,\n formatted_data_dictionary)', True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((259, 16, 259, 85), 'modules.console_logging_functions.ConsoleLoggingFunctions.print_data_error_message', 'ConsoleLogging.print_data_error_message', ({(259, 56, 259, 61): 'index', (259, 63, 259, 70): 'elapsed', (259, 72, 259, 84): 'error_string'}, {}), '(index, elapsed, error_string)', True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((253, 20, 255, 75), 'modules.file_writing.SensorAndPositionFileWriting.write_sensor_and_position_data_to_file', 'FileWriting.write_sensor_and_position_data_to_file', ({(254, 24, 254, 29): 'index', (254, 31, 254, 38): 'elapsed', (254, 40, 254, 55): 'time_difference', (255, 24, 255, 31): 'logfile', (255, 33, 255, 54): 'one_cycle_sensor_data', (255, 56, 255, 74): 'one_cycle_position'}, {}), '(index, elapsed,\n time_difference, logfile, one_cycle_sensor_data, one_cycle_position)', True, 'from modules.file_writing import SensorAndPositionFileWriting as FileWriting\n'), ((92, 45, 92, 51), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n')] |
Jasper912/jupyter-hdfs-kernel | hdfs_kernel/exceptions.py | 4b933cab675cb908a1d2332f040c7fce697fce61 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
#
# Author: huangnj
# Time: 2019/09/27
import traceback
from functools import wraps
from hdfs_kernel.constants import EXPECTED_ERROR_MSG, INTERNAL_ERROR_MSG
from hdfs.util import HdfsError
# == EXCEPTIONS ==
class SessionManagementException(Exception):
pass
class CommandNotAllowedException(Exception):
pass
class CommandExecuteException(Exception):
pass
# option parse Error
class OptionParsingError(RuntimeError):
pass
class OptionParsingExit(Exception):
def __init__(self, status, msg):
self.msg = msg
self.status = status
# == DECORATORS FOR EXCEPTION HANDLING ==
EXPECTED_EXCEPTIONS = [HdfsError, SessionManagementException, CommandNotAllowedException,
CommandExecuteException, OptionParsingExit, OptionParsingError]
def handle_expected_exceptions(f):
"""A decorator that handles expected exceptions. Self can be any object with
an "ipython_display" attribute.
Usage:
@handle_expected_exceptions
def fn(self, ...):
etc..."""
exceptions_to_handle = tuple(EXPECTED_EXCEPTIONS)
# Notice that we're NOT handling e.DataFrameParseException here. That's because DataFrameParseException
# is an internal error that suggests something is wrong with LivyClientLib's implementation.
@wraps(f)
def wrapped(self, *args, **kwargs):
try:
out = f(self, *args, **kwargs)
except exceptions_to_handle as err:
# Do not log! as some messages may contain private client information
self.send_error(EXPECTED_ERROR_MSG.format(err))
return None
else:
return out
return wrapped
def wrap_unexpected_exceptions(f, execute_if_error=None):
"""A decorator that catches all exceptions from the function f and alerts the user about them.
    Self can be any object with a "logger" attribute and a "send_error" method.
All exceptions are logged as "unexpected" exceptions, and a request is made to the user to file an issue
at the Github repository. If there is an error, returns None if execute_if_error is None, or else
returns the output of the function execute_if_error.
Usage:
@wrap_unexpected_exceptions
def fn(self, ...):
..etc """
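    # Illustrative use of execute_if_error (the fallback name is hypothetical):
    #   safe_fn = wrap_unexpected_exceptions(fn, execute_if_error=lambda: fallback)
    #   safe_fn(obj)  # returns fallback instead of None when fn raises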
@wraps(f)
def wrapped(self, *args, **kwargs):
try:
out = f(self, *args, **kwargs)
except Exception as e:
self.logger.error(u"ENCOUNTERED AN INTERNAL ERROR: {}\n\tTraceback:\n{}".format(e, traceback.format_exc()))
self.send_error(INTERNAL_ERROR_MSG.format(e))
return None if execute_if_error is None else execute_if_error()
else:
return out
return wrapped
| [((50, 5, 50, 13), 'functools.wraps', 'wraps', ({(50, 11, 50, 12): 'f'}, {}), '(f)', False, 'from functools import wraps\n'), ((73, 5, 73, 13), 'functools.wraps', 'wraps', ({(73, 11, 73, 12): 'f'}, {}), '(f)', False, 'from functools import wraps\n'), ((56, 28, 56, 58), 'hdfs_kernel.constants.EXPECTED_ERROR_MSG.format', 'EXPECTED_ERROR_MSG.format', ({(56, 54, 56, 57): 'err'}, {}), '(err)', False, 'from hdfs_kernel.constants import EXPECTED_ERROR_MSG, INTERNAL_ERROR_MSG\n'), ((79, 28, 79, 56), 'hdfs_kernel.constants.INTERNAL_ERROR_MSG.format', 'INTERNAL_ERROR_MSG.format', ({(79, 54, 79, 55): 'e'}, {}), '(e)', False, 'from hdfs_kernel.constants import EXPECTED_ERROR_MSG, INTERNAL_ERROR_MSG\n'), ((78, 95, 78, 117), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n')] |
vishalvvr/transtats | dashboard/tests/test_inventory.py | ec71f40b338cab36eb907f6faba262dfeb858b80 | # Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from fixture import DjangoFixture
from fixture.style import NamedDataStyle
from fixture.django_testcase import FixtureTestCase
from dashboard.managers.inventory import InventoryManager
from dashboard.models import Product
from dashboard.tests.testdata.db_fixtures import (
LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData
)
db_fixture = DjangoFixture(style=NamedDataStyle())
class InventoryManagerTest(FixtureTestCase):
inventory_manager = InventoryManager()
fixture = db_fixture
datasets = [LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData]
def test_get_locales(self):
"""
Test get_locales
"""
japanese_locale = self.inventory_manager.get_locales(pick_locales=['ja_JP'])
self.assertEqual(len(japanese_locale), 1)
self.assertEqual(japanese_locale[0].lang_name, 'Japanese')
self.assertEqual(japanese_locale[0].locale_alias, 'ja')
self.assertEqual(japanese_locale[0].locale_script, 'Hani')
def test_get_active_locales_count(self):
"""
Test get_active_locales_count
"""
active_locales = self.inventory_manager.get_active_locales_count()
self.assertEqual(active_locales, 3)
def test_get_locale_alias(self):
"""
Test get_locale_alias
"""
locale_alias = self.inventory_manager.get_locale_alias('fr_FR')
self.assertEqual(locale_alias, 'fr')
locale_alias = self.inventory_manager.get_locale_alias('de_DE')
self.assertEqual(locale_alias, 'de_DE')
def test_get_alias_locale(self):
"""
Test get_alias_locale
"""
alias_locale = self.inventory_manager.get_alias_locale('fr')
self.assertEqual(alias_locale, 'fr_FR')
alias_locale = self.inventory_manager.get_alias_locale('de_DE')
self.assertEqual(alias_locale, 'de_DE')
def test_get_locales_set(self):
"""
Test get_locales_set
"""
active_locales, inactive_locales, aliases = \
self.inventory_manager.get_locales_set()
self.assertEqual(len(active_locales), 3)
self.assertEqual(len(inactive_locales), 1)
self.assertEqual(len(aliases), 4)
def test_get_locale_lang_tuple(self):
"""
Test get_locale_lang_tuple
"""
ru_tuple = ('ru_RU', 'Russian')
fr_tuple = ('fr_FR', 'French')
locale_lang_tuple = self.inventory_manager.get_locale_lang_tuple()
self.assertEqual(len(locale_lang_tuple), 3)
locale_lang_tuple = self.inventory_manager.get_locale_lang_tuple(locales=['fr_FR', 'ru_RU'])
self.assertEqual(len(locale_lang_tuple), 2)
self.assertTupleEqual(locale_lang_tuple[0], ru_tuple)
self.assertTupleEqual(locale_lang_tuple[1], fr_tuple)
def test_get_langset(self):
"""
Test get_get_langset
"""
lang_set = self.inventory_manager.get_langset(langset_slug='custom-set')
self.assertEqual(lang_set.lang_set_name, 'Custom Set')
self.assertEqual(lang_set.lang_set_color, 'Peru')
def test_get_langsets(self):
"""
Test get_langsets
"""
lang_sets = self.inventory_manager.get_langsets(
fields=['lang_set_name', 'locale_ids']
)
self.assertEqual(len(lang_sets), 2)
self.assertNotIn('lang_set_color', vars(lang_sets[0]))
self.assertListEqual(lang_sets[0].locale_ids, ['fr_FR', 'ja_JP'])
def test_get_locale_groups(self):
"""
Test get_locale_groups
"""
locale_groups = self.inventory_manager.get_locale_groups('ja_JP')
self.assertDictEqual(locale_groups, {'ja_JP': ['custom-set', 'f27-set']})
def test_get_all_locales_groups(self):
"""
Test get_all_locales_groups
"""
groups_of_all_locales = self.inventory_manager.get_all_locales_groups()
self.assertDictEqual(groups_of_all_locales,
{'ja_JP': ['custom-set', 'f27-set'], 'fr_FR': ['custom-set', 'f27-set'],
'ru_RU': ['f27-set'], 'ko_KR': []})
def test_get_translation_platforms(self):
"""
Test get_translation_platforms
"""
transplatforms = self.inventory_manager.get_translation_platforms(engine='zanata')
self.assertEqual(transplatforms[1].api_url, 'https://translate.zanata.org')
self.assertEqual(transplatforms[1].platform_slug, 'ZNTAPUB')
def test_get_ci_platforms(self):
"""
Test get_translation_platforms
"""
ciplatforms = self.inventory_manager.get_translation_platforms(ci=True)
self.assertEqual(ciplatforms[0].api_url, 'https://cloud.memsource.com/web')
self.assertEqual(ciplatforms[0].platform_slug, 'MSRCPUB')
def test_get_transplatforms_set(self):
"""
Test get_transplatforms_set
"""
active_platforms, inactive_platforms = self.inventory_manager.get_transplatforms_set()
self.assertEqual(len(active_platforms), 3)
self.assertEqual(len(inactive_platforms), 0)
def test_get_engine_from_slug(self):
"""
Test get_engine_from_slug
"""
platform_engine = self.inventory_manager.get_engine_from_slug(
PlatformData.platform_zanata_fedora.platform_slug
)
self.assertEqual(platform_engine, 'zanata')
platform_engine = self.inventory_manager.get_engine_from_slug(
PlatformData.platform_memsource_cloud.platform_slug
)
self.assertEqual(platform_engine, 'memsource')
def test_get_transplatform_slug_url(self):
"""
        Test get_transplatform_slug_url
"""
slug_url_tuple = self.inventory_manager.get_transplatform_slug_url()
self.assertTupleEqual(slug_url_tuple, (('MSRCPUB', 'https://cloud.memsource.com/web'),
('ZNTAFED', 'https://fedora.zanata.org'),
('ZNTAPUB', 'https://translate.zanata.org')))
def test_get_relbranch_locales(self):
"""
Test get_relbranch_locales
"""
relbranch_locales = self.inventory_manager.get_relbranch_locales("nonexisting-relbranch")
self.assertFalse(relbranch_locales)
relbranch_locales = self.inventory_manager.get_relbranch_locales('fedora-27')
self.assertListEqual(relbranch_locales, ['ja_JP', 'fr_FR', 'ru_RU'])
def test_get_release_streams(self):
"""
Test get_release_streams
"""
relstream_fedora = Product.objects.get(product_name='Fedora')
relstream_rhel = Product.objects.get(product_name='RHEL')
release_streams = self.inventory_manager.get_release_streams()
self.assertEqual(len(release_streams), 2)
self.assertIn(relstream_fedora, release_streams)
self.assertIn(relstream_rhel, release_streams)
release_streams = self.inventory_manager.get_release_streams(stream_slug='RHEL')
self.assertEqual(len(release_streams), 1)
self.assertIn(relstream_rhel, release_streams)
release_streams = self.inventory_manager.get_release_streams(only_active=True)
self.assertEqual(len(release_streams), 1)
self.assertIn(relstream_fedora, release_streams)
def test_get_relstream_slug_name(self):
"""
Test get_relstream_slug_name
"""
relstream_slug_name_tuple = self.inventory_manager.get_relstream_slug_name()
self.assertEqual(len(relstream_slug_name_tuple), 1)
self.assertTupleEqual(relstream_slug_name_tuple[0], ('fedora', 'Fedora'))
def test_get_relstream_build_tags(self):
"""
Test get_relstream_build_tags
"""
tags = self.inventory_manager.get_relstream_build_tags(stream_slug='fedora')
self.assertIsInstance(tags, dict)
self.assertDictEqual(tags, {'fedora': ['f28', 'f29', 'rawhide']})
| [((32, 24, 32, 42), 'dashboard.managers.inventory.InventoryManager', 'InventoryManager', ({}, {}), '()', False, 'from dashboard.managers.inventory import InventoryManager\n'), ((27, 33, 27, 49), 'fixture.style.NamedDataStyle', 'NamedDataStyle', ({}, {}), '()', False, 'from fixture.style import NamedDataStyle\n'), ((192, 27, 192, 69), 'dashboard.models.Product.objects.get', 'Product.objects.get', (), '', False, 'from dashboard.models import Product\n'), ((193, 25, 193, 65), 'dashboard.models.Product.objects.get', 'Product.objects.get', (), '', False, 'from dashboard.models import Product\n')] |
nolanliou/fedlearner | web_console_v2/api/fedlearner_webconsole/rpc/server.py | 54127c465b3b5d77ae41b823e42efbc1b707e826 | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=broad-except, cyclic-import
import logging
import threading
from concurrent import futures
import grpc
from fedlearner_webconsole.proto import (
service_pb2, service_pb2_grpc,
common_pb2
)
from fedlearner_webconsole.db import db
from fedlearner_webconsole.project.models import Project
from fedlearner_webconsole.workflow.models import (
Workflow, WorkflowState, TransactionState
)
from fedlearner_webconsole.exceptions import (
UnauthorizedException
)
class RPCServerServicer(service_pb2_grpc.WebConsoleV2ServiceServicer):
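    # Thin gRPC servicer: each RPC is delegated to the RpcServer instance and
    # exceptions are translated into status codes on the response message.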
def __init__(self, server):
self._server = server
def CheckConnection(self, request, context):
try:
return self._server.check_connection(request)
except UnauthorizedException as e:
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('CheckConnection rpc server error: %s', repr(e))
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
def UpdateWorkflowState(self, request, context):
try:
return self._server.update_workflow_state(request)
except UnauthorizedException as e:
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('UpdateWorkflowState rpc server error: %s', repr(e))
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
def GetWorkflow(self, request, context):
try:
return self._server.get_workflow(request)
except UnauthorizedException as e:
return service_pb2.GetWorkflowResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('GetWorkflow rpc server error: %s', repr(e))
return service_pb2.GetWorkflowResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
class RpcServer(object):
def __init__(self):
self._lock = threading.Lock()
self._started = False
self._server = None
self._app = None
def start(self, app):
assert not self._started, "Already started"
self._app = app
listen_port = app.config.get('GRPC_LISTEN_PORT', 1999)
with self._lock:
self._server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
service_pb2_grpc.add_WebConsoleV2ServiceServicer_to_server(
RPCServerServicer(self), self._server)
self._server.add_insecure_port('[::]:%d' % listen_port)
self._server.start()
self._started = True
def stop(self):
if not self._started:
return
with self._lock:
self._server.stop(None).wait()
del self._server
self._started = False
def check_auth_info(self, auth_info):
logging.debug('auth_info: %s', auth_info)
project = Project.query.filter_by(
name=auth_info.project_name).first()
if project is None:
raise UnauthorizedException('Invalid project')
project_config = project.get_config()
# TODO: fix token verification
# if project_config.token != auth_info.auth_token:
# raise UnauthorizedException('Invalid token')
if project_config.domain_name != auth_info.target_domain:
raise UnauthorizedException('Invalid domain')
source_party = None
for party in project_config.participants:
if party.domain_name == auth_info.source_domain:
source_party = party
if source_party is None:
raise UnauthorizedException('Invalid domain')
return project, source_party
def check_connection(self, request):
with self._app.app_context():
_, party = self.check_auth_info(request.auth_info)
logging.debug(
'received check_connection from %s', party.domain_name)
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS))
def update_workflow_state(self, request):
with self._app.app_context():
project, party = self.check_auth_info(request.auth_info)
logging.debug(
'received update_workflow_state from %s: %s',
party.domain_name, request)
name = request.workflow_name
state = WorkflowState(request.state)
target_state = WorkflowState(request.target_state)
transaction_state = TransactionState(request.transaction_state)
workflow = Workflow.query.filter_by(
name=request.workflow_name,
project_id=project.id).first()
if workflow is None:
assert state == WorkflowState.NEW
assert target_state == WorkflowState.READY
workflow = Workflow(
name=name,
project_id=project.id,
state=state, target_state=target_state,
transaction_state=transaction_state)
db.session.add(workflow)
db.session.commit()
db.session.refresh(workflow)
workflow.update_state(
state, target_state, transaction_state)
db.session.commit()
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS),
transaction_state=workflow.transaction_state.value)
def _filter_workflow(self, workflow, modes):
# filter peer-readable and peer-writable variables
if workflow is None:
return
var_list = [
i for i in workflow.variables if i.access_mode in modes]
workflow.ClearField('variables')
for i in var_list:
workflow.variables.append(i)
for job_def in workflow.job_definitions:
var_list = [
i for i in job_def.variables if i.access_mode in modes]
job_def.ClearField('variables')
for i in var_list:
job_def.variables.append(i)
def get_workflow(self, request):
with self._app.app_context():
project, party = self.check_auth_info(request.auth_info)
workflow = Workflow.query.filter_by(
name=request.workflow_name,
project_id=project.id).first()
assert workflow is not None
config = workflow.get_config()
self._filter_workflow(
config,
[
common_pb2.Variable.PEER_READABLE,
common_pb2.Variable.PEER_WRITABLE
])
# job details
jobs = [service_pb2.JobDetail(
name=job.name, state=job.get_state_for_front())
for job in workflow.get_jobs()]
# fork info
forked_from = ''
if workflow.forked_from:
forked_from = Workflow.query.get(workflow.forked_from).name
return service_pb2.GetWorkflowResponse(
name=request.workflow_name,
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS),
config=config,
jobs=jobs,
state=workflow.state.value,
target_state=workflow.target_state.value,
transaction_state=workflow.transaction_state.value,
forkable=workflow.forkable,
forked_from=forked_from,
reuse_job_names=workflow.get_reuse_job_names(),
peer_reuse_job_names=workflow.get_peer_reuse_job_names(),
fork_proposal_config=workflow.get_fork_proposal_config()
)
rpc_server = RpcServer()
| [((88, 21, 88, 37), 'threading.Lock', 'threading.Lock', ({}, {}), '()', False, 'import threading\n'), ((116, 8, 116, 49), 'logging.debug', 'logging.debug', ({(116, 22, 116, 37): '"""auth_info: %s"""', (116, 39, 116, 48): 'auth_info'}, {}), "('auth_info: %s', auth_info)", False, 'import logging\n'), ((120, 18, 120, 58), 'fedlearner_webconsole.exceptions.UnauthorizedException', 'UnauthorizedException', ({(120, 40, 120, 57): '"""Invalid project"""'}, {}), "('Invalid project')", False, 'from fedlearner_webconsole.exceptions import UnauthorizedException\n'), ((126, 18, 126, 57), 'fedlearner_webconsole.exceptions.UnauthorizedException', 'UnauthorizedException', ({(126, 40, 126, 56): '"""Invalid domain"""'}, {}), "('Invalid domain')", False, 'from fedlearner_webconsole.exceptions import UnauthorizedException\n'), ((132, 18, 132, 57), 'fedlearner_webconsole.exceptions.UnauthorizedException', 'UnauthorizedException', ({(132, 40, 132, 56): '"""Invalid domain"""'}, {}), "('Invalid domain')", False, 'from fedlearner_webconsole.exceptions import UnauthorizedException\n'), ((138, 12, 139, 71), 'logging.debug', 'logging.debug', ({(139, 16, 139, 51): '"""received check_connection from %s"""', (139, 53, 139, 70): 'party.domain_name'}, {}), "('received check_connection from %s', party.domain_name)", False, 'import logging\n'), ((146, 12, 148, 43), 'logging.debug', 'logging.debug', ({(147, 16, 147, 60): '"""received update_workflow_state from %s: %s"""', (148, 16, 148, 33): 'party.domain_name', (148, 35, 148, 42): 'request'}, {}), "('received update_workflow_state from %s: %s', party.\n domain_name, request)", False, 'import logging\n'), ((150, 20, 150, 48), 'fedlearner_webconsole.workflow.models.WorkflowState', 'WorkflowState', ({(150, 34, 150, 47): 'request.state'}, {}), '(request.state)', False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((151, 27, 151, 62), 'fedlearner_webconsole.workflow.models.WorkflowState', 'WorkflowState', ({(151, 41, 151, 61): 'request.target_state'}, {}), '(request.target_state)', False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((152, 32, 152, 75), 'fedlearner_webconsole.workflow.models.TransactionState', 'TransactionState', ({(152, 49, 152, 74): 'request.transaction_state'}, {}), '(request.transaction_state)', False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((170, 12, 170, 31), 'fedlearner_webconsole.db.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from fedlearner_webconsole.db import db\n'), ((99, 16, 99, 58), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (), '', False, 'from concurrent import futures\n'), ((117, 18, 118, 40), 'fedlearner_webconsole.project.models.Project.query.filter_by', 'Project.query.filter_by', (), '', False, 'from fedlearner_webconsole.project.models import Project\n'), ((159, 27, 163, 56), 'fedlearner_webconsole.workflow.models.Workflow', 'Workflow', (), '', False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((164, 16, 164, 40), 'fedlearner_webconsole.db.db.session.add', 'db.session.add', ({(164, 31, 164, 39): 'workflow'}, {}), '(workflow)', False, 'from fedlearner_webconsole.db import db\n'), ((165, 16, 165, 35), 'fedlearner_webconsole.db.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from fedlearner_webconsole.db import db\n'), ((166, 16, 166, 44), 
'fedlearner_webconsole.db.db.session.refresh', 'db.session.refresh', ({(166, 35, 166, 43): 'workflow'}, {}), '(workflow)', False, 'from fedlearner_webconsole.db import db\n'), ((141, 23, 142, 51), 'fedlearner_webconsole.proto.common_pb2.Status', 'common_pb2.Status', (), '', False, 'from fedlearner_webconsole.proto import service_pb2, service_pb2_grpc, common_pb2\n'), ((153, 23, 155, 38), 'fedlearner_webconsole.workflow.models.Workflow.query.filter_by', 'Workflow.query.filter_by', (), '', False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((172, 27, 173, 55), 'fedlearner_webconsole.proto.common_pb2.Status', 'common_pb2.Status', (), '', False, 'from fedlearner_webconsole.proto import service_pb2, service_pb2_grpc, common_pb2\n'), ((195, 23, 197, 38), 'fedlearner_webconsole.workflow.models.Workflow.query.filter_by', 'Workflow.query.filter_by', (), '', False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((213, 30, 213, 70), 'fedlearner_webconsole.workflow.models.Workflow.query.get', 'Workflow.query.get', ({(213, 49, 213, 69): 'workflow.forked_from'}, {}), '(workflow.forked_from)', False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((216, 23, 217, 51), 'fedlearner_webconsole.proto.common_pb2.Status', 'common_pb2.Status', (), '', False, 'from fedlearner_webconsole.proto import service_pb2, service_pb2_grpc, common_pb2\n')] |
haru-256/ExpertPython3_Source | chapter15/async_aiohttp.py | 5ef412ef217c6078248ff9546e23ed9b69aadcff | """
Sample code from the "Asynchronous programming" section.
Demonstrates how to send HTTP requests asynchronously using aiohttp.
"""
import asyncio
import time
import aiohttp
from asyncrates import get_rates
SYMBOLS = ('USD', 'EUR', 'PLN', 'NOK', 'CZK')
BASES = ('USD', 'EUR', 'PLN', 'NOK', 'CZK')
async def fetch_rates(session, place):
return await get_rates(session, place)
async def present_result(result):
base, rates = (await result)
rates_line = ", ".join(
[f"{rates[symbol]:7.03} {symbol}" for symbol in SYMBOLS]
)
print(f"1 {base} = {rates_line}")
async def main():
async with aiohttp.ClientSession() as session:
await asyncio.wait([
asyncio.create_task(present_result(fetch_rates(session, base)))
for base in BASES
])
if __name__ == "__main__":
started = time.time()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
elapsed = time.time() - started
print()
print(f"経過時間: {elapsed:.2f}s")
| [((39, 14, 39, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((40, 11, 40, 35), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((18, 17, 18, 42), 'asyncrates.get_rates', 'get_rates', ({(18, 27, 18, 34): 'session', (18, 36, 18, 41): 'place'}, {}), '(session, place)', False, 'from asyncrates import get_rates\n'), ((31, 15, 31, 38), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ({}, {}), '()', False, 'import aiohttp\n'), ((42, 14, 42, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
OleksiiOleksenko/intel_mpx_explained | experiments/nginx/run.py | dd6da57e0fcf22df358d1a742079b414620a7c88 | #!/usr/bin/env python
from __future__ import print_function
import logging
import os
import signal
from time import sleep
from subprocess import Popen, PIPE
import socket
from core.common_functions import *
from core.run import Runner
class NginxPerf(Runner):
"""
Runs Nginx
"""
name = "nginx"
exp_name = "nginx"
bench_suite = False
benchmarks = {"nginx": ""}
test_benchmarks = {"nginx": ""}
client_numbers = [1, 5, 9, 13, 17, 21, 25, 29]
ab = "ab"
duration = 20 # in seconds
requests_num = 1000000 # some huge number so we always take 20 seconds
def __init__(self, *args, **kwargs):
super(NginxPerf, self).__init__(*args, **kwargs)
if self.config.input_type == "test":
self.client_numbers = (1,)
def per_benchmark_action(self, type_, benchmark, args):
self.log_build(type_, benchmark)
build_path = "/".join([self.dirs["build"], type_])
self.current_exe = build_path + '/sbin/' + benchmark
build_benchmark(
b=benchmark,
t=type_,
makefile=self.dirs['bench_src'],
build_path=build_path
)
# generate an input file
with open(build_path + "/html/index.html", "w") as f:
f.write("<html><body><h1>It works!</h1>")
random_text = my_check_output("lorem -p 10")
f.write(random_text)
f.write("</body></html>")
# config Nginx
replace_in_file(build_path + "/conf/nginx.conf", "listen 80;", "listen 8080;", ignoreifcontains=True)
replace_in_file(build_path + "/conf/nginx.conf", "worker_processes 1;", "worker_processes auto;", ignoreifcontains=True)
def per_thread_action(self, type_, benchmark, args, thread_num):
servercmd = "{action} {exe} -g \"daemon off;\"".format(
action=self.action,
exe=self.current_exe,
)
logging.debug("Server command: %s" % servercmd)
# by default start client on local machine
if env.get("CLIENT_MACHINE"):
ssh = "ssh %s" % env["CLIENT_MACHINE"]
logging.debug("Using remote client: %s" % env["CLIENT_MACHINE"])
else:
ssh = ""
logging.debug("Using local client (use CLIENT_MACHINE env var to specify remote client)")
myip = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
with open(self.dirs["log_file"], "a") as f:
for client_number in self.client_numbers:
# start server
my_check_output("pkill -9 nginx > /dev/null || true") # for sanity
sleep(1)
server = Popen(servercmd, shell=True, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
sleep(1)
# start client (possibly on another machine)
msg = self.run_message.format(input=client_number, **locals())
self.log_run(msg)
f.write("[run] " + msg + "\n")
out = my_check_output("{ssh} {ab} -k -t {duration} -n {requests_num} -c {client_number} http://{myip}:8080/".format(
ab=self.ab,
duration=self.duration,
requests_num=self.requests_num,
**locals()
))
f.write("===== client =====\n")
f.write(out)
# log and stop server
f.write("===== return code is %s =====\n" % str(server.poll()))
try:
os.killpg(server.pid, signal.SIGINT)
except:
pass
f.write("===== stdout =====\n")
for line in server.stdout:
f.write(line.decode('utf-8'))
f.write("===== stderr =====\n")
for line in server.stderr:
f.write(line.decode('utf-8'))
sleep(1)
def set_logging(self):
self.num_benchmarks = len(self.benchmarks) * len(self.types) * self.num_runs * len(self.client_numbers)
logging.info("Total runs: %d" % self.num_benchmarks)
def main(benchmark_name=None):
runner = NginxPerf()
runner.main()
| [((65, 8, 65, 55), 'logging.debug', 'logging.debug', ({(65, 22, 65, 54): "('Server command: %s' % servercmd)"}, {}), "('Server command: %s' % servercmd)", False, 'import logging\n'), ((116, 8, 116, 60), 'logging.info', 'logging.info', ({(116, 21, 116, 59): "('Total runs: %d' % self.num_benchmarks)"}, {}), "('Total runs: %d' % self.num_benchmarks)", False, 'import logging\n'), ((70, 12, 70, 76), 'logging.debug', 'logging.debug', ({(70, 26, 70, 75): "('Using remote client: %s' % env['CLIENT_MACHINE'])"}, {}), "('Using remote client: %s' % env['CLIENT_MACHINE'])", False, 'import logging\n'), ((73, 12, 73, 102), 'logging.debug', 'logging.debug', ({(73, 26, 73, 101): '"""Using local client (use CLIENT_MACHINE env var to specify remote client)"""'}, {}), "(\n 'Using local client (use CLIENT_MACHINE env var to specify remote client)'\n )", False, 'import logging\n'), ((81, 16, 81, 24), 'time.sleep', 'sleep', ({(81, 22, 81, 23): '(1)'}, {}), '(1)', False, 'from time import sleep\n'), ((82, 25, 82, 101), 'subprocess.Popen', 'Popen', (), '', False, 'from subprocess import Popen, PIPE\n'), ((83, 16, 83, 24), 'time.sleep', 'sleep', ({(83, 22, 83, 23): '(1)'}, {}), '(1)', False, 'from time import sleep\n'), ((112, 16, 112, 24), 'time.sleep', 'sleep', ({(112, 22, 112, 23): '(1)'}, {}), '(1)', False, 'from time import sleep\n'), ((103, 20, 103, 56), 'os.killpg', 'os.killpg', ({(103, 30, 103, 40): 'server.pid', (103, 42, 103, 55): 'signal.SIGINT'}, {}), '(server.pid, signal.SIGINT)', False, 'import os\n'), ((75, 66, 75, 86), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n'), ((75, 198, 75, 246), 'socket.socket', 'socket.socket', ({(75, 212, 75, 226): 'socket.AF_INET', (75, 228, 75, 245): 'socket.SOCK_DGRAM'}, {}), '(socket.AF_INET, socket.SOCK_DGRAM)', False, 'import socket\n')] |
verdammelt/tavi | tavi/test/unit/base/document_no_fields_test.py | 3bb39a6e6ab936f6e9511a4058817697e3df098b | # -*- coding: utf-8 -*-
import unittest
from tavi.base.documents import BaseDocument
class BaseDocumentNoFieldsTest(unittest.TestCase):
class NoFieldsSample(BaseDocument):
pass
def setUp(self):
super(BaseDocumentNoFieldsTest, self).setUp()
self.no_fields_sample = self.NoFieldsSample()
def test_get_fields(self):
self.assertEqual([], self.no_fields_sample.fields)
def test_get_errors(self):
self.assertEqual(0, self.no_fields_sample.errors.count)
def test_valid(self):
self.assertEqual(True, self.no_fields_sample.valid)
def test_get_field_values(self):
self.assertEqual({}, self.no_fields_sample.field_values)
| [] |
hoafaloaf/seqparse | seqparse/test/test_seqparse.py | 1d2446070c5627a5cb880d00ef327b892b4dedef | """Test file sequence discovery on disk."""
# "Future" Libraries
from __future__ import print_function
# Standard Libraries
import os
import unittest
# Third Party Libraries
import mock
from builtins import range
from future.utils import lrange
from . import (DirEntry, generate_entries, initialise_mock_scandir_data,
mock_scandir_deep)
from .. import (__version__, get_parser, get_sequence, get_version, invert,
validate_frame_sequence)
from ..sequences import FileSequence, FrameChunk, FrameSequence
###############################################################################
# class: TestSeqparseModule
class TestSeqparseModule(unittest.TestCase):
"""Test file discovery on the seqparse module."""
_test_ext = "exr"
_test_file_name = "TEST_DIR"
_test_root = "test_dir"
_singletons = ["singleton0.jpg", "singleton1.jpg"]
def setUp(self):
"""Set up the test case."""
pass
@mock.patch("seqparse.seqparse.scandir")
def test_singletons(self, mock_api_call):
"""Seqparse: Test file singleton discovery from disk location."""
# Expected outputs ...
output = [os.path.join(self._test_root, x) for x in self._singletons]
entries = list()
for file_name in output:
entries.append(DirEntry(file_name))
mock_api_call.return_value = iter(entries)
parser = get_parser()
parser.scan_path(self._test_root)
file_names = parser.singletons
self.assertIn(self._test_root, file_names)
self.assertEqual(self._test_root, file_names[self._test_root].path)
self.assertEqual(len(file_names), 1)
self.assertEqual(
len(file_names[self._test_root]), len(self._singletons))
self.assertEqual(
sorted(self._singletons), sorted(file_names[self._test_root]))
# Check parser output ...
self.assertEqual(sorted(map(str, parser.output())), output)
# Test seqs_only option ...
self.assertEqual(sorted(parser.output(seqs_only=True)), [])
@mock.patch("seqparse.seqparse.scandir")
def test_single_padded_file(self, mock_api_call):
"""Seqparse: Test single padded file sequence discovery."""
frames = {4: [1]}
# Expected outputs ...
frame_seq_output = "0001"
file_seq_output = ".".join(
(self._test_file_name, frame_seq_output, self._test_ext))
final_output = os.path.join(self._test_root, file_seq_output)
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
data = parser.sequences
test_output = list(parser.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertTrue(4 in file_seq[self._test_ext])
self.assertEqual(len(file_seq[self._test_ext]), 1)
# And finally, the file sequence.
file_seq = file_seq[self._test_ext][4]
self.assertEqual(len(file_seq), len(frames[4]))
self.assertEqual(str(file_seq), final_output)
@mock.patch("seqparse.seqparse.scandir")
def test_simple_sequence(self, mock_api_call):
"""Seqparse: Test simple file sequence discovery."""
frames = {4: [0, 1, 2, 3, 4]}
# Expected outputs ...
frame_seq_output = "0000-0004"
file_seq_output = ".".join(
(self._test_file_name, frame_seq_output, self._test_ext))
final_output = os.path.join(self._test_root, file_seq_output)
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
data = parser.sequences
test_output = list(parser.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertTrue(4 in file_seq[self._test_ext])
self.assertEqual(len(file_seq[self._test_ext]), 1)
# And finally, the file sequence.
file_seq = file_seq[self._test_ext][4]
self.assertEqual(len(file_seq), len(frames[4]))
self.assertEqual(str(file_seq), final_output)
@mock.patch("seqparse.seqparse.scandir")
def test_complex_sequence(self, mock_api_call):
"""Seqparse: Test complex file sequence discovery."""
frames = {
1: [5, 6, 7, 8, 114, 199, 2000],
3: [8, 9, 10, 12],
4: [0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 101]
}
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
# Expected output frame sequences. Note how frames 114, 199 move to the
# "pad 3" group and 2000 moves to the "pad 4" group!
output_seqs = {
1: "5-8",
3: "008-010,012,114,199",
4: "0000-0006,0008-0012x2,0101,2000"
}
# Expected final output (where "/" is os.sep):
# test_dir/TEST_DIR.5-8.exr
# test_dir/TEST_DIR.008-010,012,114,199.exr
# test_dir/TEST_DIR.0000-0006,0008-0012x2,0101,2000.exr
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
final_output = list()
for pad, seq_frames in sorted(output_seqs.items()):
bits = (self._test_file_name, seq_frames, self._test_ext)
output_seqs[pad] = os.path.join(self._test_root, ".".join(bits))
final_output.append(output_seqs[pad])
data = parser.sequences
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 3)
self.assertEqual(list(map(str, test_output)), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertEqual(set(file_seq[self._test_ext]), set(output_seqs))
# And finally, the file sequences.
for pad in sorted(output_seqs):
self.assertEqual(output_seqs[pad],
str(file_seq[self._test_ext][pad]))
@mock.patch("seqparse.seqparse.scandir")
def test_nested_sequences(self, mock_api_call):
"""Seqparse: Test file sequence discovery in nested directories."""
mock_api_call.side_effect = mock_scandir_deep
print("\n\n SEQUENCES\n ---------")
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root)
for seq in parser.output():
print(" ", seq)
print("\n MAX LEVELS\n ----------")
for max_levels in range(-1, 4):
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root, max_levels=max_levels)
expected_seqs = max_levels + 2
if max_levels == -1:
expected_seqs = 5
seqs = list(parser.output())
blurb = " o max_levels == {:d}: {:d} ({:d} expected) entries"
print(blurb.format(max_levels, len(seqs), expected_seqs))
for seq in seqs:
print(" -", seq)
self.assertEqual(len(seqs), expected_seqs)
print("\n MIN LEVELS\n ----------")
for min_levels in range(-1, 4):
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root, min_levels=min_levels)
expected_seqs = 3 - min_levels
if min_levels == -1:
expected_seqs = 5
seqs = list(parser.output())
blurb = " o min_levels == {:d}: {:d} ({:d} expected) entries"
print(blurb.format(min_levels, len(seqs), expected_seqs))
for seq in seqs:
print(" -", seq)
self.assertEqual(len(seqs), expected_seqs)
print("")
def test_valid_frame_sequences(self):
"""Seqparse: Test validity of simple frame ranges."""
good_frame_seqs = [
"0001", ",0001", "0001,", "0001-0001", "0001-0001x0",
"0001-0003x3", "0001,0003", "0001,,0003", "0001-0010",
"0001-0010x0", "0001-0011x2", "0001-0012x2", "0001-0005,0007-0010",
"0001-0005x2,0007-0010", "0001-0005,0007-0011x2",
"0001-0005,0006,0008-0012x2", "0001,0003-0007,0009-0015x2",
"3,1,5,7", "01-05,03-07"
]
bad_frame_seqs = [
"-0001", "0001-", "0001x2", "x2", "0001,0003x2", "0001-0005x",
"0010-0001", "x", ",", ",,", ""
]
print("\n\n GOOD SEQUENCES\n --------------")
for frame_seq in good_frame_seqs:
output = validate_frame_sequence(frame_seq)
print(' o {!r} --> {!r}'.format(frame_seq, output))
self.assertTrue(output)
print("\n BAD SEQUENCES\n -------------")
for frame_seq in bad_frame_seqs:
print(' o {!r}'.format(frame_seq))
self.assertFalse(validate_frame_sequence(frame_seq))
print("")
def test_add_file_sequence(self):
"""Seqparse: Test file sequence addition via seqparse.add_file."""
input_file = ".".join((self._test_file_name, "0005", self._test_ext))
input_file = os.path.join(self._test_root, input_file)
# Expected outputs ...
input_frame_seq = "0000-0004"
output_frame_seq = "0000-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
output_file_seq = ".".join(
(self._test_file_name, output_frame_seq, self._test_ext))
output_file_seq = os.path.join(self._test_root, output_file_seq)
print("\n\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
input_frame_seq = "0000-0002,,0003-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
print("\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
@mock.patch("seqparse.seqparse.scandir")
def test_inversion(self, mock_api_call):
"""Seqparse: Test usage of the "missing" option in Seqparse.output."""
file_path = os.path.join(self._test_root, self._test_file_name)
chunk_in = FrameChunk(first=1, last=11, step=2, pad=4)
fseq = FileSequence(
name=file_path, ext=self._test_ext, frames=chunk_in)
input_entries = [DirEntry(x) for x in fseq]
mock_api_call.return_value = input_entries
chunk_out = FrameChunk(first=2, last=10, step=2, pad=4)
expected = FileSequence(
name=file_path, ext=self._test_ext, frames=chunk_out)
parser = get_parser()
parser.scan_path(self._test_root)
inverted = list(parser.output(missing=True))
self.assertEqual(len(inverted), 1)
print("\n\n SEQUENCE\n --------")
print(" input files: ", fseq)
print(" expected files:", expected)
print(" inverted files:", inverted[0])
self.assertEqual(str(inverted[0]), str(expected))
fseq = FileSequence(
name=file_path, ext=self._test_ext, frames=[1, 2, 3, 4, 6], pad=4)
input_entries = [DirEntry(x) for x in fseq]
mock_api_call.return_value = input_entries
expected = FileSequence(
name=file_path, ext=self._test_ext, frames=[5], pad=4)
parser = get_parser()
parser.scan_path(self._test_root)
inverted = list(parser.output(missing=True))
self.assertEqual(len(inverted), 1)
print("\n\n SEQUENCE\n --------")
print(" input files: ", fseq)
print(" expected files:", expected)
print(" inverted files:", inverted[0])
self.assertEqual(str(inverted[0]), str(expected))
@mock.patch("seqparse.seqparse.scandir")
def test_scan_options(self, mock_api_call):
"""Seqparse: Make sure scan_options works as expected."""
frames = {4: (1, 2, 3, 4, 6)}
input_entries = generate_entries(
name="test", ext="py", frames=frames, root=self._test_root)
input_entries.extend(
generate_entries(
name=".test", ext="py", frames=frames, root=self._test_root))
input_entries.append(
DirEntry(os.path.join(self._test_root, "pony.py")))
mock_api_call.return_value = input_entries
parser = get_parser()
parser.scan_options["stat"] = True
parser.scan_path(self._test_root)
output = list(parser.output())
expected = [
os.path.join(self._test_root, "test.0001-0004,0006.py"),
os.path.join(self._test_root, "pony.py")
]
self.assertEqual(len(output), 2)
self.assertEqual(list(map(str, output)), expected)
self.assertEqual(output[0].ctime, 1490908340)
self.assertEqual(output[0].mtime, 1490908305)
self.assertEqual(output[0].size, 36520)
parser = get_parser()
parser.scan_options["all"] = True
parser.scan_path(self._test_root)
output = list(parser.output())
expected = [
os.path.join(self._test_root, ".test.0001-0004,0006.py"),
os.path.join(self._test_root, "test.0001-0004,0006.py"),
os.path.join(self._test_root, "pony.py")
]
self.assertEqual(len(output), 3)
self.assertEqual(list(map(str, output)), expected)
def test_api_calls(self):
"""Seqparse: Test API calls at root of module."""
chunk = FrameChunk(first=1, last=7, step=2, pad=4)
seq = get_sequence(lrange(1, 8, 2), pad=4)
self.assertTrue(isinstance(seq, FrameSequence))
self.assertEqual(str(seq), "0001-0007x2")
expected = FrameChunk(first=2, last=6, step=2, pad=4)
inverted = invert(chunk)
self.assertEqual(str(inverted), str(expected))
inverted = invert(seq)
self.assertEqual(str(inverted), str(expected))
with self.assertRaises(TypeError):
invert(get_parser())
self.assertEqual(get_version(), __version__)
| [((37, 5, 37, 44), 'mock.patch', 'mock.patch', ({(37, 16, 37, 43): '"""seqparse.seqparse.scandir"""'}, {}), "('seqparse.seqparse.scandir')", False, 'import mock\n'), ((68, 5, 68, 44), 'mock.patch', 'mock.patch', ({(68, 16, 68, 43): '"""seqparse.seqparse.scandir"""'}, {}), "('seqparse.seqparse.scandir')", False, 'import mock\n'), ((121, 5, 121, 44), 'mock.patch', 'mock.patch', ({(121, 16, 121, 43): '"""seqparse.seqparse.scandir"""'}, {}), "('seqparse.seqparse.scandir')", False, 'import mock\n'), ((174, 5, 174, 44), 'mock.patch', 'mock.patch', ({(174, 16, 174, 43): '"""seqparse.seqparse.scandir"""'}, {}), "('seqparse.seqparse.scandir')", False, 'import mock\n'), ((237, 5, 237, 44), 'mock.patch', 'mock.patch', ({(237, 16, 237, 43): '"""seqparse.seqparse.scandir"""'}, {}), "('seqparse.seqparse.scandir')", False, 'import mock\n'), ((377, 5, 377, 44), 'mock.patch', 'mock.patch', ({(377, 16, 377, 43): '"""seqparse.seqparse.scandir"""'}, {}), "('seqparse.seqparse.scandir')", False, 'import mock\n'), ((429, 5, 429, 44), 'mock.patch', 'mock.patch', ({(429, 16, 429, 43): '"""seqparse.seqparse.scandir"""'}, {}), "('seqparse.seqparse.scandir')", False, 'import mock\n'), ((77, 23, 77, 69), 'os.path.join', 'os.path.join', ({(77, 36, 77, 51): 'self._test_root', (77, 53, 77, 68): 'file_seq_output'}, {}), '(self._test_root, file_seq_output)', False, 'import os\n'), ((130, 23, 130, 69), 'os.path.join', 'os.path.join', ({(130, 36, 130, 51): 'self._test_root', (130, 53, 130, 68): 'file_seq_output'}, {}), '(self._test_root, file_seq_output)', False, 'import os\n'), ((250, 26, 250, 38), 'builtins.range', 'range', ({(250, 32, 250, 34): '(-1)', (250, 36, 250, 37): '(4)'}, {}), '(-1, 4)', False, 'from builtins import range\n'), ((268, 26, 268, 38), 'builtins.range', 'range', ({(268, 32, 268, 34): '(-1)', (268, 36, 268, 37): '(4)'}, {}), '(-1, 4)', False, 'from builtins import range\n'), ((318, 21, 318, 62), 'os.path.join', 'os.path.join', ({(318, 34, 318, 49): 'self._test_root', (318, 51, 318, 61): 'input_file'}, {}), '(self._test_root, input_file)', False, 'import os\n'), ((325, 25, 325, 70), 'os.path.join', 'os.path.join', ({(325, 38, 325, 53): 'self._test_root', (325, 55, 325, 69): 'input_file_seq'}, {}), '(self._test_root, input_file_seq)', False, 'import os\n'), ((328, 26, 328, 72), 'os.path.join', 'os.path.join', ({(328, 39, 328, 54): 'self._test_root', (328, 56, 328, 71): 'output_file_seq'}, {}), '(self._test_root, output_file_seq)', False, 'import os\n'), ((354, 25, 354, 70), 'os.path.join', 'os.path.join', ({(354, 38, 354, 53): 'self._test_root', (354, 55, 354, 69): 'input_file_seq'}, {}), '(self._test_root, input_file_seq)', False, 'import os\n'), ((380, 20, 380, 71), 'os.path.join', 'os.path.join', ({(380, 33, 380, 48): 'self._test_root', (380, 50, 380, 70): 'self._test_file_name'}, {}), '(self._test_root, self._test_file_name)', False, 'import os\n'), ((41, 18, 41, 50), 'os.path.join', 'os.path.join', ({(41, 31, 41, 46): 'self._test_root', (41, 48, 41, 49): 'x'}, {}), '(self._test_root, x)', False, 'import os\n'), ((450, 12, 450, 67), 'os.path.join', 'os.path.join', ({(450, 25, 450, 40): 'self._test_root', (450, 42, 450, 66): '"""test.0001-0004,0006.py"""'}, {}), "(self._test_root, 'test.0001-0004,0006.py')", False, 'import os\n'), ((451, 12, 451, 52), 'os.path.join', 'os.path.join', ({(451, 25, 451, 40): 'self._test_root', (451, 42, 451, 51): '"""pony.py"""'}, {}), "(self._test_root, 'pony.py')", False, 'import os\n'), ((465, 12, 465, 68), 'os.path.join', 'os.path.join', ({(465, 25, 465, 40): 
'self._test_root', (465, 42, 465, 67): '""".test.0001-0004,0006.py"""'}, {}), "(self._test_root, '.test.0001-0004,0006.py')", False, 'import os\n'), ((466, 12, 466, 67), 'os.path.join', 'os.path.join', ({(466, 25, 466, 40): 'self._test_root', (466, 42, 466, 66): '"""test.0001-0004,0006.py"""'}, {}), "(self._test_root, 'test.0001-0004,0006.py')", False, 'import os\n'), ((467, 12, 467, 52), 'os.path.join', 'os.path.join', ({(467, 25, 467, 40): 'self._test_root', (467, 42, 467, 51): '"""pony.py"""'}, {}), "(self._test_root, 'pony.py')", False, 'import os\n'), ((476, 27, 476, 42), 'future.utils.lrange', 'lrange', ({(476, 34, 476, 35): '1', (476, 37, 476, 38): '8', (476, 40, 476, 41): '2'}, {}), '(1, 8, 2)', False, 'from future.utils import lrange\n'), ((441, 21, 441, 61), 'os.path.join', 'os.path.join', ({(441, 34, 441, 49): 'self._test_root', (441, 51, 441, 60): '"""pony.py"""'}, {}), "(self._test_root, 'pony.py')", False, 'import os\n')] |
ragreener1/deliveroo-scraping | deliveroo_scraping.py | c8e3de2503a6198734904fb937a77dd38ef05581 | import urllib.request
import pandas as pd
import sqlite3
import re
from bs4 import BeautifulSoup
# Parameters
postcodes_list = ["W1F7EY"]
db_name = "scraped.db"
# This is so that Deliveroo thinks the scraper is Google Chrome
# as opposed to a web scraper
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11' +
'(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*' +
';q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
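# The hdr dict above is passed as the headers argument of every
# urllib.request.Request made below.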
def process_menu(doc, url, tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items):
# This function processes the menu
# This gets the restaurant_name by finding the <h1> tag with the CSS class
# restaurant_name
restaurant_name = doc.find("h1", class_="restaurant__name", text=True).text
# This gets the deliveroo_name by selecting the appropriate part from the
# URL
# This will fail on restaurants not in London
deliveroo_name = re.findall(
'(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\?postcode=)',
url)[0]
# This adds this to the restaurants dataframe
# This isn't very efficient, if you were wanting to scrape large numbers
# you wouldn't want to use .append
restaurants = restaurants.append(
{"name": restaurant_name, "deliveroo_name": deliveroo_name},
ignore_index=True)
    # This gets the restaurant_id by finding the index of what was inserted
# Again this isn't very efficient
restaurant_id = restaurants[
(restaurants == [restaurant_name, deliveroo_name]).all(
axis=1)].index[0]
restaurant_tags = []
# Deal with tags
# Start by finding all <small> tags with the CSS class tag
for tag in doc.find_all("small", class_="tag"):
# The second element of the <small> CSS class is the type of the tag
# this could be locale or food etc.
tagtype = tag['class'][1]
# The name of the tag is what is inside the <small>
name = tag.text
# See if the tagtype exists in the tag_type dataframe
type_matches = tag_type[(tag_type == [tagtype]).all(axis=1)]
# If it doesn't
if len(type_matches) == 0:
# Add it (again not very efficient)
tag_type = tag_type.append({"name": tagtype}, ignore_index=True)
# Update the matches
type_matches = tag_type[(tag_type == [tagtype]).all(axis=1)]
# See if the tag already exists in the tags_df dataframe
matches = tags_df[
(tags_df == [name, type_matches.index[0]]).all(axis=1)]
# If it doesn't
if len(matches) == 0:
# Add it
entry = {"name": name, "type": type_matches.index[0]}
tags_df = tags_df.append(entry, ignore_index=True)
matches = tags_df[(tags_df == [name, type_matches.index[0]]).all(
axis=1)]
# Add the tag to a list of tags for that restaurant
restaurant_tags.append(matches.index[0])
# For each tag
for tag in restaurant_tags:
# Add this to restaurants_to_tags df
restaurants_to_tags = restaurants_to_tags.append(
{"restaurant_id": restaurant_id, "tag_id": tag}, ignore_index=True)
# For each category (in the menu, e.g. Sides, Mains, Desserts, Drinks -
# different for every restaurant though!) process the menu items
# This is found by looking for <div> tags with the CSS class
# menu-index-page__menu-category
categories = doc.find_all("div", class_="menu-index-page__menu-category")
for category in categories:
# the category name is inside the h3 inside the div
category_name = category.h3.text
# Add the category to the menu_sections data frame. Again this isn't
# efficient.
menu_sections = menu_sections.append(
{"restaurant_id": restaurant_id, "name": category_name},
ignore_index=True)
# Get the id in the menu_sections data frame
category_id = menu_sections[
(menu_sections == [restaurant_id, category_name]).all(
axis=1)].index[0]
# Get each of the items in that category
category_items = []
# For each menu item. Found by looking for <div> inside the category
# with the CSS class menu-index-page__item_content
items_html = category.find_all("div",
class_="menu-index-page__item-content")
for menu_item in items_html:
# The name is the <h6> with the CSS class
# menu-index-page__item-title
item_name = \
menu_item.find("h6", class_="menu-index-page__item-title").text
# The price is the <span> with the CSS class
# menu-index-page__item-price. The £ symbol is dropped, it is then
# converted to a floating-point number (decimal), multiplied by 100
# so that it is in pence. It is then converted to an integer.
#
# https://stackoverflow.com/questions/3730019/why-not-use-double-or-float-to-represent-currency
price_as_text = \
menu_item.find("span", class_="menu-index-page__item-price")\
.text[1:]
price_as_float = float(price_as_text)
item_price = int(price_as_float * 100)
# If an item is popular it has a <span> with the CSS class
# menu-index-page__item-popular
# So this tries to find it, if it exists is_item_popular = True,
# False otherwise.
is_item_popular = menu_item.find(
"span", class_="menu-index-page__item-popular") is not None
# Add this menu_item to category_items
category_items.append(
{"menu_section_id": category_id,
"name": item_name,
"price_in_pence": item_price,
"is_popular": is_item_popular}
)
# Add all the menu items in that category to the menu_items data frame,
# this is more efficient than doing this one at a time
menu_items = menu_items.append(category_items, ignore_index=True)
# Return the updated dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items)
def get_restaurant_and_process_menu(url, tags_df, tag_type, restaurants,
restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs,
postcodes):
    # This function gets the restaurant and then processes its menu if it
# hasn't been processed before
# Get the deliveroo name from the url
deliveroo_name = re.findall(
'(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\?postcode=)',
url)[0]
# If this restaurant hasn't been seen before
    if deliveroo_name not in restaurants['deliveroo_name'].values:
# Get the webpage
request = urllib.request.Request(url, headers=hdr)
page = urllib.request.urlopen(request)
soup = BeautifulSoup(page)
# Try and process the menu, if it doesn't work handle it nicely
try:
(tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items) = process_menu(soup, url, tags_df,
tag_type, restaurants,
restaurants_to_tags,
menu_sections,
menu_items)
except Exception:
print(f"Fail on {url}")
# Get the postcode from the URL
    postcode = re.findall('(?<=\\?postcode=)(.*)', url)[0]
# Find where it is in the postcodes data frame
    postcodes_index = postcodes.index[postcodes['post_code'] == postcode][0]
# Find the restaurants id in the restaurants dataframe using the deliveroo
# name
    restaurant_index = \
        restaurants.index[restaurants['deliveroo_name'] == deliveroo_name][0]
# Add an entry to restaurants_to_locs saying that this restaurant is
# available at this location
restaurants_to_locs = restaurants_to_locs.append(
{"restaurant_id": restaurant_index, "loc_id": postcodes_index},
ignore_index=True)
# Return the amended dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs)
def process_restaurants_for_postcode(postcode, tags_df, tag_type, restaurants,
restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs,
postcodes):
    # This function processes the restaurants available for a single postcode
# Add the postcode to the URL - it doesn't matter that it says camden, it
# will update as appropriate.
url = "https://deliveroo.co.uk/restaurants/london/camden" \
f"?postcode={postcode}&sort=time"
# Create the HTTP request
request = urllib.request.Request(url, headers=hdr)
# Get the page
page = urllib.request.urlopen(request)
soup = BeautifulSoup(page)
# For every link in the page
for i, link in enumerate(soup.find_all("a")):
print(i)
# Get the destination of the link
destination = link.get("href")
# If it's to a menu, get the restaurant and process the menu
if "/menu" in destination:
(tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items, restaurants_to_locs) = \
get_restaurant_and_process_menu(
"https://deliveroo.co.uk" + destination, tags_df, tag_type,
restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs, postcodes)
# Return the amended dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs)
def process_all_restaurants(postcodes, db_name):
# This function processes all of the postcodes
# Create the dataframes
tags_df = pd.DataFrame({"name": [], "type": []})\
.astype({"name": "str", "type": "int32"})
tag_type = pd.DataFrame({"name": []})
restaurants = pd.DataFrame({"name": [], "deliveroo_name": []})\
.astype({"name": "str", "deliveroo_name": "str"})
restaurants_to_tags = pd.DataFrame({"restaurant_id": [], "tag_id": []})\
.astype({"restaurant_id": "int64", "tag_id": "int64"})
menu_sections = pd.DataFrame({"restaurant_id": [], "name": []})\
.astype({"restaurant_id": "int64", "name": "str"})
menu_items = pd.DataFrame(
{"menu_section_id": [],
"name": [],
"price_in_pence": [],
"is_popular": []}).astype(
{"menu_section_id": "int64",
"name": "str",
"price_in_pence": "int64",
"is_popular": "bool"})
restaurants_to_locs = pd.DataFrame({"restaurant_id": [], "loc_id": []})\
.astype({"restaurant_id": "int64", "loc_id": "int64"})
for post_code in postcodes['post_code']:
(tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs) =\
process_restaurants_for_postcode(post_code, tags_df, tag_type,
restaurants, restaurants_to_tags,
menu_sections, menu_items,
restaurants_to_locs, postcodes)
# Write to db
cnx = sqlite3.connect(db_name)
postcodes.to_sql("POSTCODES", cnx, index_label="id")
restaurants.to_sql("RESTAURANTS", cnx, index_label="id")
restaurants_to_locs.to_sql("RESTAURANTS_AVAILABLE", cnx, index_label="id")
menu_items.to_sql("MENU_ITEMS", cnx, index_label="id")
menu_sections.to_sql("MENU_SECTIONS", cnx, index_label="id")
tags_df.to_sql("CATEGORIES", cnx, index_label="id")
tag_type.to_sql("CATEGORY_TYPES", cnx, index_label="id")
restaurants_to_tags.to_sql("RESTAURANT_CATEGORIES", cnx, index_label="id")
cnx.close()
if __name__ == "__main__":
postcodes_df = pd.DataFrame({
'post_code': postcodes_list
})
process_all_restaurants(postcodes_df, db_name)
| [((228, 11, 228, 30), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(228, 25, 228, 29): 'page'}, {}), '(page)', False, 'from bs4 import BeautifulSoup\n'), ((256, 15, 256, 41), 'pandas.DataFrame', 'pd.DataFrame', ({(256, 28, 256, 40): "{'name': []}"}, {}), "({'name': []})", True, 'import pandas as pd\n'), ((288, 10, 288, 34), 'sqlite3.connect', 'sqlite3.connect', ({(288, 26, 288, 33): 'db_name'}, {}), '(db_name)', False, 'import sqlite3\n'), ((301, 19, 303, 6), 'pandas.DataFrame', 'pd.DataFrame', ({(301, 32, 303, 5): "{'post_code': postcodes_list}"}, {}), "({'post_code': postcodes_list})", True, 'import pandas as pd\n'), ((36, 21, 38, 12), 're.findall', 're.findall', ({(37, 8, 37, 71): '"""(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\\\?postcode=)"""', (38, 8, 38, 11): 'url'}, {}), "('(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\\\?postcode=)', url\n )", False, 'import re\n'), ((170, 21, 172, 12), 're.findall', 're.findall', ({(171, 8, 171, 71): '"""(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\\\?postcode=)"""', (172, 8, 172, 11): 'url'}, {}), "('(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\\\?postcode=)', url\n )", False, 'import re\n'), ((179, 15, 179, 34), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(179, 29, 179, 33): 'page'}, {}), '(page)', False, 'from bs4 import BeautifulSoup\n'), ((192, 15, 192, 55), 're.findall', 're.findall', ({(192, 26, 192, 49): '"""(?<=\\\\?postcode=)(.)*"""', (192, 51, 192, 54): 'url'}, {}), "('(?<=\\\\?postcode=)(.)*', url)", False, 'import re\n'), ((253, 14, 253, 52), 'pandas.DataFrame', 'pd.DataFrame', ({(253, 27, 253, 51): "{'name': [], 'type': []}"}, {}), "({'name': [], 'type': []})", True, 'import pandas as pd\n'), ((257, 18, 257, 66), 'pandas.DataFrame', 'pd.DataFrame', ({(257, 31, 257, 65): "{'name': [], 'deliveroo_name': []}"}, {}), "({'name': [], 'deliveroo_name': []})", True, 'import pandas as pd\n'), ((260, 26, 260, 75), 'pandas.DataFrame', 'pd.DataFrame', ({(260, 39, 260, 74): "{'restaurant_id': [], 'tag_id': []}"}, {}), "({'restaurant_id': [], 'tag_id': []})", True, 'import pandas as pd\n'), ((263, 20, 263, 67), 'pandas.DataFrame', 'pd.DataFrame', ({(263, 33, 263, 66): "{'restaurant_id': [], 'name': []}"}, {}), "({'restaurant_id': [], 'name': []})", True, 'import pandas as pd\n'), ((266, 17, 270, 27), 'pandas.DataFrame', 'pd.DataFrame', ({(267, 8, 270, 26): "{'menu_section_id': [], 'name': [], 'price_in_pence': [], 'is_popular': []}"}, {}), "({'menu_section_id': [], 'name': [], 'price_in_pence': [],\n 'is_popular': []})", True, 'import pandas as pd\n'), ((276, 26, 276, 75), 'pandas.DataFrame', 'pd.DataFrame', ({(276, 39, 276, 74): "{'restaurant_id': [], 'loc_id': []}"}, {}), "({'restaurant_id': [], 'loc_id': []})", True, 'import pandas as pd\n')] |
AlexandruGhergut/wouso | wouso/core/security/admin.py | f26244ff58ae626808ae8c58ccc93d21f9f2666f | from django.contrib import admin
from wouso.core.security.models import Report
admin.site.register(Report)
| [((4, 0, 4, 27), 'django.contrib.admin.site.register', 'admin.site.register', ({(4, 20, 4, 26): 'Report'}, {}), '(Report)', False, 'from django.contrib import admin\n')] |
diliprk/SmartCityVisualization | DataWrangling/TTNData2Gsheet_Auto.py | 618cd433c2f6bb55042c643ccaef12b5814ccb77 | #### Reading Data from The Things Network (TTN) and Automatically Storing it to a Google Spreadsheet
# Author: Dilip Rajkumar
# Email: [email protected]
# Date: 19/01/2018
# Revision: version#1
# License: MIT License
import pandas as pd
import requests
from df2gspread import df2gspread as d2g
import time
## Set Initial Time Duration in mins to query TTN Data:
time_duration = 5
# Insert spreadsheet file id of Google Spreadsheet
spreadsheet = '1ftXlebCTDp5tTxvlm5K3Sv1oNttDHR7s1xTi-i-ZR_o' ## Google SpreadSheet Title: TTN_Live_DataLogger
# Insert Sheet Name
wks_name = 'Sheet1'
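# The spreadsheet id and worksheet name above identify the target sheet that
# d2g.upload() writes to in the loop at the bottom of this script.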
def queryttndata(time_duration):
'''
This function queries data from TTN Swagger API based on a time duration which is given as an input
'''
headers = {'Accept': 'application/json','Authorization': 'key ttn-account-v2.P4kRaEqenNGbIdFSgSLDJGMav5K9YrekkMm_F1lOVrw'}
## Set query duration in minutes
querytime = str(time_duration) + 'm'
params = (('last', querytime),)
response = requests.get('https://vehiclecounter.data.thethingsnetwork.org/api/v2/query', headers=headers, params=params).json()
df_raw = pd.DataFrame.from_dict(response)
return df_raw
def cleandf(df):
'''
    Clean and optimize the raw dataframe returned by the TTN API.
    The cleaning steps are specific to this dataset and will differ for other datasets.
'''
df.rename(columns={'time': 'TTNTimeStamp'}, inplace=True)
df['TTNTimeStamp'] = pd.to_datetime(df['TTNTimeStamp'])
df['TTNTimeStamp'] = df['TTNTimeStamp'] + pd.Timedelta(hours=1) ## Offset Time by 1 hour to fix TimeZone Error of Swagger API TimeStamps
df['TTNTimeStamp'] = df['TTNTimeStamp'].values.astype('datetime64[s]')
drop_cols = ['raw','device_id']
df = df.drop(drop_cols, 1)
df.reset_index()
df = df.reindex(['TTNTimeStamp','Count'], axis=1)
print("Latest Data:")
print(df.tail(1),'\n')
return df
while True:
#begin your infinite loop
df_raw = queryttndata(time_duration)
df_clean = cleandf(df_raw)
d2g.upload(df_clean, spreadsheet,wks_name,col_names=True,clean=True) # Write dataframe to Google Spreadsheet
df_clean.to_csv('TTN_VehicleCountData.csv', date_format="%d/%m/%Y %H:%M:%S",index=True) # Save DataFrame locally
time.sleep(60) # Call function every 60 seconds
    time_duration += 1 ## Increment query duration by 1 min at the end of every loop iteration
| [((31, 13, 31, 45), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ({(31, 36, 31, 44): 'response'}, {}), '(response)', True, 'import pandas as pd\n'), ((40, 25, 40, 59), 'pandas.to_datetime', 'pd.to_datetime', ({(40, 40, 40, 58): "df['TTNTimeStamp']"}, {}), "(df['TTNTimeStamp'])", True, 'import pandas as pd\n'), ((55, 4, 55, 72), 'df2gspread.df2gspread.upload', 'd2g.upload', (), '', True, 'from df2gspread import df2gspread as d2g\n'), ((57, 4, 57, 18), 'time.sleep', 'time.sleep', ({(57, 15, 57, 17): '(60)'}, {}), '(60)', False, 'import time\n'), ((41, 46, 41, 67), 'pandas.Timedelta', 'pd.Timedelta', (), '', True, 'import pandas as pd\n'), ((30, 15, 30, 124), 'requests.get', 'requests.get', (), '', False, 'import requests\n')] |
felliott/SHARE | tests/share/normalize/test_xml.py | 8fd60ff4749349c9b867f6188650d71f4f0a1a56 | import xmltodict
from share.transform.chain import * # noqa
EXAMPLE = '''
<entry>
<id>http://arxiv.org/abs/cond-mat/0102536v1</id>
<updated>2001-02-28T20:12:09Z</updated>
<published>2001-02-28T20:12:09Z</published>
<title>Impact of Electron-Electron Cusp
on Configuration Interaction Energies</title>
<summary> The effect of the electron-electron cusp on the convergence of configuration
interaction (CI) wave functions is examined. By analogy with the
pseudopotential approach for electron-ion interactions, an effective
electron-electron interaction is developed which closely reproduces the
scattering of the Coulomb interaction but is smooth and finite at zero
electron-electron separation. The exact many-electron wave function for this
smooth effective interaction has no cusp at zero electron-electron separation.
We perform CI and quantum Monte Carlo calculations for He and Be atoms, both
with the Coulomb electron-electron interaction and with the smooth effective
electron-electron interaction. We find that convergence of the CI expansion of
the wave function for the smooth electron-electron interaction is not
significantly improved compared with that for the divergent Coulomb interaction
for energy differences on the order of 1 mHartree. This shows that, contrary to
popular belief, description of the electron-electron cusp is not a limiting
factor, to within chemical accuracy, for CI calculations.
</summary>
<author>
<name>David Prendergast</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">Department of Physics</arxiv:affiliation>
</author>
<author>
<name>M. Nolan</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">NMRC, University College, Cork, Ireland</arxiv:affiliation>
</author>
<author>
<name>Claudia Filippi</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">Department of Physics</arxiv:affiliation>
</author>
<author>
<name>Stephen Fahy</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">Department of Physics</arxiv:affiliation>
</author>
<author>
<name>J. C. Greer</name>
<arxiv:affiliation xmlns:arxiv="http://arxiv.org/schemas/atom">NMRC, University College, Cork, Ireland</arxiv:affiliation>
</author>
<arxiv:doi xmlns:arxiv="http://arxiv.org/schemas/atom">10.1063/1.1383585</arxiv:doi>
<link title="doi" href="http://dx.doi.org/10.1063/1.1383585" rel="related"/>
<arxiv:comment xmlns:arxiv="http://arxiv.org/schemas/atom">11 pages, 6 figures, 3 tables, LaTeX209, submitted to The Journal of
Chemical Physics</arxiv:comment>
<arxiv:journal_ref xmlns:arxiv="http://arxiv.org/schemas/atom">J. Chem. Phys. 115, 1626 (2001)</arxiv:journal_ref>
<link href="http://arxiv.org/abs/cond-mat/0102536v1" rel="alternate" type="text/html"/>
<link title="pdf" href="http://arxiv.org/pdf/cond-mat/0102536v1" rel="related" type="application/pdf"/>
<arxiv:primary_category xmlns:arxiv="http://arxiv.org/schemas/atom" term="cond-mat.str-el" scheme="http://arxiv.org/schemas/atom"/>
<category term="cond-mat.str-el" scheme="http://arxiv.org/schemas/atom"/>
</entry>
'''
class Organization(Parser):
name = ctx
class IsAffiliatedWith(Parser):
related = Delegate(Organization, ctx)
class Person(Parser):
related_agents = Map(Delegate(IsAffiliatedWith), ctx.affiliation)
given_name = ParseName(ctx.name).first
family_name = ParseName(ctx.name).last
class Creator(Parser):
agent = Delegate(Person, ctx)
class Preprint(Parser):
title = ctx.entry.title
description = ctx.entry.summary
related_agents = Map(Delegate(Creator), ctx.entry.author)
class Extra:
comment = ctx.entry.comment
journal_ref = ctx.entry.journal_ref
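# Fields declared on the nested Extra class end up in the normalized document's
# 'extra' dict, which the test below asserts on.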
class TestParser:
def test_preprint_parser(self):
parsed = Preprint(
xmltodict.parse(
EXAMPLE,
process_namespaces=True,
namespaces={
'http://www.w3.org/2005/Atom': None,
'http://arxiv.org/schemas/atom': None,
}
)
).parse()
assert isinstance(parsed, dict)
assert parsed['@type'] == 'preprint'
normalized = ctx.pool[parsed]
assert normalized['extra'] == {'comment': '11 pages, 6 figures, 3 tables, LaTeX209, submitted to The Journal of\n Chemical Physics', 'journal_ref': 'J. Chem. Phys. 115, 1626 (2001)'}
# no newlines, leading/trailing white space, or multiple spaces
assert normalized['title'] == 'Impact of Electron-Electron Cusp on Configuration Interaction Energies'
assert normalized['description'] == 'The effect of the electron-electron cusp on the convergence of configuration interaction (CI) wave functions is examined. By analogy with the pseudopotential approach for electron-ion interactions, an effective electron-electron interaction is developed which closely reproduces the scattering of the Coulomb interaction but is smooth and finite at zero electron-electron separation. The exact many-electron wave function for this smooth effective interaction has no cusp at zero electron-electron separation. We perform CI and quantum Monte Carlo calculations for He and Be atoms, both with the Coulomb electron-electron interaction and with the smooth effective electron-electron interaction. We find that convergence of the CI expansion of the wave function for the smooth electron-electron interaction is not significantly improved compared with that for the divergent Coulomb interaction for energy differences on the order of 1 mHartree. This shows that, contrary to popular belief, description of the electron-electron cusp is not a limiting factor, to within chemical accuracy, for CI calculations.'
| [((94, 12, 101, 13), 'xmltodict.parse', 'xmltodict.parse', (), '', False, 'import xmltodict\n')] |
mysticfall/alleycat-reactive | alleycat/reactive/property.py | 69ff2f283627a6c613b084677be707234b29164c | from __future__ import annotations
from typing import TypeVar, Generic, Callable, Optional, Any, cast, Tuple
import rx
from returns import pipeline
from returns.functions import identity
from returns.maybe import Maybe, Nothing
from rx import Observable
from rx.subject import BehaviorSubject
from . import ReactiveValue, ReactiveView
from .value import Modifier
T = TypeVar("T")
class ReactiveProperty(Generic[T], ReactiveValue[T]):
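    # A writable ReactiveValue: each owning instance gets its own PropertyData,
    # which wraps the current value in a BehaviorSubject and runs the validator
    # on every write.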
def __init__(
self,
init_value: Maybe[T] = Nothing,
read_only=False,
modifier: Callable[[Any], Modifier] = lambda _: identity,
validator: Callable[[Any, T], T] = lambda _, v: v) -> None:
super().__init__(read_only)
self._init_value = init_value
self._modifier = modifier
self._validator = validator
@property
def init_value(self) -> Maybe[T]:
return self._init_value
@property
def validator(self) -> Callable[[T, Any], T]:
return self._validator
@property
def modifier(self) -> Callable[[Any], Modifier]:
return self._modifier
def as_view(self) -> ReactiveView[T]:
return ReactiveView(self.context, self.read_only)
def pipe(self, modifiers: Callable[[Any], Tuple[Modifier, ...]]) -> ReactiveProperty:
def stack(obj: Any):
            # FIXME: Not sure why both PyCharm and Mypy fail to resolve pipeline.pipe(). Should investigate later.
# noinspection PyUnresolvedReferences
return pipeline.pipe(*([self.modifier(obj)] + list(modifiers(obj)))) # type:ignore
return ReactiveProperty(self.init_value, self.read_only, stack, self.validator)
def validate(self, validator: Callable[[Any, T], T]) -> ReactiveProperty[T]:
if validator is None:
raise ValueError("Argument 'modifier' is required.")
def validate(obj: Any, v: T) -> T:
return validator(obj, self.validator(obj, v))
return ReactiveProperty(self.init_value, self.read_only, self.modifier, validate)
class PropertyData(ReactiveValue.Data[T]):
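        # Per-instance state holder; the BehaviorSubject is created eagerly when
        # an initial value is supplied, otherwise lazily on the first write.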
def __init__(
self,
name: str,
init_value: Maybe[T],
modifier: Modifier,
validator: Callable[[T], T]):
assert name is not None
assert init_value is not None
assert modifier is not None
assert validator is not None
self._validator = validator
self._property: Optional[BehaviorSubject] = None
obs: Observable
if init_value != Nothing:
self._property = BehaviorSubject(init_value.map(validator).unwrap())
obs = self._property
else:
obs = rx.empty()
super().__init__(name, obs, modifier)
# Must override to appease Mypy... I hate Python.
@property
def value(self) -> T:
return super().value
@value.setter
def value(self, value: T):
self._check_disposed()
if self.initialized:
assert self._property is not None
self._property.on_next(self.validator(value))
else:
self._property = BehaviorSubject(self.validator(value))
self.observable = self._property
@property
def validator(self) -> Callable[[T], T]:
return self._validator
def dispose(self) -> None:
assert self._property is not None
self._check_disposed()
self._property.on_completed()
super().dispose()
def _create_data(self, obj: Any) -> PropertyData:
assert obj is not None
assert self.name is not None
def validate(v: T) -> T:
return self.validator(obj, v)
return self.PropertyData(self.name, self.init_value, self.modifier(obj), validate)
def _get_data(self, obj: Any) -> PropertyData:
assert obj is not None
return cast(ReactiveProperty.PropertyData, super()._get_data(obj))
def _set_value(self, obj: Any, data: ReactiveValue.Data, value: Any) -> None:
assert obj is not None
assert isinstance(data, ReactiveProperty.PropertyData)
data.value = value
| [((15, 4, 15, 16), 'typing.TypeVar', 'TypeVar', ({(15, 12, 15, 15): '"""T"""'}, {}), "('T')", False, 'from typing import TypeVar, Generic, Callable, Optional, Any, cast, Tuple\n'), ((89, 22, 89, 32), 'rx.empty', 'rx.empty', ({}, {}), '()', False, 'import rx\n')] |
Siebjee/argo-workflows | sdks/python/client/openapi_client/model/github_com_argoproj_labs_argo_dataflow_api_v1alpha1_dedupe.py | 1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c | """
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from openapi_client.exceptions import ApiAttributeError
def lazy_import():
from openapi_client.model.github_com_argoproj_labs_argo_dataflow_api_v1alpha1_abstract_step import GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep
globals()['GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep'] = GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep
class GithubComArgoprojLabsArgoDataflowApiV1alpha1Dedupe(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'abstract_step': (GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep,), # noqa: E501
'max_size': (str,), # noqa: E501
'uid': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'abstract_step': 'abstractStep', # noqa: E501
'max_size': 'maxSize', # noqa: E501
'uid': 'uid', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GithubComArgoprojLabsArgoDataflowApiV1alpha1Dedupe - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
abstract_step (GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep): [optional] # noqa: E501
max_size (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501
uid (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GithubComArgoprojLabsArgoDataflowApiV1alpha1Dedupe - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
abstract_step (GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep): [optional] # noqa: E501
max_size (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501
uid (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [((161, 18, 168, 13), 'openapi_client.model_utils.ApiTypeError', 'ApiTypeError', (), '', False, 'from openapi_client.model_utils import ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info\n'), ((243, 18, 250, 13), 'openapi_client.model_utils.ApiTypeError', 'ApiTypeError', (), '', False, 'from openapi_client.model_utils import ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info\n'), ((268, 22, 269, 73), 'openapi_client.exceptions.ApiAttributeError', 'ApiAttributeError', ({(268, 40, 269, 72): 'f"""`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes."""'}, {}), "(\n f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.'\n )", False, 'from openapi_client.exceptions import ApiAttributeError\n')] |
csgcmai/cvat | utils/mask/converter.py | 074500de7bf638fdf66f3874b80df9e87d58a746 | #!/usr/bin/env python
#
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, division, print_function
import argparse
import os
import glog as log
import numpy as np
import cv2
from lxml import etree
from tqdm import tqdm
def parse_args():
"""Parse arguments of command line"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description='Convert CVAT XML annotations to masks'
)
parser.add_argument(
'--cvat-xml', metavar='FILE', required=True,
help='input file with CVAT annotation in xml format'
)
parser.add_argument(
'--background-color', metavar='COLOR_BGR', default="0,0,0",
help='specify background color (by default: 0,0,0)'
)
parser.add_argument(
'--label-color', metavar='LABEL:COLOR_BGR', action='append',
default=[],
help="specify a label's color (e.g. 255 or 255,0,0). The color will " +
"be interpreted in accordance with the mask format."
)
parser.add_argument(
'--mask-bitness', type=int, choices=[8, 24], default=8,
help='choose bitness for masks'
)
parser.add_argument(
'--output-dir', metavar='DIRECTORY', required=True,
help='directory for output masks'
)
return parser.parse_args()
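# Example invocation (file and label names are illustrative):
#   python converter.py --cvat-xml annotations.xml --output-dir out_masks \
#       --mask-bitness 24 --label-color person:0,0,255 --background-color 0,0,0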
def parse_anno_file(cvat_xml):
root = etree.parse(cvat_xml).getroot()
anno = []
for image_tag in root.iter('image'):
image = {}
for key, value in image_tag.items():
image[key] = value
image['shapes'] = []
for poly_tag in image_tag.iter('polygon'):
polygon = {'type': 'polygon'}
for key, value in poly_tag.items():
polygon[key] = value
image['shapes'].append(polygon)
for box_tag in image_tag.iter('box'):
box = {'type': 'box'}
for key, value in box_tag.items():
box[key] = value
box['points'] = "{0},{1};{2},{1};{2},{3};{0},{3}".format(
box['xtl'], box['ytl'], box['xbr'], box['ybr'])
image['shapes'].append(box)
image['shapes'].sort(key=lambda x: int(x.get('z_order', 0)))
anno.append(image)
return anno
def create_mask_file(mask_path, width, height, bitness, color_map, background, shapes):
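    # Shapes arrive sorted by z_order (see parse_anno_file), so shapes with a higher
    # z_order are filled last and overwrite overlapping shapes with a lower z_order.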
mask = np.zeros((height, width, bitness // 8), dtype=np.uint8)
for shape in shapes:
color = color_map.get(shape['label'], background)
points = [tuple(map(float, p.split(','))) for p in shape['points'].split(';')]
points = np.array([(int(p[0]), int(p[1])) for p in points])
mask = cv2.fillPoly(mask, [points], color=color)
cv2.imwrite(mask_path, mask)
def to_scalar(str, dim):
scalar = list(map(int, str.split(',')))
if len(scalar) < dim:
scalar.extend([scalar[-1]] * dim)
return tuple(scalar[0:dim])
def main():
args = parse_args()
anno = parse_anno_file(args.cvat_xml)
color_map = {}
dim = args.mask_bitness // 8
for item in args.label_color:
label, color = item.split(':')
color_map[label] = to_scalar(color, dim)
background = to_scalar(args.background_color, dim)
for image in tqdm(anno, desc='Generate masks'):
mask_path = os.path.join(args.output_dir, os.path.splitext(image['name'])[0] + '.png')
mask_dir = os.path.dirname(mask_path)
if mask_dir:
os.makedirs(mask_dir, exist_ok=True)
create_mask_file(mask_path, int(image['width']), int(image['height']),
args.mask_bitness, color_map, background, image['shapes'])
if __name__ == "__main__":
main()
| [((20, 13, 23, 5), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((81, 11, 81, 66), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((88, 4, 88, 32), 'cv2.imwrite', 'cv2.imwrite', ({(88, 16, 88, 25): 'mask_path', (88, 27, 88, 31): 'mask'}, {}), '(mask_path, mask)', False, 'import cv2\n'), ((107, 17, 107, 50), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm\n'), ((87, 15, 87, 56), 'cv2.fillPoly', 'cv2.fillPoly', (), '', False, 'import cv2\n'), ((109, 19, 109, 45), 'os.path.dirname', 'os.path.dirname', ({(109, 35, 109, 44): 'mask_path'}, {}), '(mask_path)', False, 'import os\n'), ((55, 11, 55, 32), 'lxml.etree.parse', 'etree.parse', ({(55, 23, 55, 31): 'cvat_xml'}, {}), '(cvat_xml)', False, 'from lxml import etree\n'), ((111, 12, 111, 48), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((108, 50, 108, 81), 'os.path.splitext', 'os.path.splitext', ({(108, 67, 108, 80): "image['name']"}, {}), "(image['name'])", False, 'import os\n')] |
gkiar/pyAFQ | examples/plot_afq_callosal.py | fb6985c2a9715a378e1ca94dc89f6bc966c60ab5 | """
==============================
Callosal bundles using AFQ API
==============================

An example using the AFQ API to find callosal bundles using the templates from:
http://hdl.handle.net/1773/34926
"""
import os.path as op
import plotly
from AFQ import api
from AFQ.mask import RoiMask
import AFQ.data as afd
##########################################################################
# Get some example data
# ---------------------
#
# Retrieves `Stanford HARDI dataset <https://purl.stanford.edu/ng782rw8378>`_.
#
afd.organize_stanford_data(clear_previous_afq=True)
##########################################################################
# Set tractography parameters (optional)
# ----------------------------------------
# We create tracking_params, which we will pass to the AFQ object to specify
# that we want 10,000 seeds randomly distributed in the ROIs of every bundle.
#
# We only do this to make this example faster and consume less space.
tracking_params = dict(seed_mask=RoiMask(),
n_seeds=10000,
random_seeds=True,
rng_seed=42)
##########################################################################
# Initialize an AFQ object:
# -------------------------
#
# We specify bundle_info as the default bundles list (api.BUNDLES) plus the
# callosal bundle list. This tells the AFQ object to use bundles from both
# the standard and callosal templates.
myafq = api.AFQ(bids_path=op.join(afd.afq_home,
'stanford_hardi'),
dmriprep='vistasoft',
bundle_info=api.BUNDLES + api.CALLOSUM_BUNDLES,
tracking_params=tracking_params)
##########################################################################
# Visualizing bundles and tract profiles:
# ---------------------------------------
# The following will visualize the bundles using plotly's interactive
# visualization, which should automatically open in a new browser window.
bundle_html = myafq.viz_bundles(export=True, n_points=50)
plotly.io.show(bundle_html[0])
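# To save the figure instead of (or in addition to) opening it in a browser, one
# could also write it out with plotly, e.g. (illustrative):
#   plotly.io.write_html(bundle_html[0], file='callosal_bundles.html')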
| [((23, 0, 23, 51), 'AFQ.data.organize_stanford_data', 'afd.organize_stanford_data', (), '', True, 'import AFQ.data as afd\n'), ((60, 0, 60, 30), 'plotly.io.show', 'plotly.io.show', ({(60, 15, 60, 29): 'bundle_html[0]'}, {}), '(bundle_html[0])', False, 'import plotly\n'), ((34, 33, 34, 42), 'AFQ.mask.RoiMask', 'RoiMask', ({}, {}), '()', False, 'from AFQ.mask import RoiMask\n'), ((47, 26, 48, 51), 'os.path.join', 'op.join', ({(47, 34, 47, 46): 'afd.afq_home', (48, 34, 48, 50): '"""stanford_hardi"""'}, {}), "(afd.afq_home, 'stanford_hardi')", True, 'import os.path as op\n')] |
Soldie/Nscan-scanner-ip | latest/probe.py | 4a507ca97a9f8b7f3fa4766c835f108671dbbcd6 | import time
import Queue
import random
import socket
import struct
import logging
import threading
from convert import *
from protocol import ethernet, ip, tcp, udp
ETH_P_IP = 0x0800 # IP protocol
ETH_P_ALL = 0x0003 # Every packet
NSCRIPT_PATH = 'nscript' # NSCRIPT PATH
PAYLOAD = {
53:('\x5d\x0d\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06'
'google\x03com\x00\x00\x01\x00\x01'), # 'google.com' DNS Lookup
161:('\x30\x26\x02\x01\x01\x04\x06public\xa1\x19\x02'
'\x04\x56\x9f\x5a\xdd\x02\x01\x00\x02\x01\x00\x30\x0b\x30\x09\x06'
'\x05\x2b\x06\x01\x02\x01\x05\x00'), # SNMP GetNextRequest|public|2c version|1.3.6.1.2.1
    123:('\x17\x00\x02\x05'), # NTP systats command; lacks the 38 trailing null bytes (just to save bandwidth)
1900:('M-SEARCH * HTTP/1.1\r\nHOST: 239.255.255.250:1900\r\n'
'MAN: "ssdp:discover"\r\nMX: 2\r\nST: ssdp:all\r\n\r\n')
}
class Generator(object):
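    # Iterator over offsets in [0, size): values are produced in a strided,
    # interleaved order (0, inc, 2*inc, ..., then inc-1, 2*inc-1, ...), which
    # spreads probes across the target range instead of scanning it sequentially.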
def __init__(self, size):
self.size = size
self.inc = size/4
if self.inc<1:
self.inc = 1
self.base = -self.inc
self.num = self.base
self.index = 0
def __iter__(self):
return self
def next(self):
if (self.num+self.inc)>=self.size:
self.next_index()
self.next_base()
self.num = self.num + self.inc
return self.num
def next_base(self):
self.base = 0
self.base-= self.index
self.num = self.base
def next_index(self):
self.index+=1
if self.index>=self.inc:
raise StopIteration
def suspend(self):
return self.size, self.inc, self.base, self.num, self.index
def resume(self, size, inc, base, num, index):
self.size = size
self.inc = inc
self.base = base
self.num = num
self.index = index
class ScriptEngine(object):
def __init__(self, imports):
self.imports = imports
self.event = threading.Event()
self.queues = {}
self.thread = []
def Load(self):
for script in self.imports:
q = Queue.Queue()
s = __import__('{}.{}'.format(NSCRIPT_PATH, script),
fromlist=[NSCRIPT_PATH])
t = threading.Thread(target=s.run,
args=(q, self.event))
self.thread.append(t)
t.setDaemon(True)
t.start()
self.queues[script] = q
def Feed(self, host, port):
for scr in self.imports:
for r in self.imports[scr]:
if port in xrange(r[0], r[1]):
self.queues[scr].put((host, port))
break
def Cleanup(self):
while Alive(self.thread):
time.sleep(10)
class nscan(object):
def __init__(self, options):
self.options = options
self.hosts = self.split(options.hosts, options.threads)
self.ports = options.ports
self.srcp = random.randint(1, 65535)#self.PickPort() # source port
self.smac = options.smac
self.dmac = options.dmac
self.ifname = options.ifname
self.siface = options.siface
self.diface = options.diface
self.banner = options.banner
self.count = options.count
self.cooldown = options.cooldown
self.queue = Queue.Queue()
if options.stype.upper()=='U':
self.stype = socket.IPPROTO_UDP
else:
self.stype = socket.IPPROTO_TCP
self.events = {
'send': threading.Event(),
'recv': threading.Event()}
self.threads = {
'send': [],
'recv': None}
def __Transport(self, src, dst=0):
if self.stype==socket.IPPROTO_TCP:
transport = tcp.TCP(src, dst)
transport.seqn = 0xDEADC0DE
else:
transport = udp.UDP(src, dst)
return transport
def __Pack(self, transport, src, dst):
if self.stype==socket.IPPROTO_TCP:
transport.payload = ''
else:
transport.payload = PAYLOAD.get(transport.dstp, '\x00\r\n\r\n')
packed = transport.pack(src, dst)
return packed + transport.payload
def __CookieCheck(self, data):
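        # Decide whether a received raw IP packet is a reply to one of our probes.
        # Offsets are into the IP packet: bytes 22:24 hold the transport-layer
        # destination port; for TCP, bytes 28:32 are the acknowledgment number and
        # byte 33 holds the flags, which must be SYN+ACK (18) with an ack equal to
        # 0xDEADC0DE + 1 (the sequence cookie set in __Transport).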
check = False
dstp = struct.unpack('!H', data[22:24])[0]
if self.stype==socket.IPPROTO_UDP:
if dstp==self.srcp:
check = True
else:
ackn = struct.unpack('!L', data[28:32])[0]
flags = struct.unpack('B', data[33])[0] & 0b010010 # SYN-ACK
if dstp==self.srcp and ackn==0xDEADC0DF and flags==18:
check = True
return check
def init(self):
generators = []
for h in self.hosts:
g = Generator(h[1]-h[0])
generators.append(g)
t = threading.Thread(target=self.send, args=(h, self.srcp, g))
t.setDaemon(True)
self.threads['send'].append(t)
t = threading.Thread(target=self.recv)
t.setDaemon(True)
self.threads['recv'] = t
if 'resume' in dir(self.options):
i = 0
for g in generators:
g.resume(*self.options.indexes[i])
i+=1
return self.threads, self.events, self.queue, generators
def run(self):
self.events['send'].set()
self.events['recv'].set()
for t in self.threads['send']:
t.start()
self.threads['recv'].start()
def send(self, hosts, srcp, gen):
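        # On point-to-point (ppp) interfaces there is no Ethernet framing, so a raw
        # AF_INET/IPPROTO_RAW socket is used; otherwise an AF_PACKET socket is used
        # and an Ethernet header is prepended to every packet. The generator walks
        # the host range and every requested port is probed for each host.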
if 'ppp' in self.ifname:
family = socket.AF_INET
proto = socket.IPPROTO_RAW
eth = ''
else:
family = socket.AF_PACKET
proto = ETH_P_IP
eth = ethernet.ETHER(mac2byte(self.smac), mac2byte(self.dmac), ETH_P_IP).pack()
sock = socket.socket(family, socket.SOCK_RAW, proto)
transport = self.__Transport(srcp, 0)
npacket = 0
self.events['send'].wait()
target = hosts[0]
while self.events['send'].isSet():
try:
target = hosts[0] + gen.next()
iph = ip.IP(self.diface, dec2dot(target), self.stype)
except StopIteration:
break
for port_list in self.ports:
for port in range(port_list[0], port_list[1]):
if self.events['send'].isSet():
transport.dstp = port
packet = eth + iph.pack() + self.__Pack(transport, iph.src, iph.dst) #tcph.pack(iph.src, iph.dst)
sock.sendto(packet, (dec2dot(target), 0)) # self.ifname
npacket+=1
if not npacket%self.cooldown[0]:
time.sleep(self.cooldown[1])
else:
break
logging.info('[SEND] Sent: {} packets'.format(npacket))
sock.close()
def recv(self):
sock = socket.socket(socket.AF_INET,
socket.SOCK_RAW,
self.stype)
sock.bind(('', self.srcp))
sock.settimeout(5)
self.events['recv'].wait()
counter = 0
while self.events['recv'].isSet():
try:
data, sa_ll = sock.recvfrom(65535)
if self.__CookieCheck(data):
self.queue.put(Extract(data))
counter += 1
if counter==self.count:
self.events['send'].clear()
break
except socket.timeout:
continue
sock.close()
logging.info('[RECV] Received: {} packets'.format(counter))
def split(self, hosts, n):
'''
Split host range into n parts (multithreaded)
'''
nhosts = hosts[1] - hosts[0] # number of hosts
nparts = nhosts/n + 1
host_parts = []
start = hosts[0]
while True:
if len(host_parts)<n-1:
end = start + nparts
host_parts.append((start, end))
start = end
else:
host_parts.append((start, hosts[1]))
break
return host_parts
def PickPort(self):
while True:
srcp = random.randrange(10000, 65535)
if srcp not in self.sport:
self.sport.append(srcp)
break
return srcp
def Extract(packet):
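    # Pull the source IP (IP header bytes 12:16) and source port (transport header
    # bytes 0:2, i.e. packet bytes 20:22) out of a raw reply packet.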
src = socket.inet_ntoa(packet[12:16])
srcp = struct.unpack('!H', packet[20:22])[0]
return src, srcp
def Alive(thread_list):
''' check if thread is alive '''
alive = False
for t in thread_list:
if t.isAlive():
alive = True
break
return alive
| [] |
dstambler17/Parsy.io | parsy-backend/flaskApp/assignment/views.py | 14c4905809f79f191efbbbdfbd0e8d9e838478e7 | import sys
from flask import Blueprint, request, jsonify
from flaskApp import db
from flaskApp.assignment.utils import *
from flaskApp.error.error_handlers import *
import json
from flaskApp.helpers import getAssignmentData
assignment = Blueprint('assignment', __name__)
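# Each route below delegates to DbAssignmentUtils (or getAssignmentData) and converts
# known errors (NotFound, BadRequest, ValidationFailed) into a JSON body with the
# matching HTTP status code.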
@assignment.route('/restoreAssignment/<calID>/<courseID>', methods=['POST'])
def restore_assignment(calID, courseID):
try:
DbAssignmentUtils.restore_all_original_assignment(calID, courseID)
return jsonify({"restore" : "success"}), 201
except (NotFound) as e:
return jsonify(e.body), e.status_code
@assignment.route('/getAssignment/<calID>/<courseID>/<assignment>', methods=['GET'])
def get_assignment_details(calID, courseID, assignment):
try:
res = DbAssignmentUtils.get_assignment_slot_details(calID, courseID, assignment)
return jsonify(res), 200
except (NotFound) as e:
return jsonify(e.body), e.status_code
@assignment.route('/deleteAssignment/<calID>/<courseID>', methods=['DELETE'])
def delete_assignment(calID, courseID):
try:
request_body = json.loads(request.get_data())
DbAssignmentUtils.delete_assignment_slot(calID, courseID, request_body)
return jsonify({}), 204
except (NotFound, BadRequest) as e:
return jsonify(e.body), e.status_code
@assignment.route('/addAssignment/<calID>/<courseID>', methods=['POST'])
def add_assignment(calID, courseID):
try:
request_body = json.loads(request.get_data())
res = DbAssignmentUtils.add_Assignment_slot(calID, courseID, request_body)
return jsonify(res), 201
except (NotFound, BadRequest, ValidationFailed) as e:
return jsonify(e.body), e.status_code
'''Test method; keep just in case. Will probably be moved to a separate API designed to
interact with just the MySQL database that the data pipeline will drop data into'''
@assignment.route('/getAssignmentTest/<courseID>', methods=['GET'])
def get_session_assignment(courseID):
try:
result = getAssignmentData(courseID)
return jsonify(result)
except (NotFound) as e:
return jsonify(e.body), e.status_code
| [((9, 13, 9, 46), 'flask.Blueprint', 'Blueprint', ({(9, 23, 9, 35): '"""assignment"""', (9, 37, 9, 45): '__name__'}, {}), "('assignment', __name__)", False, 'from flask import Blueprint, request, jsonify\n'), ((50, 17, 50, 44), 'flaskApp.helpers.getAssignmentData', 'getAssignmentData', ({(50, 35, 50, 43): 'courseID'}, {}), '(courseID)', False, 'from flaskApp.helpers import getAssignmentData\n'), ((51, 15, 51, 30), 'flask.jsonify', 'jsonify', ({(51, 23, 51, 29): 'result'}, {}), '(result)', False, 'from flask import Blueprint, request, jsonify\n'), ((15, 15, 15, 47), 'flask.jsonify', 'jsonify', ({(15, 23, 15, 46): "{'restore': 'success'}"}, {}), "({'restore': 'success'})", False, 'from flask import Blueprint, request, jsonify\n'), ((23, 15, 23, 27), 'flask.jsonify', 'jsonify', ({(23, 23, 23, 26): 'res'}, {}), '(res)', False, 'from flask import Blueprint, request, jsonify\n'), ((30, 34, 30, 52), 'flask.request.get_data', 'request.get_data', ({}, {}), '()', False, 'from flask import Blueprint, request, jsonify\n'), ((32, 15, 32, 26), 'flask.jsonify', 'jsonify', ({(32, 23, 32, 25): '{}'}, {}), '({})', False, 'from flask import Blueprint, request, jsonify\n'), ((39, 34, 39, 52), 'flask.request.get_data', 'request.get_data', ({}, {}), '()', False, 'from flask import Blueprint, request, jsonify\n'), ((41, 15, 41, 27), 'flask.jsonify', 'jsonify', ({(41, 23, 41, 26): 'res'}, {}), '(res)', False, 'from flask import Blueprint, request, jsonify\n'), ((17, 15, 17, 30), 'flask.jsonify', 'jsonify', ({(17, 23, 17, 29): 'e.body'}, {}), '(e.body)', False, 'from flask import Blueprint, request, jsonify\n'), ((25, 15, 25, 30), 'flask.jsonify', 'jsonify', ({(25, 23, 25, 29): 'e.body'}, {}), '(e.body)', False, 'from flask import Blueprint, request, jsonify\n'), ((34, 15, 34, 30), 'flask.jsonify', 'jsonify', ({(34, 23, 34, 29): 'e.body'}, {}), '(e.body)', False, 'from flask import Blueprint, request, jsonify\n'), ((43, 15, 43, 30), 'flask.jsonify', 'jsonify', ({(43, 23, 43, 29): 'e.body'}, {}), '(e.body)', False, 'from flask import Blueprint, request, jsonify\n'), ((53, 15, 53, 30), 'flask.jsonify', 'jsonify', ({(53, 23, 53, 29): 'e.body'}, {}), '(e.body)', False, 'from flask import Blueprint, request, jsonify\n')] |
dharmik-thakkar/dsapatterns | python/patterns/slidingwindow/longest_substring_no_repeating_char.py | fc5890a86c5d49097b73b6afd14e1a4e81cff7a0 | #######################################################################################################################
# Given a string, find the length of the longest substring which has no repeating characters.
#
# Input: String="aabccbb"
# Output: 3
# Explanation: The longest substring without any repeating characters is "abc".
#
# Input: String="abbbb"
# Output: 2
# Explanation: The longest substring without any repeating characters is "ab".
#
# Input: String="abccde"
# Output: 3
# Explanation: Longest substrings without any repeating characters are "abc" & "cde".
#######################################################################################################################
def longest_substring_no_repeating_char(input_str: str) -> int:
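    # Sliding window: is_present[c] remembers the last index of character c
    # (assuming lowercase ASCII letters, hence 26 slots and ord(...) - 97).
    # When a character repeats inside the window, window_start jumps past its
    # previous occurrence, so the window never contains a duplicate.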
window_start = 0
is_present = [None for i in range(26)]
max_window = 0
for i in range(len(input_str)):
char_ord = ord(input_str[i]) - 97
if is_present[char_ord] is not None:
window_start = max(window_start, is_present[char_ord] + 1)
is_present[char_ord] = i
max_window = max(max_window, i - window_start + 1)
return max_window
print(longest_substring_no_repeating_char('aabccbb'))
print(longest_substring_no_repeating_char('abbbb'))
print(longest_substring_no_repeating_char('abccde'))
print(longest_substring_no_repeating_char('abcabcbb'))
print(longest_substring_no_repeating_char('bbbbb'))
print(longest_substring_no_repeating_char('pwwkew'))
| [] |
jrderek/Big_Data_Engineering_Portfolio | Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py | bf7a5efb24f2c6e860e5ead544dadc08f791814e | import sys
sys.path.insert(0, '.')
from pyspark import SparkContext, SparkConf
from commons.Utils import Utils
if __name__ == "__main__":
'''
Create a Spark program to read the airport data from in/airports.text;
generate a pair RDD with airport name being the key and country name being the value.
    Then remove all the airports which are located in the United States and output the pair RDD to out/airports_not_in_usa_pair_rdd.text
Each row of the input file contains the following columns:
Airport ID, Name of airport, Main city served by airport, Country where airport is located,
IATA/FAA code, ICAO Code, Latitude, Longitude, Altitude, Timezone, DST, Timezone in Olson format
Sample output:
("Kamloops", "Canada")
("Wewak Intl", "Papua New Guinea")
...
'''
conf = SparkConf().setAppName("airports").setMaster("local[*]")
sc = SparkContext(conf=conf)
airportsRDD = sc.textFile("inputs/airports.text")
airportPairRDD = airportsRDD.map(lambda line:
(Utils.COMMA_DELIMITER.split(line)[1],
Utils.COMMA_DELIMITER.split(line)[3]))
airportsNotInUSA = airportPairRDD.filter(
lambda keyValue: keyValue[1] != "\"United States\"")
airportsNotInUSA.saveAsTextFile(
"outputs/airports_not_in_usa_pair_rdd.text")
| [((2, 0, 2, 23), 'sys.path.insert', 'sys.path.insert', ({(2, 16, 2, 17): '(0)', (2, 19, 2, 22): '"""."""'}, {}), "(0, '.')", False, 'import sys\n'), ((28, 9, 28, 32), 'pyspark.SparkContext', 'SparkContext', (), '', False, 'from pyspark import SparkContext, SparkConf\n'), ((27, 11, 27, 22), 'pyspark.SparkConf', 'SparkConf', ({}, {}), '()', False, 'from pyspark import SparkContext, SparkConf\n'), ((33, 38, 33, 71), 'commons.Utils.Utils.COMMA_DELIMITER.split', 'Utils.COMMA_DELIMITER.split', ({(33, 66, 33, 70): 'line'}, {}), '(line)', False, 'from commons.Utils import Utils\n'), ((34, 38, 34, 71), 'commons.Utils.Utils.COMMA_DELIMITER.split', 'Utils.COMMA_DELIMITER.split', ({(34, 66, 34, 70): 'line'}, {}), '(line)', False, 'from commons.Utils import Utils\n')] |
srl295/keyman | linux/keyman-config/keyman_config/keyboard_details.py | 4dfd0f71f3f4ccf81d1badbd824900deee1bb6d1 | #!/usr/bin/python3
# Keyboard details window
import logging
import json
from os import path
import qrcode
import tempfile
import gi
gi.require_version('Gtk', '3.0')  # must be called before importing Gtk from gi.repository
from gi.repository import Gtk
from keyman_config import KeymanComUrl, _, secure_lookup
from keyman_config.accelerators import init_accel
from keyman_config.kmpmetadata import parsemetadata
# basics: keyboard name, package version, description
# other things: filename (of kmx), ,
# OSK availability, documentation availability, package copyright
# also: supported languages, fonts
# from kmx?: keyboard version, encoding, layout type
# there is data in kmp.inf/kmp.json
# there is possibly data in kbid.json (downloaded from api)
class KeyboardDetailsView(Gtk.Dialog):
# TODO Display all the information that is available
# especially what is displayed for Keyman on Windows
# TODO clean up file once have what we want
def __init__(self, parent, kmp):
# kmp has name, version, packageID, area
if "keyboard" in kmp["name"].lower():
wintitle = kmp["name"]
else:
wintitle = _("{name} keyboard").format(name=kmp["name"])
Gtk.Dialog.__init__(self, wintitle, parent)
init_accel(self)
self.set_border_width(6)
packageDir = path.join(kmp['areapath'], kmp['packageID'])
kmp_json = path.join(packageDir, "kmp.json")
info, system, options, keyboards, files = parsemetadata(kmp_json)
if info is None:
# Dialog when invalid metadata
self.add_button(_("_Close"), Gtk.ResponseType.CLOSE)
grid = Gtk.Grid()
self.get_content_area().pack_start(grid, True, True, 12)
lbl_invalid_metadata = Gtk.Label()
lbl_invalid_metadata.set_text(_("ERROR: Keyboard metadata is damaged.\nPlease \"Uninstall\" and then \"Install\" the keyboard."))
lbl_invalid_metadata.set_halign(Gtk.Align.END)
grid.add(lbl_invalid_metadata)
self.resize(700, 200)
self.show_all()
return
kbdata = None
jsonfile = path.join(packageDir, kmp['packageID'] + ".json")
if path.isfile(jsonfile):
try:
with open(jsonfile, "r") as read_file:
kbdata = json.load(read_file)
except Exception as e:
logging.warning('Exception %s reading %s %s', type(e), jsonfile, e.args)
grid = Gtk.Grid()
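        # Details are laid out on the Gtk.Grid as caption/value pairs: a
        # right-aligned caption label in the first column and a selectable,
        # left-aligned value label attached to its right.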
# grid.set_column_homogeneous(True)
# kbdatapath = path.join("/usr/local/share/keyman", kmp["id"], kmp["id"] + ".json")
# Package info
lbl_pkg_name = Gtk.Label()
lbl_pkg_name.set_text(_("Package name: "))
lbl_pkg_name.set_halign(Gtk.Align.END)
grid.add(lbl_pkg_name)
prevlabel = lbl_pkg_name
label = Gtk.Label()
if secure_lookup(info, 'name', 'description'):
label.set_text(secure_lookup(info, 'name', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_name, Gtk.PositionType.RIGHT, 1, 1)
lbl_pkg_id = Gtk.Label()
lbl_pkg_id.set_text(_("Package id: "))
lbl_pkg_id.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_id, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_id
label = Gtk.Label()
if secure_lookup(kmp, 'packageID'):
label.set_text(kmp['packageID'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_id, Gtk.PositionType.RIGHT, 1, 1)
lbl_pkg_vrs = Gtk.Label()
lbl_pkg_vrs.set_text(_("Package version: "))
lbl_pkg_vrs.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_vrs, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_vrs
label = Gtk.Label()
if secure_lookup(info, 'version', 'description'):
label.set_text(secure_lookup(info, 'version', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_vrs, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(kbdata, 'description'):
lbl_pkg_desc = Gtk.Label()
lbl_pkg_desc.set_text(_("Package description: "))
lbl_pkg_desc.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_desc, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_desc
label = Gtk.Label()
label.set_text(kbdata['description'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
label.set_line_wrap(80)
grid.attach_next_to(label, lbl_pkg_desc, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "author"):
lbl_pkg_auth = Gtk.Label()
lbl_pkg_auth.set_text(_("Package author: "))
lbl_pkg_auth.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_auth, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_auth
label = Gtk.Label()
if secure_lookup(info, 'author', 'description'):
label.set_text(secure_lookup(info, 'author', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_auth, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "copyright"):
lbl_pkg_cpy = Gtk.Label()
lbl_pkg_cpy.set_text(_("Package copyright: "))
lbl_pkg_cpy.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_cpy, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_cpy
label = Gtk.Label()
if secure_lookup(info, 'copyright', 'description'):
label.set_text(secure_lookup(info, 'copyright', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_cpy, Gtk.PositionType.RIGHT, 1, 1)
# Padding and full width horizontal divider
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
divider_pkg = Gtk.HSeparator()
grid.attach_next_to(divider_pkg, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = divider_pkg
# Keyboard info for each keyboard
if keyboards:
for kbd in keyboards:
kbdata = None
jsonfile = path.join(packageDir, kbd['id'] + ".json")
if path.isfile(jsonfile):
try:
with open(jsonfile, "r") as read_file:
kbdata = json.load(read_file)
except Exception as e:
logging.warning('Exception %s reading %s %s', type(e), jsonfile, e.args)
# start with padding
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
# show the icon somewhere
lbl_kbd_file = Gtk.Label()
lbl_kbd_file.set_text(_("Keyboard filename: "))
lbl_kbd_file.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_file, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_file
label = Gtk.Label()
label.set_text(path.join(packageDir, kbd['id'] + ".kmx"))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_file, Gtk.PositionType.RIGHT, 1, 1)
if kbdata and secure_lookup(kbdata, 'id') != secure_lookup(kmp, 'packageID'):
lbl_kbd_name = Gtk.Label()
lbl_kbd_name.set_text(_("Keyboard name: "))
lbl_kbd_name.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_name, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_name
label = Gtk.Label()
if secure_lookup(kbdata, 'name'):
label.set_text(kbdata['name'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_name, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_id = Gtk.Label()
lbl_kbd_id.set_text(_("Keyboard id: "))
lbl_kbd_id.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_id, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_id
label = Gtk.Label()
if secure_lookup(kbdata, 'id'):
label.set_text(kbdata['id'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_id, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_vrs = Gtk.Label()
lbl_kbd_vrs.set_text(_("Keyboard version: "))
lbl_kbd_vrs.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_vrs, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_vrs
label = Gtk.Label()
if secure_lookup(kbdata, 'version'):
label.set_text(kbdata['version'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_vrs, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "author"):
lbl_kbd_auth = Gtk.Label()
lbl_kbd_auth.set_text(_("Keyboard author: "))
lbl_kbd_auth.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_auth, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_auth
label = Gtk.Label()
if secure_lookup(kbdata, 'authorName'):
label.set_text(kbdata['authorName'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_auth, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_lic = Gtk.Label()
lbl_kbd_lic.set_text(_("Keyboard license: "))
lbl_kbd_lic.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_lic, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_lic
label = Gtk.Label()
if secure_lookup(kbdata, 'license'):
label.set_text(kbdata['license'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_lic, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_desc = Gtk.Label()
lbl_kbd_desc.set_text(_("Keyboard description: "))
lbl_kbd_desc.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_desc, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_desc
label = Gtk.Label()
if secure_lookup(kbdata, 'description'):
label.set_text(kbdata['description'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
label.set_line_wrap(80)
grid.attach_next_to(label, lbl_kbd_desc, Gtk.PositionType.RIGHT, 1, 1)
# Padding and full width horizontal divider
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
divider_pkg = Gtk.HSeparator()
grid.attach_next_to(divider_pkg, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
# label7 = Gtk.Label()
# label7.set_text(_("On Screen Keyboard: "))
# label7.set_halign(Gtk.Align.END)
# grid.attach_next_to(label7, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label7
# # label = Gtk.Label()
# # label.set_text(secure_lookup(info, 'version', 'description'))
# # label.set_halign(Gtk.Align.START)
# # label.set_selectable(True)
# # grid.attach_next_to(label, label7, Gtk.PositionType.RIGHT, 1, 1)
# label8 = Gtk.Label()
# label8.set_text(_("Documentation: "))
# label8.set_halign(Gtk.Align.END)
# grid.attach_next_to(label8, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label8
# #TODO need to know which area keyboard is installed in to show this
# # label = Gtk.Label()
# # welcome_file = path.join("/usr/local/share/doc/keyman", kmp["id"], "welcome.htm")
# # if path.isfile(welcome_file):
# # label.set_text(_("Installed"))
# # else:
# # label.set_text(_("Not installed"))
# # label.set_halign(Gtk.Align.START)
# # label.set_selectable(True)
# # grid.attach_next_to(label, label8, Gtk.PositionType.RIGHT, 1, 1)
# label9 = Gtk.Label()
# # stored in kmx
# label9.set_text(_("Message: "))
# label9.set_halign(Gtk.Align.END)
# grid.attach_next_to(label9, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label9
# label = Gtk.Label()
# label.set_line_wrap(True)
# label.set_text(
# "This keyboard is distributed under the MIT license (MIT) as described somewhere")
# #label.set_text(kmp["description"])
# label.set_halign(Gtk.Align.START)
# label.set_selectable(True)
# grid.attach_next_to(label, label9, Gtk.PositionType.RIGHT, 1, 1)
# Add an entire row of padding
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
# If it doesn't exist, generate QR code to share keyboard package
path_qr = path.join(tempfile.gettempdir(), kmp['packageID'] + '_qrcode.png')
url = KeymanComUrl + "/go/keyboard/" + kmp['packageID'] + "/share"
if not path.isfile(path_qr):
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=4,
border=4)
qr.add_data(url)
qr.make(fit=True)
img = qr.make_image()
img.save(path_qr)
# Display QR Code, spanning 2 columns so it will be centered
image = Gtk.Image()
image.set_from_file(path_qr)
grid.attach_next_to(image, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
lbl_share_kbd = Gtk.Label()
lbl_share_kbd.set_markup(_("Scan this code to load this keyboard\non another device or <a href='{uri}'>share online</a>").format(uri=url))
lbl_share_kbd.set_halign(Gtk.Align.CENTER)
lbl_share_kbd.set_line_wrap(True)
grid.attach_next_to(lbl_share_kbd, image, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_share_kbd
self.add_button(_("_Close"), Gtk.ResponseType.CLOSE)
self.get_content_area().pack_start(grid, True, True, 12)
self.resize(800, 450)
self.show_all()
| [((18, 0, 18, 32), 'gi.require_version', 'gi.require_version', ({(18, 19, 18, 24): '"""Gtk"""', (18, 26, 18, 31): '"""3.0"""'}, {}), "('Gtk', '3.0')", False, 'import gi\n'), ((40, 8, 40, 51), 'gi.repository.Gtk.Dialog.__init__', 'Gtk.Dialog.__init__', ({(40, 28, 40, 32): 'self', (40, 34, 40, 42): 'wintitle', (40, 44, 40, 50): 'parent'}, {}), '(self, wintitle, parent)', False, 'from gi.repository import Gtk\n'), ((41, 8, 41, 24), 'keyman_config.accelerators.init_accel', 'init_accel', ({(41, 19, 41, 23): 'self'}, {}), '(self)', False, 'from keyman_config.accelerators import init_accel\n'), ((45, 21, 45, 65), 'os.path.join', 'path.join', ({(45, 31, 45, 46): "kmp['areapath']", (45, 48, 45, 64): "kmp['packageID']"}, {}), "(kmp['areapath'], kmp['packageID'])", False, 'from os import path\n'), ((46, 19, 46, 52), 'os.path.join', 'path.join', ({(46, 29, 46, 39): 'packageDir', (46, 41, 46, 51): '"""kmp.json"""'}, {}), "(packageDir, 'kmp.json')", False, 'from os import path\n'), ((47, 50, 47, 73), 'keyman_config.kmpmetadata.parsemetadata', 'parsemetadata', ({(47, 64, 47, 72): 'kmp_json'}, {}), '(kmp_json)', False, 'from keyman_config.kmpmetadata import parsemetadata\n'), ((63, 19, 63, 68), 'os.path.join', 'path.join', ({(63, 29, 63, 39): 'packageDir', (63, 41, 63, 67): "kmp['packageID'] + '.json'"}, {}), "(packageDir, kmp['packageID'] + '.json')", False, 'from os import path\n'), ((64, 11, 64, 32), 'os.path.isfile', 'path.isfile', ({(64, 23, 64, 31): 'jsonfile'}, {}), '(jsonfile)', False, 'from os import path\n'), ((71, 15, 71, 25), 'gi.repository.Gtk.Grid', 'Gtk.Grid', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((78, 23, 78, 34), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((83, 16, 83, 27), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((84, 11, 84, 53), 'keyman_config.secure_lookup', 'secure_lookup', ({(84, 25, 84, 29): 'info', (84, 31, 84, 37): '"""name"""', (84, 39, 84, 52): '"""description"""'}, {}), "(info, 'name', 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((90, 21, 90, 32), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((95, 16, 95, 27), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((96, 11, 96, 42), 'keyman_config.secure_lookup', 'secure_lookup', ({(96, 25, 96, 28): 'kmp', (96, 30, 96, 41): '"""packageID"""'}, {}), "(kmp, 'packageID')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((102, 22, 102, 33), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((107, 16, 107, 27), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((108, 11, 108, 56), 'keyman_config.secure_lookup', 'secure_lookup', ({(108, 25, 108, 29): 'info', (108, 31, 108, 40): '"""version"""', (108, 42, 108, 55): '"""description"""'}, {}), "(info, 'version', 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((114, 11, 114, 47), 'keyman_config.secure_lookup', 'secure_lookup', ({(114, 25, 114, 31): 'kbdata', (114, 33, 114, 46): '"""description"""'}, {}), "(kbdata, 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((127, 11, 127, 40), 'keyman_config.secure_lookup', 'secure_lookup', ({(127, 25, 127, 29): 'info', (127, 31, 127, 39): '"""author"""'}, {}), "(info, 
'author')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((140, 11, 140, 43), 'keyman_config.secure_lookup', 'secure_lookup', ({(140, 25, 140, 29): 'info', (140, 31, 140, 42): '"""copyright"""'}, {}), "(info, 'copyright')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((154, 18, 154, 29), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((160, 22, 160, 38), 'gi.repository.Gtk.HSeparator', 'Gtk.HSeparator', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((325, 18, 325, 29), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((347, 16, 347, 27), 'gi.repository.Gtk.Image', 'Gtk.Image', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((351, 24, 351, 35), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((52, 19, 52, 29), 'gi.repository.Gtk.Grid', 'Gtk.Grid', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((54, 35, 54, 46), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((79, 30, 79, 51), 'keyman_config._', '_', ({(79, 32, 79, 50): '"""Package name: """'}, {}), "('Package name: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((91, 28, 91, 47), 'keyman_config._', '_', ({(91, 30, 91, 46): '"""Package id: """'}, {}), "('Package id: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((103, 29, 103, 53), 'keyman_config._', '_', ({(103, 31, 103, 52): '"""Package version: """'}, {}), "('Package version: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((115, 27, 115, 38), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((120, 20, 120, 31), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((128, 27, 128, 38), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((133, 20, 133, 31), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((134, 15, 134, 59), 'keyman_config.secure_lookup', 'secure_lookup', ({(134, 29, 134, 33): 'info', (134, 35, 134, 43): '"""author"""', (134, 45, 134, 58): '"""description"""'}, {}), "(info, 'author', 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((141, 26, 141, 37), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((146, 20, 146, 31), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((147, 15, 147, 62), 'keyman_config.secure_lookup', 'secure_lookup', ({(147, 29, 147, 33): 'info', (147, 35, 147, 46): '"""copyright"""', (147, 48, 147, 61): '"""description"""'}, {}), "(info, 'copyright', 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((332, 28, 332, 49), 'tempfile.gettempdir', 'tempfile.gettempdir', ({}, {}), '()', False, 'import tempfile\n'), ((334, 15, 334, 35), 'os.path.isfile', 'path.isfile', ({(334, 27, 334, 34): 'path_qr'}, {}), '(path_qr)', False, 'from os import path\n'), ((335, 17, 339, 25), 'qrcode.QRCode', 'qrcode.QRCode', (), '', False, 'import qrcode\n'), ((358, 24, 358, 35), 'keyman_config._', '_', ({(358, 26, 358, 34): '"""_Close"""'}, {}), "('_Close')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((51, 28, 51, 
39), 'keyman_config._', '_', ({(51, 30, 51, 38): '"""_Close"""'}, {}), "('_Close')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((55, 42, 55, 140), 'keyman_config._', '_', ({(55, 44, 55, 139): '"""ERROR: Keyboard metadata is damaged.\nPlease "Uninstall" and then "Install" the keyboard."""'}, {}), '("""ERROR: Keyboard metadata is damaged.\nPlease "Uninstall" and then "Install" the keyboard."""\n )', False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((85, 27, 85, 69), 'keyman_config.secure_lookup', 'secure_lookup', ({(85, 41, 85, 45): 'info', (85, 47, 85, 53): '"""name"""', (85, 55, 85, 68): '"""description"""'}, {}), "(info, 'name', 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((109, 27, 109, 72), 'keyman_config.secure_lookup', 'secure_lookup', ({(109, 41, 109, 45): 'info', (109, 47, 109, 56): '"""version"""', (109, 58, 109, 71): '"""description"""'}, {}), "(info, 'version', 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((116, 34, 116, 62), 'keyman_config._', '_', ({(116, 36, 116, 61): '"""Package description: """'}, {}), "('Package description: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((129, 34, 129, 57), 'keyman_config._', '_', ({(129, 36, 129, 56): '"""Package author: """'}, {}), "('Package author: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((142, 33, 142, 59), 'keyman_config._', '_', ({(142, 35, 142, 58): '"""Package copyright: """'}, {}), "('Package copyright: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((169, 27, 169, 69), 'os.path.join', 'path.join', ({(169, 37, 169, 47): 'packageDir', (169, 49, 169, 68): "kbd['id'] + '.json'"}, {}), "(packageDir, kbd['id'] + '.json')", False, 'from os import path\n'), ((170, 19, 170, 40), 'os.path.isfile', 'path.isfile', ({(170, 31, 170, 39): 'jsonfile'}, {}), '(jsonfile)', False, 'from os import path\n'), ((178, 26, 178, 37), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((186, 31, 186, 42), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((191, 24, 191, 35), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((39, 23, 39, 43), 'keyman_config._', '_', ({(39, 25, 39, 42): '"""{name} keyboard"""'}, {}), "('{name} keyboard')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((67, 29, 67, 49), 'json.load', 'json.load', ({(67, 39, 67, 48): 'read_file'}, {}), '(read_file)', False, 'import json\n'), ((135, 31, 135, 75), 'keyman_config.secure_lookup', 'secure_lookup', ({(135, 45, 135, 49): 'info', (135, 51, 135, 59): '"""author"""', (135, 61, 135, 74): '"""description"""'}, {}), "(info, 'author', 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((148, 31, 148, 78), 'keyman_config.secure_lookup', 'secure_lookup', ({(148, 45, 148, 49): 'info', (148, 51, 148, 62): '"""copyright"""', (148, 64, 148, 77): '"""description"""'}, {}), "(info, 'copyright', 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((187, 38, 187, 64), 'keyman_config._', '_', ({(187, 40, 187, 63): '"""Keyboard filename: """'}, {}), "('Keyboard filename: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((192, 31, 192, 72), 'os.path.join', 'path.join', ({(192, 41, 192, 51): 'packageDir', (192, 53, 192, 
71): "(kbd['id'] + '.kmx')"}, {}), "(packageDir, kbd['id'] + '.kmx')", False, 'from os import path\n'), ((198, 35, 198, 46), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((203, 28, 203, 39), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((204, 23, 204, 52), 'keyman_config.secure_lookup', 'secure_lookup', ({(204, 37, 204, 43): 'kbdata', (204, 45, 204, 51): '"""name"""'}, {}), "(kbdata, 'name')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((210, 33, 210, 44), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((215, 28, 215, 39), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((216, 23, 216, 50), 'keyman_config.secure_lookup', 'secure_lookup', ({(216, 37, 216, 43): 'kbdata', (216, 45, 216, 49): '"""id"""'}, {}), "(kbdata, 'id')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((222, 34, 222, 45), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((227, 28, 227, 39), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((228, 23, 228, 55), 'keyman_config.secure_lookup', 'secure_lookup', ({(228, 37, 228, 43): 'kbdata', (228, 45, 228, 54): '"""version"""'}, {}), "(kbdata, 'version')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((234, 23, 234, 52), 'keyman_config.secure_lookup', 'secure_lookup', ({(234, 37, 234, 41): 'info', (234, 43, 234, 51): '"""author"""'}, {}), "(info, 'author')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((247, 34, 247, 45), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((252, 28, 252, 39), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((253, 23, 253, 55), 'keyman_config.secure_lookup', 'secure_lookup', ({(253, 37, 253, 43): 'kbdata', (253, 45, 253, 54): '"""license"""'}, {}), "(kbdata, 'license')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((259, 35, 259, 46), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((264, 28, 264, 39), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((265, 23, 265, 59), 'keyman_config.secure_lookup', 'secure_lookup', ({(265, 37, 265, 43): 'kbdata', (265, 45, 265, 58): '"""description"""'}, {}), "(kbdata, 'description')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((273, 30, 273, 41), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((279, 34, 279, 50), 'gi.repository.Gtk.HSeparator', 'Gtk.HSeparator', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((352, 33, 352, 129), 'keyman_config._', '_', ({(352, 35, 352, 128): '"""Scan this code to load this keyboard\non another device or <a href=\'{uri}\'>share online</a>"""'}, {}), '("""Scan this code to load this keyboard\non another device or <a href=\'{uri}\'>share online</a>"""\n )', False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((197, 30, 197, 57), 'keyman_config.secure_lookup', 'secure_lookup', ({(197, 44, 197, 50): 'kbdata', (197, 52, 197, 56): '"""id"""'}, {}), "(kbdata, 'id')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((197, 
61, 197, 92), 'keyman_config.secure_lookup', 'secure_lookup', ({(197, 75, 197, 78): 'kmp', (197, 80, 197, 91): '"""packageID"""'}, {}), "(kmp, 'packageID')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((199, 42, 199, 64), 'keyman_config._', '_', ({(199, 44, 199, 63): '"""Keyboard name: """'}, {}), "('Keyboard name: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((211, 40, 211, 60), 'keyman_config._', '_', ({(211, 42, 211, 59): '"""Keyboard id: """'}, {}), "('Keyboard id: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((223, 41, 223, 66), 'keyman_config._', '_', ({(223, 43, 223, 65): '"""Keyboard version: """'}, {}), "('Keyboard version: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((235, 39, 235, 50), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((240, 32, 240, 43), 'gi.repository.Gtk.Label', 'Gtk.Label', ({}, {}), '()', False, 'from gi.repository import Gtk\n'), ((241, 27, 241, 62), 'keyman_config.secure_lookup', 'secure_lookup', ({(241, 41, 241, 47): 'kbdata', (241, 49, 241, 61): '"""authorName"""'}, {}), "(kbdata, 'authorName')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((248, 41, 248, 66), 'keyman_config._', '_', ({(248, 43, 248, 65): '"""Keyboard license: """'}, {}), "('Keyboard license: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((260, 42, 260, 71), 'keyman_config._', '_', ({(260, 44, 260, 70): '"""Keyboard description: """'}, {}), "('Keyboard description: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n'), ((173, 37, 173, 57), 'json.load', 'json.load', ({(173, 47, 173, 56): 'read_file'}, {}), '(read_file)', False, 'import json\n'), ((236, 46, 236, 70), 'keyman_config._', '_', ({(236, 48, 236, 69): '"""Keyboard author: """'}, {}), "('Keyboard author: ')", False, 'from keyman_config import KeymanComUrl, _, secure_lookup\n')] |
ozsolarwind/SAM | build_osx/copy_runtime.py | 0967b0a4be8f8924ec1ad915a14575ac22c4ec3c | import os
import shutil
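# Copy each deploy resource directory into the macOS app bundle, replacing any
# existing copy and skipping .git metadata.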
SOURCE_DIR = '../deploy/runtime'
TARGET_DIR = 'SAM.app/Contents/runtime'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/solar_resource'
TARGET_DIR = 'SAM.app/Contents/solar_resource'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/wind_resource'
TARGET_DIR = 'SAM.app/Contents/wind_resource'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/libraries'
TARGET_DIR = 'SAM.app/Contents/libraries'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
| [((7, 3, 7, 29), 'os.path.exists', 'os.path.exists', ({(7, 18, 7, 28): 'TARGET_DIR'}, {}), '(TARGET_DIR)', False, 'import os\n'), ((15, 3, 15, 29), 'os.path.exists', 'os.path.exists', ({(15, 18, 15, 28): 'TARGET_DIR'}, {}), '(TARGET_DIR)', False, 'import os\n'), ((23, 3, 23, 29), 'os.path.exists', 'os.path.exists', ({(23, 18, 23, 28): 'TARGET_DIR'}, {}), '(TARGET_DIR)', False, 'import os\n'), ((31, 3, 31, 29), 'os.path.exists', 'os.path.exists', ({(31, 18, 31, 28): 'TARGET_DIR'}, {}), '(TARGET_DIR)', False, 'import os\n'), ((8, 4, 8, 29), 'shutil.rmtree', 'shutil.rmtree', ({(8, 18, 8, 28): 'TARGET_DIR'}, {}), '(TARGET_DIR)', False, 'import shutil\n'), ((16, 4, 16, 29), 'shutil.rmtree', 'shutil.rmtree', ({(16, 18, 16, 28): 'TARGET_DIR'}, {}), '(TARGET_DIR)', False, 'import shutil\n'), ((24, 4, 24, 29), 'shutil.rmtree', 'shutil.rmtree', ({(24, 18, 24, 28): 'TARGET_DIR'}, {}), '(TARGET_DIR)', False, 'import shutil\n'), ((32, 4, 32, 29), 'shutil.rmtree', 'shutil.rmtree', ({(32, 18, 32, 28): 'TARGET_DIR'}, {}), '(TARGET_DIR)', False, 'import shutil\n'), ((10, 47, 10, 77), 'shutil.ignore_patterns', 'shutil.ignore_patterns', ({(10, 70, 10, 76): '""".git"""'}, {}), "('.git')", False, 'import shutil\n'), ((18, 47, 18, 77), 'shutil.ignore_patterns', 'shutil.ignore_patterns', ({(18, 70, 18, 76): '""".git"""'}, {}), "('.git')", False, 'import shutil\n'), ((26, 47, 26, 77), 'shutil.ignore_patterns', 'shutil.ignore_patterns', ({(26, 70, 26, 76): '""".git"""'}, {}), "('.git')", False, 'import shutil\n'), ((34, 47, 34, 77), 'shutil.ignore_patterns', 'shutil.ignore_patterns', ({(34, 70, 34, 76): '""".git"""'}, {}), "('.git')", False, 'import shutil\n')] |
kl-chou/codalab-worksheets | codalab/lib/path_util.py | 101d1d9f86d3f7b8dae3b4fc3e2335fcf8d7c3d7 | """
path_util contains helpers for working with local filesystem paths.
There are a few classes of methods provided here:
Functions to normalize paths and check that they are in normal form:
normalize, check_isvalid, check_isdir, check_isfile, path_is_url
Functions to list directories and to deal with subpaths of paths:
safe_join, get_relative_path, ls, recursive_ls
Functions to read files to compute hashes, write results to stdout, etc:
getmtime, get_size, hash_directory, hash_file_contents
Functions that modify that filesystem in controlled ways:
copy, make_directory, set_write_permissions, rename, remove
"""
import errno
import hashlib
import itertools
import os
import shutil
import subprocess
import sys
from typing import Optional
from codalab.common import precondition, UsageError, parse_linked_bundle_url
from codalab.lib import file_util
from codalab.worker.file_util import get_path_size
# Block sizes and canonical strings used when hashing files.
BLOCK_SIZE = 0x40000
FILE_PREFIX = 'file'
LINK_PREFIX = 'link'
def path_error(message, path):
"""
Raised when a user-supplied path causes an exception.
"""
return UsageError(message + ': ' + path)
################################################################################
# Functions to normalize paths and check that they are in normal form.
################################################################################
def normalize(path):
"""
Return the absolute path of the location specified by the given path.
This path is returned in a "canonical form", without ~'s, .'s, ..'s.
"""
if path == '-':
return '/dev/stdin'
elif path_is_url(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def check_isvalid(path, fn_name):
"""
Raise a PreconditionViolation if the path is not absolute or normalized.
Raise a UsageError if the file at that path does not exist.
"""
precondition(os.path.isabs(path), '%s got relative path: %s' % (fn_name, path))
# Broken symbolic links are valid paths, so we use lexists instead of exists.
if not os.path.lexists(path):
raise path_error('%s got non-existent path:' % (fn_name,), path)
def check_isdir(path, fn_name):
"""
    Check that the path is valid, then raise UsageError if the path is not a directory.
"""
check_isvalid(path, fn_name)
if not os.path.isdir(path):
raise path_error('%s got non-directory:' % (fn_name,), path)
def check_isfile(path, fn_name):
"""
    Check that the path is valid, then raise UsageError if the path is a directory.
"""
check_isvalid(path, fn_name)
if os.path.isdir(path):
raise path_error('%s got directory:' % (fn_name,), path)
def path_is_url(path):
if isinstance(path, str):
for prefix in ['http', 'https', 'ftp']:
if path.startswith(prefix + '://'):
return True
return False
################################################################################
# Functions to list directories and to deal with subpaths of paths.
################################################################################
def safe_join(*paths):
"""
Join a sequence of paths but filter out any that are empty. Used for targets.
Note that os.path.join has this functionality EXCEPT at the end of the list,
which causes problems when a target subpath is empty.
"""
return os.path.join(*[_f for _f in paths if _f])
def get_relative_path(root, path):
"""
Return the relative path from root to path, which should be nested under root.
"""
precondition(path.startswith(root), '%s is not under %s' % (path, root))
return path[len(root) :]
def ls(path):
"""
Return a (list of directories, list of files) in the given directory.
"""
check_isdir(path, 'ls')
(directories, files) = ([], [])
for file_name in os.listdir(path):
if os.path.isfile(os.path.join(path, file_name)):
files.append(file_name)
else:
directories.append(file_name)
return (directories, files)
def recursive_ls(path):
"""
Return a (list of directories, list of files) in the given directory and
all of its nested subdirectories. All paths returned are absolute.
Symlinks are returned in the list of files, even if they point to directories.
This makes it possible to distinguish between real and symlinked directories
when computing the hash of a directory. This function will NOT descend into
symlinked directories.
"""
check_isdir(path, 'recursive_ls')
(directories, files) = ([], [])
for (root, _, file_names) in os.walk(path):
assert os.path.isabs(root), 'Got relative root in os.walk: %s' % (root,)
directories.append(root)
for file_name in file_names:
files.append(os.path.join(root, file_name))
# os.walk ignores symlinks to directories, but we should count them as files.
        # However, we can't use the followlinks parameter, because a) we don't want
        # to descend into symlinked directories and b) we could end up in an infinite loop if
# we were to pass that flag. Instead, we handle symlinks here:
for subpath in os.listdir(root):
full_subpath = os.path.join(root, subpath)
if os.path.islink(full_subpath) and os.path.isdir(full_subpath):
files.append(full_subpath)
return (directories, files)
################################################################################
# Functions to read files to compute hashes, write results to stdout, etc.
################################################################################
def getmtime(path):
"""
Like os.path.getmtime, but does not follow symlinks.
"""
return os.lstat(path).st_mtime
def get_size(path, dirs_and_files=None):
"""
Get the size (in bytes) of the file or directory at or under the given path.
Does not include symlinked files and directories.
"""
if parse_linked_bundle_url(path).uses_beam:
return get_path_size(path)
if os.path.islink(path) or not os.path.isdir(path):
return os.lstat(path).st_size
dirs_and_files = dirs_and_files or recursive_ls(path)
return sum(os.lstat(path).st_size for path in itertools.chain(*dirs_and_files))
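# Note added for clarity (not in the original source): because get_size uses
# os.lstat, a symlink encountered here contributes only the size of the link
# itself, never the size of its target.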
def hash_directory(path, dirs_and_files=None):
"""
Return the hash of the contents of the folder at the given path.
This hash is independent of the path itself - if you were to move the
    directory and call hash_directory again, you would get the same result.
"""
if parse_linked_bundle_url(path).uses_beam:
# On Azure Blob Storage, we just use the directory size for the hashed contents.
return get_size(path)
(directories, files) = dirs_and_files or recursive_ls(path)
# Sort and then hash all directories and then compute a hash of the hashes.
# This two-level hash is necessary so that the overall hash is unambiguous -
# if we updated directory_hash with the directory names themselves, then
# we'd be hashing the concatenation of these names, which could be generated
# in multiple ways.
directory_hash = hashlib.sha1()
for directory in sorted(directories):
relative_path = get_relative_path(path, directory)
directory_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
# Use a similar two-level hashing scheme for all files, but incorporate a
# hash of both the file name and contents.
file_hash = hashlib.sha1()
for file_name in sorted(files):
relative_path = get_relative_path(path, file_name)
file_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
file_hash.update(hash_file_contents(file_name).encode())
# Return a hash of the two hashes.
overall_hash = hashlib.sha1(directory_hash.hexdigest().encode())
overall_hash.update(file_hash.hexdigest().encode())
return overall_hash.hexdigest()
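# Illustrative note (not in the original source): the two-level scheme above is
# what keeps the hash unambiguous. Hashing raw names directly would conflate
# different listings, e.g.
#
#   hashlib.sha1(b'ab' + b'c').hexdigest() == hashlib.sha1(b'a' + b'bc').hexdigest()
#
# whereas updating with each name's own sha1 hexdigest, as done here, yields
# different results for ['ab', 'c'] and ['a', 'bc'].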
def hash_file_contents(path):
"""
Return the hash of the file's contents, read in blocks of size BLOCK_SIZE.
"""
message = 'hash_file called with relative path: %s' % (path,)
precondition(os.path.isabs(path), message)
if os.path.islink(path):
contents_hash = hashlib.sha1(LINK_PREFIX.encode())
contents_hash.update(os.readlink(path).encode())
else:
contents_hash = hashlib.sha1(FILE_PREFIX.encode())
with open(path, 'rb') as file_handle:
while True:
data = file_handle.read(BLOCK_SIZE)
if not data:
break
contents_hash.update(data)
return contents_hash.hexdigest()
################################################################################
# Functions that modify the filesystem in controlled ways.
################################################################################
def copy(source_path: str, dest_path: str, follow_symlinks: Optional[bool] = False):
"""
Copy |source_path| to |dest_path|.
Assume dest_path doesn't exist.
|follow_symlinks|: whether to follow symlinks
Note: this only works in Linux.
"""
if os.path.exists(dest_path):
raise path_error('already exists', dest_path)
if source_path == '/dev/stdin':
with open(dest_path, 'wb') as dest:
file_util.copy(
sys.stdin,
dest,
autoflush=False,
print_status='Copying %s to %s' % (source_path, dest_path),
)
else:
if not follow_symlinks and os.path.islink(source_path):
raise path_error('not following symlinks', source_path)
if not os.path.exists(source_path):
raise path_error('does not exist', source_path)
command = [
'rsync',
'-pr%s' % ('L' if follow_symlinks else 'l'),
source_path
+ ('/' if not os.path.islink(source_path) and os.path.isdir(source_path) else ''),
dest_path,
]
if subprocess.call(command) != 0:
raise path_error('Unable to copy %s to' % source_path, dest_path)
def make_directory(path):
"""
Create the directory at the given path.
"""
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
check_isdir(path, 'make_directory')
def set_write_permissions(path):
    # Recursively give write permissions to |path|, so that we can operate
# on it.
if not os.path.islink(path): # Don't need write permissions if symlink
subprocess.call(['chmod', '-R', 'u+w', path])
def rename(old_path, new_path):
# Allow write permissions, or else the move will fail.
set_write_permissions(old_path)
subprocess.call(['mv', old_path, new_path])
def remove(path):
"""
Remove the given path, whether it is a directory, file, or link.
"""
if parse_linked_bundle_url(path).uses_beam:
from apache_beam.io.filesystems import FileSystems
if not FileSystems.exists(path):
FileSystems.delete([path])
return
check_isvalid(path, 'remove')
set_write_permissions(path) # Allow permissions
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
try:
shutil.rmtree(path)
except shutil.Error:
pass
else:
os.remove(path)
if os.path.exists(path):
print('Failed to remove %s' % path)
def soft_link(source, path):
"""
Create a symbolic link to source at path. This is basically the same as doing "ln -s $source $path"
"""
check_isvalid(source, 'soft_link')
os.symlink(source, path)
| [((41, 11, 41, 44), 'codalab.common.UsageError', 'UsageError', ({(41, 22, 41, 43): "(message + ': ' + path)"}, {}), "(message + ': ' + path)", False, 'from codalab.common import precondition, UsageError, parse_linked_bundle_url\n'), ((87, 7, 87, 26), 'os.path.isdir', 'os.path.isdir', ({(87, 21, 87, 25): 'path'}, {}), '(path)', False, 'import os\n'), ((110, 11, 110, 52), 'os.path.join', 'os.path.join', ({(110, 24, 110, 51): '*[_f for _f in paths if _f]'}, {}), '(*[_f for _f in paths if _f])', False, 'import os\n'), ((127, 21, 127, 37), 'os.listdir', 'os.listdir', ({(127, 32, 127, 36): 'path'}, {}), '(path)', False, 'import os\n'), ((147, 33, 147, 46), 'os.walk', 'os.walk', ({(147, 41, 147, 45): 'path'}, {}), '(path)', False, 'import os\n'), ((203, 21, 203, 35), 'hashlib.sha1', 'hashlib.sha1', ({}, {}), '()', False, 'import hashlib\n'), ((209, 16, 209, 30), 'hashlib.sha1', 'hashlib.sha1', ({}, {}), '()', False, 'import hashlib\n'), ((226, 7, 226, 27), 'os.path.islink', 'os.path.islink', ({(226, 22, 226, 26): 'path'}, {}), '(path)', False, 'import os\n'), ((252, 7, 252, 32), 'os.path.exists', 'os.path.exists', ({(252, 22, 252, 31): 'dest_path'}, {}), '(dest_path)', False, 'import os\n'), ((301, 4, 301, 47), 'subprocess.call', 'subprocess.call', ({(301, 20, 301, 46): "['mv', old_path, new_path]"}, {}), "(['mv', old_path, new_path])", False, 'import subprocess\n'), ((316, 7, 316, 27), 'os.path.islink', 'os.path.islink', ({(316, 22, 316, 26): 'path'}, {}), '(path)', False, 'import os\n'), ((325, 7, 325, 27), 'os.path.exists', 'os.path.exists', ({(325, 22, 325, 26): 'path'}, {}), '(path)', False, 'import os\n'), ((334, 4, 334, 28), 'os.symlink', 'os.symlink', ({(334, 15, 334, 21): 'source', (334, 23, 334, 27): 'path'}, {}), '(source, path)', False, 'import os\n'), ((67, 17, 67, 36), 'os.path.isabs', 'os.path.isabs', ({(67, 31, 67, 35): 'path'}, {}), '(path)', False, 'import os\n'), ((69, 11, 69, 32), 'os.path.lexists', 'os.path.lexists', ({(69, 27, 69, 31): 'path'}, {}), '(path)', False, 'import os\n'), ((78, 11, 78, 30), 'os.path.isdir', 'os.path.isdir', ({(78, 25, 78, 29): 'path'}, {}), '(path)', False, 'import os\n'), ((148, 15, 148, 34), 'os.path.isabs', 'os.path.isabs', ({(148, 29, 148, 33): 'root'}, {}), '(root)', False, 'import os\n'), ((156, 23, 156, 39), 'os.listdir', 'os.listdir', ({(156, 34, 156, 38): 'root'}, {}), '(root)', False, 'import os\n'), ((172, 11, 172, 25), 'os.lstat', 'os.lstat', ({(172, 20, 172, 24): 'path'}, {}), '(path)', False, 'import os\n'), ((180, 7, 180, 36), 'codalab.common.parse_linked_bundle_url', 'parse_linked_bundle_url', ({(180, 31, 180, 35): 'path'}, {}), '(path)', False, 'from codalab.common import precondition, UsageError, parse_linked_bundle_url\n'), ((181, 15, 181, 34), 'codalab.worker.file_util.get_path_size', 'get_path_size', ({(181, 29, 181, 33): 'path'}, {}), '(path)', False, 'from codalab.worker.file_util import get_path_size\n'), ((182, 7, 182, 27), 'os.path.islink', 'os.path.islink', ({(182, 22, 182, 26): 'path'}, {}), '(path)', False, 'import os\n'), ((194, 7, 194, 36), 'codalab.common.parse_linked_bundle_url', 'parse_linked_bundle_url', ({(194, 31, 194, 35): 'path'}, {}), '(path)', False, 'from codalab.common import precondition, UsageError, parse_linked_bundle_url\n'), ((225, 17, 225, 36), 'os.path.isabs', 'os.path.isabs', ({(225, 31, 225, 35): 'path'}, {}), '(path)', False, 'import os\n'), ((284, 8, 284, 22), 'os.mkdir', 'os.mkdir', ({(284, 17, 284, 21): 'path'}, {}), '(path)', False, 'import os\n'), ((294, 11, 294, 31), 'os.path.islink', 
'os.path.islink', ({(294, 26, 294, 30): 'path'}, {}), '(path)', False, 'import os\n'), ((295, 8, 295, 53), 'subprocess.call', 'subprocess.call', ({(295, 24, 295, 52): "['chmod', '-R', 'u+w', path]"}, {}), "(['chmod', '-R', 'u+w', path])", False, 'import subprocess\n'), ((308, 7, 308, 36), 'codalab.common.parse_linked_bundle_url', 'parse_linked_bundle_url', ({(308, 31, 308, 35): 'path'}, {}), '(path)', False, 'from codalab.common import precondition, UsageError, parse_linked_bundle_url\n'), ((317, 8, 317, 23), 'os.unlink', 'os.unlink', ({(317, 18, 317, 22): 'path'}, {}), '(path)', False, 'import os\n'), ((318, 9, 318, 28), 'os.path.isdir', 'os.path.isdir', ({(318, 23, 318, 27): 'path'}, {}), '(path)', False, 'import os\n'), ((128, 26, 128, 55), 'os.path.join', 'os.path.join', ({(128, 39, 128, 43): 'path', (128, 45, 128, 54): 'file_name'}, {}), '(path, file_name)', False, 'import os\n'), ((157, 27, 157, 54), 'os.path.join', 'os.path.join', ({(157, 40, 157, 44): 'root', (157, 46, 157, 53): 'subpath'}, {}), '(root, subpath)', False, 'import os\n'), ((182, 35, 182, 54), 'os.path.isdir', 'os.path.isdir', ({(182, 49, 182, 53): 'path'}, {}), '(path)', False, 'import os\n'), ((183, 15, 183, 29), 'os.lstat', 'os.lstat', ({(183, 24, 183, 28): 'path'}, {}), '(path)', False, 'import os\n'), ((257, 12, 262, 13), 'codalab.lib.file_util.copy', 'file_util.copy', (), '', False, 'from codalab.lib import file_util\n'), ((264, 35, 264, 62), 'os.path.islink', 'os.path.islink', ({(264, 50, 264, 61): 'source_path'}, {}), '(source_path)', False, 'import os\n'), ((266, 15, 266, 42), 'os.path.exists', 'os.path.exists', ({(266, 30, 266, 41): 'source_path'}, {}), '(source_path)', False, 'import os\n'), ((275, 11, 275, 35), 'subprocess.call', 'subprocess.call', ({(275, 27, 275, 34): 'command'}, {}), '(command)', False, 'import subprocess\n'), ((311, 15, 311, 39), 'apache_beam.io.filesystems.FileSystems.exists', 'FileSystems.exists', ({(311, 34, 311, 38): 'path'}, {}), '(path)', False, 'from apache_beam.io.filesystems import FileSystems\n'), ((312, 12, 312, 38), 'apache_beam.io.filesystems.FileSystems.delete', 'FileSystems.delete', ({(312, 31, 312, 37): '[path]'}, {}), '([path])', False, 'from apache_beam.io.filesystems import FileSystems\n'), ((324, 8, 324, 23), 'os.remove', 'os.remove', ({(324, 18, 324, 22): 'path'}, {}), '(path)', False, 'import os\n'), ((59, 31, 59, 55), 'os.path.expanduser', 'os.path.expanduser', ({(59, 50, 59, 54): 'path'}, {}), '(path)', False, 'import os\n'), ((151, 25, 151, 54), 'os.path.join', 'os.path.join', ({(151, 38, 151, 42): 'root', (151, 44, 151, 53): 'file_name'}, {}), '(root, file_name)', False, 'import os\n'), ((158, 15, 158, 43), 'os.path.islink', 'os.path.islink', ({(158, 30, 158, 42): 'full_subpath'}, {}), '(full_subpath)', False, 'import os\n'), ((158, 48, 158, 75), 'os.path.isdir', 'os.path.isdir', ({(158, 62, 158, 74): 'full_subpath'}, {}), '(full_subpath)', False, 'import os\n'), ((185, 15, 185, 29), 'os.lstat', 'os.lstat', ({(185, 24, 185, 28): 'path'}, {}), '(path)', False, 'import os\n'), ((185, 50, 185, 82), 'itertools.chain', 'itertools.chain', ({(185, 66, 185, 81): '*dirs_and_files'}, {}), '(*dirs_and_files)', False, 'import itertools\n'), ((320, 12, 320, 31), 'shutil.rmtree', 'shutil.rmtree', ({(320, 26, 320, 30): 'path'}, {}), '(path)', False, 'import shutil\n'), ((228, 29, 228, 46), 'os.readlink', 'os.readlink', ({(228, 41, 228, 45): 'path'}, {}), '(path)', False, 'import os\n'), ((272, 58, 272, 84), 'os.path.isdir', 'os.path.isdir', ({(272, 72, 272, 83): 
'source_path'}, {}), '(source_path)', False, 'import os\n'), ((272, 26, 272, 53), 'os.path.islink', 'os.path.islink', ({(272, 41, 272, 52): 'source_path'}, {}), '(source_path)', False, 'import os\n')] |
aliavni/statsmodels | statsmodels/regression/tests/test_glsar_gretl.py | ef5d57a8d45de76a895e9401705280d558d688ad | # -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: Josef Perktold
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
assert_almost_equal(contrast_res.fvalue, other[0], decimal=decimal[0])
assert_almost_equal(contrast_res.pvalue, other[1], decimal=decimal[1])
assert_equal(contrast_res.df_num, other[2])
assert_equal(contrast_res.df_denom, other[3])
assert_equal("f", other[4])
class TestGLSARGretl:
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
#simple diff, not growthrate, I want heteroscedasticity later for testing
endogd = np.diff(d['realinv'])
exogd = add_constant(np.c_[np.diff(d['realgdp'].values), d['realint'][:-1].values])
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
vif = [1.002, 1.002]
cond_1norm = 6862.0664
determinant = 1.0296049e+009
reciprocal_condition_number = 0.013819244
#Chi-square(2): test-statistic, pvalue, df
normality = [20.2792, 3.94837e-005, 2]
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)
'''
Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858
'''
'''
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023:
'''
'''
Test for ARCH of order 4
coefficient std. error t-ratio p-value
--------------------------------------------------------
alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
alpha(1) 0.176114 0.0714698 2.464 0.0146 **
alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
alpha(4) 0.0384531 0.0725763 0.5298 0.5968
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
'''
'''
Variance Inflation Factors
Minimum possible value = 1.0
Values > 10.0 may indicate a collinearity problem
ds_l_realgdp 1.002
realint_1 1.002
VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
between variable j and the other independent variables
Properties of matrix X'X:
1-norm = 6862.0664
Determinant = 1.0296049e+009
Reciprocal condition number = 0.013819244
'''
'''
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491
Test of common factor restriction -
Null hypothesis: restriction is acceptable
Test statistic: F(2, 195) = 0.426391
with p-value = P(F(2, 195) > 0.426391) = 0.653468
Test for normality of residual -
Null hypothesis: error is normally distributed
Test statistic: Chi-square(2) = 20.2792
with p-value = 3.94837e-005:
'''
#no idea what this is
'''
Augmented regression for common factor test
OLS, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
coefficient std. error t-ratio p-value
---------------------------------------------------------------
const -10.9481 1.35807 -8.062 7.44e-014 ***
ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
realint_1 -0.662644 0.334872 -1.979 0.0492 **
ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
realint_2 0.0769695 0.341527 0.2254 0.8219
Sum of squared residuals = 22432.8
Test of common factor restriction
Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
'''
################ with OLS, HAC errors
#Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
#Dependent variable: ds_l_realinv
#HAC standard errors, bandwidth 4 (Bartlett kernel)
#coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
#for confidence interval t(199, 0.025) = 1.972
partable = np.array([
[-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049], # ***
[4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258], #***
[-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]]) # **
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.257395),
endog_std = ("S.D. dependent var", 18.73915),
ssr = ("Sum squared resid", 22799.68),
mse_resid_sqrt = ("S.E. of regression", 10.70380),
rsquared = ("R-squared", 0.676978),
rsquared_adj = ("Adjusted R-squared", 0.673731),
fvalue = ("F(2, 199)", 90.79971),
f_pvalue = ("P-value(F)", 9.53e-29),
llf = ("Log-likelihood", -763.9752),
aic = ("Akaike criterion", 1533.950),
bic = ("Schwarz criterion", 1543.875),
hqic = ("Hannan-Quinn", 1537.966),
resid_acf1 = ("rho", -0.107341),
dw = ("Durbin-Watson", 2.213805))
linear_logs = [1.68351, 0.430953, 2, "chi2"]
#for logs: dropping 70 nan or incomplete observations, T=133
#(res_ols.model.exog <=0).any(1).sum() = 69 ?not 70
linear_squares = [7.52477, 0.0232283, 2, "chi2"]
#Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]
#break
cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"] #stats.t.sf(0.494432, 198)*2
#see cusum results in files
break_qlr = [3.01985, 0.1, 3, 196, "maxF"] #TODO check this, max at 2001:4
break_chow = [13.1897, 0.00424384, 3, "chi2"] # break at 1984:1
arch_4 = [3.43473, 0.487871, 4, "chi2"]
normality = [23.962, 0.00001, 2, "chi2"]
het_white = [33.503723, 0.000003, 5, "chi2"]
het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"] #TODO: not available
het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"] #not available
cond_1norm = 5984.0525
determinant = 7.1087467e+008
reciprocal_condition_number = 0.013826504
vif = [1.001, 1.001]
names = 'date residual leverage influence DFFITS'.split()
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
converters={0:lambda s: s})
#either numpy 1.6 or python 3.2 changed behavior
if np.isnan(lev[-1]['f1']):
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
converters={0:lambda s: s})
lev.dtype.names = names
res = res_ols #for easier copying
cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov_hac)
assert_almost_equal(res.params, partable[:,0], 5)
assert_almost_equal(bse_hac, partable[:,1], 5)
#TODO
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
#f-value is based on cov_hac I guess
#res2 = res.get_robustcov_results(cov_type='HC1')
# TODO: fvalue differs from Gretl, trying any of the HCx
#assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
#assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(6,5))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(6,5))
linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)
hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)
hw = smsdia.het_white(res.resid, res.model.exog)
assert_almost_equal(hw[:2], het_white[:2], 6)
#arch
#sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.resid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
vif2 = [oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]
infl = oi.OLSInfluence(res_ols)
#print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
#print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
#print np.max(np.abs(lev['influence'] - infl.influence)) #just added this based on Gretl
#just rough test, low decimal in Gretl output,
assert_almost_equal(lev['residual'], res.resid, decimal=3)
assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
assert_almost_equal(lev['influence'], infl.influence, decimal=4)
def test_GLSARlag():
    #test that results for lag>1 are close to lag=1, with smaller ssr
from statsmodels.datasets import macrodata
d2 = macrodata.load_pandas().data
g_gdp = 400*np.diff(np.log(d2['realgdp'].values))
g_inv = 400*np.diff(np.log(d2['realinv'].values))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1].values], prepend=False)
mod1 = GLSAR(g_inv, exogg, 1)
res1 = mod1.iterative_fit(5)
mod4 = GLSAR(g_inv, exogg, 4)
res4 = mod4.iterative_fit(10)
assert_array_less(np.abs(res1.params / res4.params - 1), 0.03)
assert_array_less(res4.ssr, res1.ssr)
assert_array_less(np.abs(res4.bse / res1.bse) - 1, 0.015)
assert_array_less(np.abs((res4.fittedvalues / res1.fittedvalues - 1).mean()),
0.015)
assert_equal(len(mod4.rho), 4)
if __name__ == '__main__':
t = TestGLSARGretl()
t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
'''
| [((27, 4, 27, 74), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((28, 4, 28, 74), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((29, 4, 29, 47), 'numpy.testing.assert_equal', 'assert_equal', ({(29, 17, 29, 36): 'contrast_res.df_num', (29, 38, 29, 46): 'other[2]'}, {}), '(contrast_res.df_num, other[2])', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((30, 4, 30, 49), 'numpy.testing.assert_equal', 'assert_equal', ({(30, 17, 30, 38): 'contrast_res.df_denom', (30, 40, 30, 48): 'other[3]'}, {}), '(contrast_res.df_denom, other[3])', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((31, 4, 31, 31), 'numpy.testing.assert_equal', 'assert_equal', ({(31, 17, 31, 20): '"""f"""', (31, 22, 31, 30): 'other[4]'}, {}), "('f', other[4])", False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((419, 12, 419, 80), 'statsmodels.tools.tools.add_constant', 'add_constant', (), '', False, 'from statsmodels.tools.tools import add_constant\n'), ((421, 11, 421, 33), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', ({(421, 17, 421, 22): 'g_inv', (421, 24, 421, 29): 'exogg', (421, 31, 421, 32): '1'}, {}), '(g_inv, exogg, 1)', False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((424, 11, 424, 33), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', ({(424, 17, 424, 22): 'g_inv', (424, 24, 424, 29): 'exogg', (424, 31, 424, 32): '4'}, {}), '(g_inv, exogg, 4)', False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((428, 4, 428, 41), 'numpy.testing.assert_array_less', 'assert_array_less', ({(428, 22, 428, 30): 'res4.ssr', (428, 32, 428, 40): 'res1.ssr'}, {}), '(res4.ssr, res1.ssr)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((47, 17, 47, 38), 'numpy.diff', 'np.diff', ({(47, 25, 47, 37): "d['realinv']"}, {}), "(d['realinv'])", True, 'import numpy as np\n'), ((51, 16, 51, 75), 'statsmodels.tools.tools.add_constant', 'add_constant', ({(51, 29, 51, 74): "np.c_[gs_l_realgdp, d['realint'][:-1].values]"}, {}), "(np.c_[gs_l_realgdp, d['realint'][:-1].values])", False, 'from statsmodels.tools.tools import add_constant\n'), ((56, 17, 56, 52), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (), '', False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((60, 17, 60, 52), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (), '', False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((68, 19, 71, 85), 'numpy.array', 'np.array', ({(68, 28, 71, 84): '[[-9.5099, 0.990456, -9.602, 3.65e-18, -11.4631, -7.5567], [4.3704, \n 0.208146, 21.0, 2.93e-52, 3.95993, 4.78086], [-0.579253, 0.268009, -\n 2.161, 0.0319, -1.10777, -0.0507346]]'}, {}), '([[-9.5099, 0.990456, -9.602, 3.65e-18, -11.4631, -7.5567], [4.3704,\n 0.208146, 21.0, 2.93e-52, 3.95993, 4.78086], [-0.579253, 0.268009, -\n 2.161, 0.0319, -1.10777, -0.0507346]])', True, 'import numpy as np\n'), ((109, 8, 109, 57), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(109, 28, 109, 38): 'res.params', (109, 40, 109, 53): 'partable[:, (0)]', (109, 55, 109, 56): '(4)'}, {}), '(res.params, 
partable[:, (0)], 4)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((110, 8, 110, 54), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(110, 28, 110, 35): 'res.bse', (110, 37, 110, 50): 'partable[:, (1)]', (110, 52, 110, 53): '(6)'}, {}), '(res.bse, partable[:, (1)], 6)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((111, 8, 111, 58), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(111, 28, 111, 39): 'res.tvalues', (111, 41, 111, 54): 'partable[:, (2)]', (111, 56, 111, 57): '(2)'}, {}), '(res.tvalues, partable[:, (2)], 2)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((113, 8, 113, 74), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((118, 8, 118, 80), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((119, 8, 121, 34), 'numpy.testing.assert_allclose', 'assert_allclose', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((126, 18, 126, 54), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (), '', True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((127, 8, 127, 61), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((128, 8, 128, 61), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((134, 8, 134, 58), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((137, 8, 137, 57), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(137, 28, 137, 38): 'res.params', (137, 40, 137, 53): 'partable[:, (0)]', (137, 55, 137, 56): '(4)'}, {}), '(res.params, partable[:, (0)], 4)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((138, 8, 138, 54), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(138, 28, 138, 35): 'res.bse', (138, 37, 138, 50): 'partable[:, (1)]', (138, 52, 138, 53): '(3)'}, {}), '(res.bse, partable[:, (1)], 3)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((139, 8, 139, 58), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(139, 28, 139, 39): 'res.tvalues', (139, 41, 139, 54): 'partable[:, (2)]', (139, 56, 139, 57): '(2)'}, {}), '(res.tvalues, partable[:, (2)], 2)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((141, 8, 141, 74), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((146, 8, 146, 80), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((147, 8, 147, 84), 
'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((152, 12, 152, 42), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (), '', True, 'import statsmodels.stats.outliers_influence as oi\n'), ((154, 12, 154, 42), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (), '', True, 'import statsmodels.stats.outliers_influence as oi\n'), ((159, 18, 159, 54), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (), '', True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((160, 8, 160, 61), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((161, 8, 161, 61), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((288, 19, 291, 79), 'numpy.array', 'np.array', ({(288, 28, 291, 78): '[[-9.48167, 1.17709, -8.055, 7.17e-14, -11.8029, -7.16049], [4.37422, \n 0.328787, 13.3, 2.62e-29, 3.72587, 5.02258], [-0.613997, 0.293619, -\n 2.091, 0.0378, -1.193, -0.0349939]]'}, {}), '([[-9.48167, 1.17709, -8.055, 7.17e-14, -11.8029, -7.16049], [\n 4.37422, 0.328787, 13.3, 2.62e-29, 3.72587, 5.02258], [-0.613997, \n 0.293619, -2.091, 0.0378, -1.193, -0.0349939]])', True, 'import numpy as np\n'), ((345, 16, 345, 83), 'os.path.join', 'os.path.join', ({(345, 29, 345, 36): 'cur_dir', (345, 38, 345, 82): '"""results/leverage_influence_ols_nostars.txt"""'}, {}), "(cur_dir, 'results/leverage_influence_ols_nostars.txt')", False, 'import os\n'), ((346, 14, 347, 55), 'numpy.genfromtxt', 'np.genfromtxt', (), '', True, 'import numpy as np\n'), ((349, 11, 349, 34), 'numpy.isnan', 'np.isnan', ({(349, 20, 349, 33): "lev[-1]['f1']"}, {}), "(lev[-1]['f1'])", True, 'import numpy as np\n'), ((357, 18, 357, 71), 'statsmodels.stats.sandwich_covariance.cov_hac_simple', 'sw.cov_hac_simple', (), '', True, 'import statsmodels.stats.sandwich_covariance as sw\n'), ((358, 19, 358, 37), 'statsmodels.stats.sandwich_covariance.se_cov', 'sw.se_cov', ({(358, 29, 358, 36): 'cov_hac'}, {}), '(cov_hac)', True, 'import statsmodels.stats.sandwich_covariance as sw\n'), ((360, 8, 360, 57), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(360, 28, 360, 38): 'res.params', (360, 40, 360, 53): 'partable[:, (0)]', (360, 55, 360, 56): '(5)'}, {}), '(res.params, partable[:, (0)], 5)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((361, 8, 361, 54), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(361, 28, 361, 35): 'bse_hac', (361, 37, 361, 50): 'partable[:, (1)]', (361, 52, 361, 53): '(5)'}, {}), '(bse_hac, partable[:, (1)], 5)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((364, 8, 364, 74), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((365, 8, 365, 74), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((366, 8, 366, 84), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import 
assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((367, 8, 367, 92), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((377, 12, 377, 42), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (), '', True, 'import statsmodels.stats.outliers_influence as oi\n'), ((379, 12, 379, 42), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (), '', True, 'import statsmodels.stats.outliers_influence as oi\n'), ((382, 20, 382, 63), 'statsmodels.stats.diagnostic.linear_lm', 'smsdia.linear_lm', ({(382, 37, 382, 46): 'res.resid', (382, 48, 382, 62): 'res.model.exog'}, {}), '(res.resid, res.model.exog)', True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((383, 8, 383, 71), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((384, 8, 384, 71), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((386, 15, 386, 65), 'statsmodels.stats.diagnostic.het_breuschpagan', 'smsdia.het_breuschpagan', ({(386, 39, 386, 48): 'res.resid', (386, 50, 386, 64): 'res.model.exog'}, {}), '(res.resid, res.model.exog)', True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((387, 8, 387, 76), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((388, 8, 388, 76), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((390, 13, 390, 56), 'statsmodels.stats.diagnostic.het_white', 'smsdia.het_white', ({(390, 30, 390, 39): 'res.resid', (390, 41, 390, 55): 'res.model.exog'}, {}), '(res.resid, res.model.exog)', True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((391, 8, 391, 53), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', ({(391, 28, 391, 34): 'hw[:2]', (391, 36, 391, 49): 'het_white[:2]', (391, 51, 391, 52): '(6)'}, {}), '(hw[:2], het_white[:2], 6)', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((395, 18, 395, 53), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (), '', True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((396, 8, 396, 61), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((397, 8, 397, 61), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((401, 15, 401, 39), 'statsmodels.stats.outliers_influence.OLSInfluence', 'oi.OLSInfluence', ({(401, 31, 401, 38): 'res_ols'}, {}), '(res_ols)', True, 'import statsmodels.stats.outliers_influence as oi\n'), ((407, 8, 407, 66), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((408, 8, 408, 69), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import 
assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((409, 8, 409, 77), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((410, 8, 410, 72), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (), '', False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((416, 9, 416, 32), 'statsmodels.datasets.macrodata.load_pandas', 'macrodata.load_pandas', ({}, {}), '()', False, 'from statsmodels.datasets import macrodata\n'), ((427, 22, 427, 59), 'numpy.abs', 'np.abs', ({(427, 29, 427, 58): '(res1.params / res4.params - 1)'}, {}), '(res1.params / res4.params - 1)', True, 'import numpy as np\n'), ((38, 12, 38, 35), 'statsmodels.datasets.macrodata.load_pandas', 'macrodata.load_pandas', ({}, {}), '()', False, 'from statsmodels.datasets import macrodata\n'), ((117, 28, 117, 50), 'numpy.sqrt', 'np.sqrt', ({(117, 36, 117, 49): 'res.mse_resid'}, {}), '(res.mse_resid)', True, 'import numpy as np\n'), ((145, 28, 145, 50), 'numpy.sqrt', 'np.sqrt', ({(145, 36, 145, 49): 'res.mse_resid'}, {}), '(res.mse_resid)', True, 'import numpy as np\n'), ((344, 34, 344, 59), 'os.path.dirname', 'os.path.dirname', ({(344, 50, 344, 58): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((350, 18, 351, 59), 'numpy.genfromtxt', 'np.genfromtxt', (), '', True, 'import numpy as np\n'), ((368, 28, 368, 50), 'numpy.sqrt', 'np.sqrt', ({(368, 36, 368, 49): 'res.mse_resid'}, {}), '(res.mse_resid)', True, 'import numpy as np\n'), ((399, 16, 399, 63), 'statsmodels.stats.outliers_influence.variance_inflation_factor', 'oi.variance_inflation_factor', ({(399, 45, 399, 59): 'res.model.exog', (399, 61, 399, 62): 'k'}, {}), '(res.model.exog, k)', True, 'import statsmodels.stats.outliers_influence as oi\n'), ((417, 24, 417, 52), 'numpy.log', 'np.log', ({(417, 31, 417, 51): "d2['realgdp'].values"}, {}), "(d2['realgdp'].values)", True, 'import numpy as np\n'), ((418, 24, 418, 52), 'numpy.log', 'np.log', ({(418, 31, 418, 51): "d2['realinv'].values"}, {}), "(d2['realinv'].values)", True, 'import numpy as np\n'), ((429, 22, 429, 49), 'numpy.abs', 'np.abs', ({(429, 29, 429, 48): '(res4.bse / res1.bse)'}, {}), '(res4.bse / res1.bse)', True, 'import numpy as np\n'), ((43, 37, 43, 64), 'numpy.log', 'np.log', ({(43, 44, 43, 63): "d['realinv'].values"}, {}), "(d['realinv'].values)", True, 'import numpy as np\n'), ((44, 37, 44, 64), 'numpy.log', 'np.log', ({(44, 44, 44, 63): "d['realgdp'].values"}, {}), "(d['realgdp'].values)", True, 'import numpy as np\n'), ((53, 18, 53, 36), 'statsmodels.regression.linear_model.OLS', 'OLS', ({(53, 22, 53, 28): 'endogg', (53, 30, 53, 35): 'exogg'}, {}), '(endogg, exogg)', False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((48, 35, 48, 63), 'numpy.diff', 'np.diff', ({(48, 43, 48, 62): "d['realgdp'].values"}, {}), "(d['realgdp'].values)", True, 'import numpy as np\n')] |
tweeprint/api.tweeprint.com | core/views.py | 248525f2cffffb20765e7eca1e7a63f359adfc1b | import requests
import django.contrib.auth as auth
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse, Http404
from django.contrib.auth.decorators import login_required
from django.core.serializers import serialize
from core.serializers import *
from core.models import *
from core.secrets import API_TOKEN, STRIPE_API_KEY
import json
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
def get_category(request, category):
category = serialize('json', Tweeprint.objects.filter(category_slug=category), fields=('id', 'date_added', 'link', 'tweet_id', 'tweet_json', 'score', 'category', 'category_slug'))
return HttpResponse(category, content_type="application/json")
def get_categories(request):
categories = [t[0] for t in Tweeprint.CHOICES]
if request.method == 'GET':
return JsonResponse(categories, safe=False)
def get_used_categories(request):
used_categories = {t.category_slug: {'category': t.category, 'slug': t.category_slug} for t in Tweeprint.objects.all()}.values()
if request.method == 'GET':
return JsonResponse(list(used_categories), safe=False)
def get_tweeprints(request):
if request.method == 'GET':
tweeprints = serialize('json', Tweeprint.objects.all(), fields=('id', 'date_added', 'link', 'tweet_id', 'tweet_json', 'score', 'category', 'category_slug'))
return HttpResponse(tweeprints, content_type="application/json")
def get_most_recent(request):
if request.method == 'GET':
tweeprints = serialize('json', Tweeprint.objects.all().order_by('-date_added'), fields=('id', 'date_added', 'link', 'tweet_id', 'tweet_json', 'score', 'category', 'category_slug'))
return HttpResponse(tweeprints, content_type="application/json")
def get_most_popular(request):
if request.method == 'GET':
tweeprints = serialize('json', Tweeprint.objects.all().order_by('-score'), fields=('id', 'date_added', 'link', 'tweet_id', 'tweet_json', 'score', 'category', 'category_slug'))
return HttpResponse(tweeprints, content_type="application/json")
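# Illustrative note (not part of the original file): submit() below expects a JSON
# body with 'link' and 'category' keys; a hypothetical payload would be
#
#   {"link": "https://twitter.com/someuser/status/1234567890", "category": "biology"}
#
# where the category value is presumably one of Tweeprint.CHOICES (see
# get_categories above).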
@csrf_exempt
def submit(request):
if request.method == 'POST':
form = request.body
json_data = json.loads(request.body)
try:
tweeprint = Tweeprint.objects.create(link=str(json_data['link']), category=json_data['category'])
except Exception as e:
print(e)
return HttpResponse('Submitted!')
return HttpResponse("POST not made") | [((17, 11, 17, 66), 'django.http.HttpResponse', 'HttpResponse', (), '', False, 'from django.http import HttpResponse, JsonResponse, Http404\n'), ((55, 11, 55, 40), 'django.http.HttpResponse', 'HttpResponse', ({(55, 24, 55, 39): '"""POST not made"""'}, {}), "('POST not made')", False, 'from django.http import HttpResponse, JsonResponse, Http404\n'), ((22, 15, 22, 51), 'django.http.JsonResponse', 'JsonResponse', (), '', False, 'from django.http import HttpResponse, JsonResponse, Http404\n'), ((32, 15, 32, 72), 'django.http.HttpResponse', 'HttpResponse', (), '', False, 'from django.http import HttpResponse, JsonResponse, Http404\n'), ((37, 15, 37, 72), 'django.http.HttpResponse', 'HttpResponse', (), '', False, 'from django.http import HttpResponse, JsonResponse, Http404\n'), ((42, 15, 42, 72), 'django.http.HttpResponse', 'HttpResponse', (), '', False, 'from django.http import HttpResponse, JsonResponse, Http404\n'), ((49, 20, 49, 44), 'json.loads', 'json.loads', ({(49, 31, 49, 43): 'request.body'}, {}), '(request.body)', False, 'import json\n'), ((54, 15, 54, 41), 'django.http.HttpResponse', 'HttpResponse', ({(54, 28, 54, 40): '"""Submitted!"""'}, {}), "('Submitted!')", False, 'from django.http import HttpResponse, JsonResponse, Http404\n')] |
cdanielmachado/framed | src/framed/bioreactor/__init__.py | 36d56437685cbf5c7c3c8ee4f6d85b8f05f4d345 | from __future__ import absolute_import
__author__ = 'kaizhuang'
"""
Package implementing features for simulating bioreactor operation.
"""
from .base import Organism, Bioreactor
from .bioreactors import ANAEROBIC, AEROBIC, MICROAEROBIC
from .bioreactors import Bioreactor_ox, IdealBatch, IdealFedbatch
from framed.bioreactor.dfba import *
| [] |
deperrone/content | shared/templates/coreos_kernel_option/template.py | caaff27f01a1d6c15da461f9fafe26090e8fdd18 | from ssg.utils import parse_template_boolean_value
def preprocess(data, lang):
data["arg_negate"] = parse_template_boolean_value(data, parameter="arg_negate", default_value=False)
data["arg_is_regex"] = parse_template_boolean_value(data, parameter="arg_is_regex", default_value=False)
return data
| [((5, 25, 5, 104), 'ssg.utils.parse_template_boolean_value', 'parse_template_boolean_value', (), '', False, 'from ssg.utils import parse_template_boolean_value\n'), ((6, 27, 6, 108), 'ssg.utils.parse_template_boolean_value', 'parse_template_boolean_value', (), '', False, 'from ssg.utils import parse_template_boolean_value\n')] |
enicklas/pondus | pondus/backends/__init__.py | c94edce0351697c96f2ad046e8f602448d2e0df0 | # -*- coding: UTF-8 -*-
"""
This file is part of Pondus, a personal weight manager.
Copyright (C) 2011 Eike Nicklas <[email protected]>
This program is free software licensed under the MIT license. For details
see LICENSE or http://www.opensource.org/licenses/mit-license.php
"""
__all__ = ['csv_backend', 'sportstracker_backend', 'xml_backend',
'xml_backend_old']
| [] |
specialprocedures/chpy | setup.py | 3bbe66da96abe95653722682754b4d48f9c8eba1 | import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="chpy",
version="0.1.1",
description="Build networks from the Companies House API",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/specialprocedures/chpy",
author="Ian Goodrich",
# author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=find_packages(exclude=["collections", "time", "math", "re", "os"]),
include_package_data=True,
# install_requires=["networkx", "pandas", "progressbar", "fuzzywuzzy",
# "os", "requests", "math", "time", "collections", "re"]
)
| [((5, 7, 5, 29), 'pathlib.Path', 'pathlib.Path', ({(5, 20, 5, 28): '__file__'}, {}), '(__file__)', False, 'import pathlib\n'), ((26, 13, 26, 79), 'setuptools.find_packages', 'find_packages', (), '', False, 'from setuptools import find_packages, setup\n')] |
boblail/sentry | src/sentry/eventtypes/error.py | 71127331e58791d4651e480b65dd66f06cadc1c8 | from __future__ import absolute_import
import six
from sentry.utils.safe import get_path, trim
from sentry.utils.strings import truncatechars
from .base import BaseEvent
def get_crash_location(exception, platform=None):
default = None
for frame in reversed(get_path(exception, 'stacktrace', 'frames', filter=True) or ()):
fn = frame.get('filename') or frame.get('abs_path')
if fn:
func = frame.get('function')
if func is not None:
from sentry.interfaces.stacktrace import trim_function_name
func = trim_function_name(func, frame.get('platform') or platform)
if frame.get('in_app'):
return fn, func
if default is None:
default = fn, func
return default
class ErrorEvent(BaseEvent):
key = 'error'
def has_metadata(self, data):
exception = get_path(data, 'exception', 'values', -1)
return exception and any(v is not None for v in six.itervalues(exception))
def get_metadata(self, data):
exception = get_path(data, 'exception', 'values', -1)
if not exception:
return {}
loc = get_crash_location(exception, data.get('platform'))
rv = {
'value': trim(get_path(exception, 'value', default=''), 1024),
}
# If the exception mechanism indicates a synthetic exception we do not
# want to record the type and value into the metadata.
if not get_path(exception, 'mechanism', 'synthetic'):
rv['type'] = trim(get_path(exception, 'type', default='Error'), 128)
# Attach crash location if available
if loc is not None:
fn, func = loc
if fn:
rv['filename'] = fn
if func:
rv['function'] = func
return rv
def get_title(self, metadata):
ty = metadata.get('type')
if ty is None:
return metadata.get('function') or '<unknown>'
if not metadata.get('value'):
return ty
return u'{}: {}'.format(
ty,
truncatechars(metadata['value'].splitlines()[0], 100),
)
def get_location(self, metadata):
return metadata.get('filename')
| [((31, 20, 31, 61), 'sentry.utils.safe.get_path', 'get_path', ({(31, 29, 31, 33): 'data', (31, 35, 31, 46): '"""exception"""', (31, 48, 31, 56): '"""values"""', (31, 58, 31, 60): '-1'}, {}), "(data, 'exception', 'values', -1)", False, 'from sentry.utils.safe import get_path, trim\n'), ((35, 20, 35, 61), 'sentry.utils.safe.get_path', 'get_path', ({(35, 29, 35, 33): 'data', (35, 35, 35, 46): '"""exception"""', (35, 48, 35, 56): '"""values"""', (35, 58, 35, 60): '-1'}, {}), "(data, 'exception', 'values', -1)", False, 'from sentry.utils.safe import get_path, trim\n'), ((13, 26, 13, 82), 'sentry.utils.safe.get_path', 'get_path', (), '', False, 'from sentry.utils.safe import get_path, trim\n'), ((46, 15, 46, 60), 'sentry.utils.safe.get_path', 'get_path', ({(46, 24, 46, 33): 'exception', (46, 35, 46, 46): '"""mechanism"""', (46, 48, 46, 59): '"""synthetic"""'}, {}), "(exception, 'mechanism', 'synthetic')", False, 'from sentry.utils.safe import get_path, trim\n'), ((41, 26, 41, 66), 'sentry.utils.safe.get_path', 'get_path', (), '', False, 'from sentry.utils.safe import get_path, trim\n'), ((47, 30, 47, 74), 'sentry.utils.safe.get_path', 'get_path', (), '', False, 'from sentry.utils.safe import get_path, trim\n'), ((32, 56, 32, 81), 'six.itervalues', 'six.itervalues', ({(32, 71, 32, 80): 'exception'}, {}), '(exception)', False, 'import six\n')] |
Sultan91/keras-english-resume-parser-and-analyzer | keras_en_parser_and_analyzer/library/tests/test_detect_date.py | 221407cb0231e4c21f8edc61a2b19b74f9585d6a | from unittest import TestCase
from datetime import date
from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date
class DetectDate(TestCase):
def test_detect_date(self):
dates_to_test = ['10-1990', '09/12/2020', 'jan 1990', 'feb 2012', '9-12-2020']
res = detect_date(dates_to_test[0])
self.assertEqual(10, res.month)
self.assertEqual(1990, res.year)
res = detect_date(dates_to_test[1])
self.assertEqual(9, res.month)
self.assertEqual(2020, res.year)
res = detect_date(dates_to_test[2])
self.assertEqual(1, res.month)
self.assertEqual(1990, res.year)
res = detect_date(dates_to_test[3])
self.assertEqual(2, res.month)
self.assertEqual(2012, res.year)
res = detect_date(dates_to_test[4])
self.assertEqual(9, res.month)
self.assertEqual(2020, res.year)
| [((9, 14, 9, 43), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', ({(9, 26, 9, 42): 'dates_to_test[0]'}, {}), '(dates_to_test[0])', False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n'), ((12, 14, 12, 43), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', ({(12, 26, 12, 42): 'dates_to_test[1]'}, {}), '(dates_to_test[1])', False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n'), ((15, 14, 15, 43), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', ({(15, 26, 15, 42): 'dates_to_test[2]'}, {}), '(dates_to_test[2])', False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n'), ((18, 14, 18, 43), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', ({(18, 26, 18, 42): 'dates_to_test[3]'}, {}), '(dates_to_test[3])', False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n'), ((21, 14, 21, 43), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', ({(21, 26, 21, 42): 'dates_to_test[4]'}, {}), '(dates_to_test[4])', False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n')] |
google-admin/capirca | capirca/lib/ipset.py | 8c9e66456fedb3c0fc1c641dbefc41793e5c68d5 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Ipset iptables generator. This is a subclass of Iptables generator.
ipset is a system inside the Linux kernel, which can very efficiently store
and match IPv4 and IPv6 addresses. This can be used to dramatically increase
performance of iptables firewall.
"""
import string
from capirca.lib import iptables
from capirca.lib import nacaddr
class Error(Exception):
"""Base error class."""
class Term(iptables.Term):
"""Single Ipset term representation."""
_PLATFORM = 'ipset'
_SET_MAX_LENGTH = 31
_POSTJUMP_FORMAT = None
_PREJUMP_FORMAT = None
_TERM_FORMAT = None
_COMMENT_FORMAT = string.Template(
'-A $filter -m comment --comment "$comment"')
_FILTER_TOP_FORMAT = string.Template('-A $filter')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This stores tuples of set name and set contents, keyed by direction.
# For example:
# { 'src': ('set_name', [ipaddr object, ipaddr object]),
# 'dst': ('set_name', [ipaddr object, ipaddr object]) }
self.addr_sets = {}
def _CalculateAddresses(self, src_addr_list, src_addr_exclude_list,
dst_addr_list, dst_addr_exclude_list):
"""Calculates source and destination address list for a term.
Since ipset is very efficient at matching large number of
addresses, we never return any exclude addresses. Instead
least positive match is calculated for both source and destination
addresses.
For source and destination address list, three cases are possible.
First case is when there are no addresses. In that case we return
_all_ips.
Second case is when there is strictly one address. In that case,
we optimize by not generating a set, and it's then the only
element of returned set.
Third case is when there are more than one address in a set.
In that case we generate a set and also return _all_ips. Note the
difference to the first case where no set is actually generated.
Args:
src_addr_list: source address list of the term.
src_addr_exclude_list: source address exclude list of the term.
dst_addr_list: destination address list of the term.
dst_addr_exclude_list: destination address exclude list of the term.
Returns:
tuple containing source address list, source address exclude list,
destination address list, destination address exclude list in
that order.
"""
target_af = self.AF_MAP[self.af]
src_addr_list = self._CalculateAddrList(src_addr_list,
src_addr_exclude_list, target_af,
'src')
dst_addr_list = self._CalculateAddrList(dst_addr_list,
dst_addr_exclude_list, target_af,
'dst')
return (src_addr_list, [], dst_addr_list, [])
def _CalculateAddrList(self, addr_list, addr_exclude_list,
target_af, direction):
"""Calculates and stores address list for target AF and direction.
Args:
addr_list: address list.
addr_exclude_list: address exclude list of the term.
target_af: target address family.
direction: direction in which address list will be used.
Returns:
calculated address list.
"""
if not addr_list:
addr_list = [self._all_ips]
addr_list = [addr for addr in addr_list if addr.version == target_af]
if addr_exclude_list:
addr_exclude_list = [addr_exclude for addr_exclude in addr_exclude_list if
addr_exclude.version == target_af]
addr_list = nacaddr.ExcludeAddrs(addr_list, addr_exclude_list)
if len(addr_list) > 1:
set_name = self._GenerateSetName(self.term.name, direction)
self.addr_sets[direction] = (set_name, addr_list)
addr_list = [self._all_ips]
return addr_list
def _GenerateAddressStatement(self, src_addr, dst_addr):
"""Returns the address section of an individual iptables rule.
See _CalculateAddresses documentation. Three cases are possible here,
and they map directly to cases in _CalculateAddresses.
First, there can be no addresses for a direction (value is _all_ips then)
In that case we return empty string.
Second there can be stricly one address. In that case we return single
address match (-s or -d).
Third case, is when the value is _all_ips but also the set for particular
direction is present. That's when we return a set match.
Args:
src_addr: ipaddr address or network object with source
address of the rule.
dst_addr: ipaddr address or network object with destination
address of the rule.
Returns:
tuple containing source and destination address statement, in
that order.
"""
src_addr_stmt = ''
dst_addr_stmt = ''
if src_addr and dst_addr:
if src_addr == self._all_ips:
if 'src' in self.addr_sets:
src_addr_stmt = ('-m set --match-set %s src' %
self.addr_sets['src'][0])
else:
src_addr_stmt = '-s %s/%d' % (src_addr.network_address,
src_addr.prefixlen)
if dst_addr == self._all_ips:
if 'dst' in self.addr_sets:
dst_addr_stmt = ('-m set --match-set %s dst' %
self.addr_sets['dst'][0])
else:
dst_addr_stmt = '-d %s/%d' % (dst_addr.network_address,
dst_addr.prefixlen)
return (src_addr_stmt, dst_addr_stmt)
def _GenerateSetName(self, term_name, suffix):
if self.af == 'inet6':
suffix += '-v6'
if len(term_name) + len(suffix) + 1 > self._SET_MAX_LENGTH:
set_name_max_lenth = self._SET_MAX_LENGTH - len(suffix) - 1
term_name = term_name[:set_name_max_lenth]
return '%s-%s' % (term_name, suffix)
class Ipset(iptables.Iptables):
"""Ipset generator."""
_PLATFORM = 'ipset'
_SET_TYPE = 'hash:net'
SUFFIX = '.ips'
_TERM = Term
_MARKER_BEGIN = '# begin:ipset-rules'
_MARKER_END = '# end:ipset-rules'
_GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose',
'exists']
# TODO(vklimovs): some not trivial processing is happening inside this
# __str__, replace with explicit method
def __str__(self):
# Actual rendering happens in __str__, so it has to be called
# before we do set specific part.
iptables_output = super().__str__()
output = []
output.append(self._MARKER_BEGIN)
for (_, _, _, _, terms) in self.iptables_policies:
for term in terms:
output.extend(self._GenerateSetConfig(term))
output.append(self._MARKER_END)
output.append(iptables_output)
return '\n'.join(output)
def _GenerateSetConfig(self, term):
"""Generates set configuration for supplied term.
Args:
term: input term.
Returns:
string that is configuration of supplied term.
"""
output = []
c_str = 'create'
a_str = 'add'
if 'exists' in self.filter_options:
c_str = c_str + ' -exist'
a_str = a_str + ' -exist'
for direction in sorted(term.addr_sets, reverse=True):
set_name, addr_list = term.addr_sets[direction]
set_hashsize = 1 << len(addr_list).bit_length()
set_maxelem = set_hashsize
output.append('%s %s %s family %s hashsize %i maxelem %i' %
(c_str,
set_name,
self._SET_TYPE,
term.af,
set_hashsize,
set_maxelem))
for address in addr_list:
output.append('%s %s %s' % (a_str, set_name, address))
return output
| [((41, 20, 42, 51), 'string.Template', 'string.Template', ({(42, 6, 42, 50): '"""-A $filter -m comment --comment "$comment\\""""'}, {}), '(\'-A $filter -m comment --comment "$comment"\')', False, 'import string\n'), ((43, 23, 43, 52), 'string.Template', 'string.Template', ({(43, 39, 43, 51): '"""-A $filter"""'}, {}), "('-A $filter')", False, 'import string\n'), ((113, 18, 113, 68), 'capirca.lib.nacaddr.ExcludeAddrs', 'nacaddr.ExcludeAddrs', ({(113, 39, 113, 48): 'addr_list', (113, 50, 113, 67): 'addr_exclude_list'}, {}), '(addr_list, addr_exclude_list)', False, 'from capirca.lib import nacaddr\n')] |
zhut19/straxen | straxen/analyses/records_matrix.py | 20dea986790ef168ba7052d652a7aa19ab836943 | import warnings
import numba
import numpy as np
import strax
import straxen
DEFAULT_MAX_SAMPLES = 20_000
@straxen.mini_analysis(requires=('records',),
warn_beyond_sec=10,
default_time_selection='touching')
def records_matrix(records, time_range, seconds_range, config, to_pe,
max_samples=DEFAULT_MAX_SAMPLES,
ignore_max_sample_warning=False):
"""Return (wv_matrix, times, pms)
- wv_matrix: (n_samples, n_pmt) array with per-PMT waveform intensity in PE/ns
- times: time labels in seconds (corr. to rows)
- pmts: PMT numbers (corr. to columns)
Both times and pmts have one extra element.
:param max_samples: Maximum number of time samples. If window and dt
conspire to exceed this, waveforms will be downsampled.
:param ignore_max_sample_warning: If True, suppress warning when this happens.
Example:
wvm, ts, ys = st.records_matrix(run_id, seconds_range=(1., 1.00001))
plt.pcolormesh(ts, ys, wvm.T,
norm=matplotlib.colors.LogNorm())
plt.colorbar(label='Intensity [PE / ns]')
"""
if len(records):
dt = records[0]['dt']
samples_per_record = len(records[0]['data'])
else:
# Defaults here do not matter, nothing will be plotted anyway
        dt, samples_per_record = 10, 110
record_duration = samples_per_record * dt
window = time_range[1] - time_range[0]
if window / dt > max_samples:
with np.errstate(divide='ignore', invalid='ignore'):
# Downsample. New dt must be
# a) multiple of old dt
dts = np.arange(0, record_duration + dt, dt).astype(np.int)
# b) divisor of record duration
dts = dts[record_duration / dts % 1 == 0]
# c) total samples < max_samples
dts = dts[window / dts < max_samples]
if len(dts):
# Pick lowest dt that satisfies criteria
dt = dts.min()
else:
# Records will be downsampled to single points
dt = max(record_duration, window // max_samples)
if not ignore_max_sample_warning:
warnings.warn(f"Matrix would exceed max_samples {max_samples}, "
f"downsampling to dt = {dt} ns.")
wvm = _records_to_matrix(
records,
t0=time_range[0],
n_channels=config['n_tpc_pmts'],
dt=dt,
window=window)
wvm = wvm.astype(np.float32) * to_pe.reshape(1, -1) / dt
# Note + 1, so data for sample 0 will range from 0-1 in plot
ts = (np.arange(wvm.shape[0] + 1) * dt / int(1e9) + seconds_range[0])
ys = np.arange(wvm.shape[1] + 1)
return wvm, ts, ys
@straxen.mini_analysis(requires=('raw_records',),
warn_beyond_sec=3e-3,
default_time_selection='touching')
def raw_records_matrix(context, run_id, raw_records, time_range,
ignore_max_sample_warning=False,
max_samples=DEFAULT_MAX_SAMPLES,
**kwargs):
# Convert raw to records. We may not be able to baseline correctly
# at the start of the range due to missing zeroth fragments
records = strax.raw_to_records(raw_records)
strax.baseline(records, allow_sloppy_chunking=True)
strax.zero_out_of_bounds(records)
return context.records_matrix(run_id=run_id,
records=records,
time_range=time_range,
max_samples=max_samples,
ignore_max_sample_warning=ignore_max_sample_warning,
**kwargs)
@numba.njit
def _records_to_matrix(records, t0, window, n_channels, dt=10):
n_samples = (window // dt) + 1
# Use 32-bit integers, so downsampling saturated samples doesn't
# cause wraparounds
# TODO: amplitude bit shift!
y = np.zeros((n_samples, n_channels),
dtype=np.int32)
if not len(records):
return y
samples_per_record = len(records[0]['data'])
for r in records:
if r['channel'] > n_channels:
continue
if dt >= samples_per_record * r['dt']:
# Downsample to single sample -> store area
idx = (r['time'] - t0) // dt
if idx >= len(y):
print(len(y), idx)
raise IndexError('Despite n_samples = window // dt + 1, our '
'idx is too high?!')
y[idx, r['channel']] += r['area']
continue
# Assume out-of-bounds data has been zeroed, so we do not
# need to do r['data'][:r['length']] here.
# This simplifies downsampling.
w = r['data'].astype(np.int32)
if dt > r['dt']:
# Downsample
duration = samples_per_record * r['dt']
assert duration % dt == 0, "Cannot downsample fractionally"
# .astype here keeps numba happy ... ??
w = w.reshape(duration // dt, -1).sum(axis=1).astype(np.int32)
elif dt < r['dt']:
raise ValueError("Upsampling not yet implemented")
(r_start, r_end), (y_start, y_end) = strax.overlap_indices(
r['time'] // dt, len(w),
t0 // dt, n_samples)
# += is paranoid, data in individual channels should not overlap
# but... https://github.com/AxFoundation/strax/issues/119
y[y_start:y_end, r['channel']] += w[r_start:r_end]
return y
| [((12, 1, 14, 57), 'straxen.mini_analysis', 'straxen.mini_analysis', (), '', False, 'import straxen\n'), ((77, 1, 79, 57), 'straxen.mini_analysis', 'straxen.mini_analysis', (), '', False, 'import straxen\n'), ((72, 9, 72, 36), 'numpy.arange', 'np.arange', ({(72, 19, 72, 35): 'wvm.shape[1] + 1'}, {}), '(wvm.shape[1] + 1)', True, 'import numpy as np\n'), ((86, 14, 86, 47), 'strax.raw_to_records', 'strax.raw_to_records', ({(86, 35, 86, 46): 'raw_records'}, {}), '(raw_records)', False, 'import strax\n'), ((87, 4, 87, 55), 'strax.baseline', 'strax.baseline', (), '', False, 'import strax\n'), ((88, 4, 88, 37), 'strax.zero_out_of_bounds', 'strax.zero_out_of_bounds', ({(88, 29, 88, 36): 'records'}, {}), '(records)', False, 'import strax\n'), ((104, 8, 105, 32), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((44, 13, 44, 59), 'numpy.errstate', 'np.errstate', (), '', True, 'import numpy as np\n'), ((59, 12, 60, 59), 'warnings.warn', 'warnings.warn', ({(59, 26, 60, 58): 'f"""Matrix would exceed max_samples {max_samples}, downsampling to dt = {dt} ns."""'}, {}), "(\n f'Matrix would exceed max_samples {max_samples}, downsampling to dt = {dt} ns.'\n )", False, 'import warnings\n'), ((71, 10, 71, 37), 'numpy.arange', 'np.arange', ({(71, 20, 71, 36): '(wvm.shape[0] + 1)'}, {}), '(wvm.shape[0] + 1)', True, 'import numpy as np\n'), ((47, 18, 47, 56), 'numpy.arange', 'np.arange', ({(47, 28, 47, 29): '0', (47, 31, 47, 51): 'record_duration + dt', (47, 53, 47, 55): 'dt'}, {}), '(0, record_duration + dt, dt)', True, 'import numpy as np\n')] |
entropyx/fiduchain-blockchain-interface | bdbc/lib/python3.5/site-packages/bigchaindb_driver/crypto.py | 07336a5eebfaa9cddb148edb94461a8fd57562b1 | from collections import namedtuple
from cryptoconditions import crypto
CryptoKeypair = namedtuple('CryptoKeypair', ('signing_key', 'verifying_key'))
def generate_keypair():
"""Generates a cryptographic key pair.
Returns:
:class:`~bigchaindb_driver.crypto.CryptoKeypair`: A
:obj:`collections.namedtuple` with named fields
:attr:`~bigchaindb_driver.crypto.CryptoKeypair.signing_key` and
:attr:`~bigchaindb_driver.crypto.CryptoKeypair.verifying_key`.
"""
return CryptoKeypair(
*(k.decode() for k in crypto.ed25519_generate_key_pair()))
| [((6, 16, 6, 77), 'collections.namedtuple', 'namedtuple', ({(6, 27, 6, 42): '"""CryptoKeypair"""', (6, 44, 6, 76): "('signing_key', 'verifying_key')"}, {}), "('CryptoKeypair', ('signing_key', 'verifying_key'))", False, 'from collections import namedtuple\n'), ((20, 30, 20, 64), 'cryptoconditions.crypto.ed25519_generate_key_pair', 'crypto.ed25519_generate_key_pair', ({}, {}), '()', False, 'from cryptoconditions import crypto\n')] |
mnoorenberghe/reviewboard | reviewboard/webapi/resources/change.py | b8ba9d662c250cb5ec704a50f619adbf3be8cbf0 | from __future__ import unicode_literals
from django.utils import six
from djblets.util.decorators import augment_method_from
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.reviews.fields import get_review_request_field
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
class ChangeResource(MarkdownFieldsMixin, WebAPIResource):
"""Provides information on a change made to a public review request.
A change includes, optionally, text entered by the user describing the
change, and also includes a list of fields that were changed on the
review request.
The list of fields changed are in ``fields_changed``. The keys are the
names of the fields, and the values are details on that particular
change to the field.
For ``summary``, ``description``, ``testing_done`` and ``branch`` fields,
the following detail keys will be available:
* ``old``: The old value of the field.
* ``new``: The new value of the field.
For ``diff`` fields:
* ``added``: The diff that was added.
For ``bugs_closed`` fields:
* ``old``: A list of old bugs.
* ``new``: A list of new bugs.
* ``removed``: A list of bugs that were removed, if any.
* ``added``: A list of bugs that were added, if any.
For ``file_attachments``, ``screenshots``, ``target_people`` and
``target_groups`` fields:
* ``old``: A list of old items.
* ``new``: A list of new items.
* ``removed``: A list of items that were removed, if any.
* ``added``: A list of items that were added, if any.
For ``screenshot_captions`` and ``file_captions`` fields:
* ``old``: The old caption.
* ``new``: The new caption.
* ``screenshot``: The screenshot that was updated.
"""
added_in = '1.6'
model = ChangeDescription
name = 'change'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the change description.',
},
'fields_changed': {
'type': dict,
'description': 'The fields that were changed.',
},
'text': {
'type': six.text_type,
'description': 'The description of the change written by the '
'submitter.',
'supports_text_types': True,
},
'text_type': {
'type': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The mode for the text field.',
'added_in': '2.0',
},
'timestamp': {
'type': six.text_type,
'description': 'The date and time that the change was made '
'(in YYYY-MM-DD HH:MM:SS format).',
},
}
uri_object_key = 'change_id'
model_parent_key = 'review_request'
allowed_methods = ('GET',)
mimetype_list_resource_name = 'review-request-changes'
mimetype_item_resource_name = 'review-request-change'
def serialize_fields_changed_field(self, obj, **kwargs):
review_request = obj.review_request.get()
fields_changed = {}
for field_name, data in six.iteritems(obj.fields_changed):
field_cls = get_review_request_field(field_name)
field = field_cls(review_request)
fields_changed[field.field_id] = field.serialize_change_entry(obj)
return fields_changed
def has_access_permissions(self, request, obj, *args, **kwargs):
return obj.review_request.get().is_accessible_by(request.user)
def get_queryset(self, request, *args, **kwargs):
review_request = resources.review_request.get_object(
request, *args, **kwargs)
return review_request.changedescs.filter(public=True)
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get_list(self, *args, **kwargs):
"""Returns a list of changes made on a review request."""
pass
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Returns the information on a change to a review request."""
pass
change_resource = ChangeResource()
| [((114, 5, 114, 40), 'djblets.util.decorators.augment_method_from', 'augment_method_from', ({(114, 25, 114, 39): 'WebAPIResource'}, {}), '(WebAPIResource)', False, 'from djblets.util.decorators import augment_method_from\n'), ((120, 5, 120, 40), 'djblets.util.decorators.augment_method_from', 'augment_method_from', ({(120, 25, 120, 39): 'WebAPIResource'}, {}), '(WebAPIResource)', False, 'from djblets.util.decorators import augment_method_from\n'), ((96, 32, 96, 65), 'django.utils.six.iteritems', 'six.iteritems', ({(96, 46, 96, 64): 'obj.fields_changed'}, {}), '(obj.fields_changed)', False, 'from django.utils import six\n'), ((108, 25, 109, 37), 'reviewboard.webapi.resources.resources.review_request.get_object', 'resources.review_request.get_object', ({(109, 12, 109, 19): 'request', (109, 21, 109, 26): '*args'}, {}), '(request, *args, **kwargs)', False, 'from reviewboard.webapi.resources import resources\n'), ((97, 24, 97, 60), 'reviewboard.reviews.fields.get_review_request_field', 'get_review_request_field', ({(97, 49, 97, 59): 'field_name'}, {}), '(field_name)', False, 'from reviewboard.reviews.fields import get_review_request_field\n')] |
heminsatya/free_notes | controllers/notes/NewNote.py | 88272a34c48e60d1a82e28b0b2d56883fa724bb3 | # Dependencies
from aurora import Controller, View, Forms
from models import Users, Notes
from aurora.security import login_required, get_session
from flask import request
from datetime import datetime
# The controller class
class NewNote(Controller):
# POST Method
@login_required(app='users')
def post(self):
# The required models
user = Users().read(where={'username':get_session('user')}).first()
notes = Notes()
# Form data
data = request.form
form = Forms(data)
# Valid form data
if form.validate():
# Collect form inputs
title = data.get('title')
content = data.get('content')
# Required fields
if not title or not content:
return {
'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
}, 400
# Everything is fine
# Insert new note into the database
data = {
'user_id': user['id'],
'title': title,
'content': content,
# 'date': datetime.now().strftime("%m-%d-%Y")
}
notes.create(data=data)
# Return the result
return {
'success': '<i class="fas fa-check-circle mr-1"></i> The new note created successfully!',
}, 200
# Invalid form data
else:
# Return the result
return {
'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
}, 400
# GET Method
@login_required(app='users')
def get(self):
# The required models
user = Users().read(where={'username':get_session('user')}).first()
notes = Notes().read(where={'user_id':user['id']}, order_by={'id':'DESC'}).all()
form = Forms()
return View('create', user=user, form=form)
| [((12, 5, 12, 32), 'aurora.security.login_required', 'login_required', (), '', False, 'from aurora.security import login_required, get_session\n'), ((58, 5, 58, 32), 'aurora.security.login_required', 'login_required', (), '', False, 'from aurora.security import login_required, get_session\n'), ((16, 16, 16, 23), 'models.Notes', 'Notes', ({}, {}), '()', False, 'from models import Users, Notes\n'), ((20, 15, 20, 26), 'aurora.Forms', 'Forms', ({(20, 21, 20, 25): 'data'}, {}), '(data)', False, 'from aurora import Controller, View, Forms\n'), ((64, 15, 64, 22), 'aurora.Forms', 'Forms', ({}, {}), '()', False, 'from aurora import Controller, View, Forms\n'), ((66, 15, 66, 51), 'aurora.View', 'View', (), '', False, 'from aurora import Controller, View, Forms\n'), ((15, 15, 15, 22), 'models.Users', 'Users', ({}, {}), '()', False, 'from models import Users, Notes\n'), ((61, 15, 61, 22), 'models.Users', 'Users', ({}, {}), '()', False, 'from models import Users, Notes\n'), ((62, 16, 62, 23), 'models.Notes', 'Notes', ({}, {}), '()', False, 'from models import Users, Notes\n'), ((15, 46, 15, 65), 'aurora.security.get_session', 'get_session', ({(15, 58, 15, 64): '"""user"""'}, {}), "('user')", False, 'from aurora.security import login_required, get_session\n'), ((61, 46, 61, 65), 'aurora.security.get_session', 'get_session', ({(61, 58, 61, 64): '"""user"""'}, {}), "('user')", False, 'from aurora.security import login_required, get_session\n')] |
udayraj-gupta/ga-learner-dsmp-repo | EDA-&-Data-Preprocessing/code.py | 90b16345fb3fd4f6f4f201012995eea7ff1e73e9 | # --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())*100
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percentage'])
print(missing_data)
data = data.dropna()
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())*100
missing_data_1 = pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percentage'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
a = sns.catplot(x='Category',y='Rating',data=data, kind="box", height = 10)
a.set_xticklabels(rotation=90)
a.set_titles('Rating vs Category [BoxPlot]')
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
le = LabelEncoder()
#data['Installs'] = data['Installs'].str.replace(',','').str.replace('+','')
data['Installs'] = data['Installs'].apply(lambda x : x.replace(',','')).apply(lambda x : x.replace('+',''))
data['Installs'] =data['Installs'].astype(int)
print(data['Installs'])
data['Installs'] = le.fit_transform(data['Installs'])
a = sns.regplot(x="Installs", y="Rating" , data=data)
a.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import seaborn as sns
#Code starts here
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].apply(lambda x : x.replace('$',''))
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].astype(float)
#le=LabelEncoder()
#data['Installs'] = le.fit_transform(data['Installs'])
y=sns.regplot(data=data,x='Price',y='Rating')
y.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
data['Genres']=data['Genres'].str.split(';').str[0]
#print(data['Genres'])
df=data[['Genres','Rating']]
gr_mean=df.groupby(['Genres'],as_index=False).mean()
gr_mean=gr_mean.sort_values(by=['Rating'])
gr_mean=pd.DataFrame(gr_mean)
print(gr_mean)#,gr_mean[-1,:])
#Code ends heree
# --------------
#Code starts here
import seaborn as sns
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
print(data['Last Updated'].max())
max_date=data['Last Updated'].max()
data['Last Updated Days']=max_date-data['Last Updated']
data['Last Updated Days']=data['Last Updated Days'].dt.days
sns.regplot(data=data,x='Last Updated Days',y='Rating').set_title('Rating vs Last Updated [RegPlot]')
#Code ends here
| [((9, 7, 9, 24), 'pandas.read_csv', 'pd.read_csv', ({(9, 19, 9, 23): 'path'}, {}), '(path)', True, 'import pandas as pd\n'), ((21, 15, 21, 86), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((27, 17, 27, 92), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((36, 4, 36, 75), 'seaborn.catplot', 'sns.catplot', (), '', True, 'import seaborn as sns\n'), ((48, 5, 48, 19), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ({}, {}), '()', False, 'from sklearn.preprocessing import MinMaxScaler, LabelEncoder\n'), ((54, 4, 54, 53), 'seaborn.regplot', 'sns.regplot', (), '', True, 'import seaborn as sns\n'), ((75, 2, 75, 45), 'seaborn.regplot', 'sns.regplot', (), '', True, 'import seaborn as sns\n'), ((92, 8, 92, 29), 'pandas.DataFrame', 'pd.DataFrame', ({(92, 21, 92, 28): 'gr_mean'}, {}), '(gr_mean)', True, 'import pandas as pd\n'), ((102, 23, 102, 59), 'pandas.to_datetime', 'pd.to_datetime', ({(102, 38, 102, 58): "data['Last Updated']"}, {}), "(data['Last Updated'])", True, 'import pandas as pd\n'), ((108, 0, 108, 55), 'seaborn.regplot', 'sns.regplot', (), '', True, 'import seaborn as sns\n')] |
rguan-uoft/OpenPNM | openpnm/algorithms/ChargeConservation.py | b3873d35270b0acaad019264368d0055c677d159 | import numpy as np
from openpnm.algorithms import ReactiveTransport
from openpnm.models.physics import generic_source_term as gst
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class ChargeConservation(ReactiveTransport):
r"""
A class to enforce charge conservation in ionic transport simulations.
Parameters
----------
network : OpenPNM Network object
The network on which this algorithm operates
project : OpenPNM Project object
Either a network or a project must be specified
name : string, optional
A unique name to give the object for easier identification. If not
given, one is generated.
"""
def __init__(self, settings={}, phase=None, **kwargs):
def_set = {'phase': None,
'quantity': 'pore.potential',
'conductance': 'throat.ionic_conductance',
'charge_conservation': 'electroneutrality',
'gui': {'setup': {'phase': None,
'quantity': '',
'conductance': '',
'charge_conservation': ''},
'set_rate_BC': {'pores': None,
'values': None},
'set_value_BC': {'pores': None,
'values': None},
'set_source': {'pores': None,
'propname': ''}
}
}
super().__init__(**kwargs)
self.settings.update(def_set)
self.settings.update(settings)
if phase is not None:
self.setup(phase=phase)
def setup(self, phase=None, quantity='', conductance='',
charge_conservation=None, **kwargs):
r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings.
Parameters
----------
phase : OpenPNM Phase object
The phase on which the algorithm is to be run.
quantity : string
(default is ``'pore.mole_fraction'``) The name of the physical
quantity to be calculated.
conductance : string
(default is ``'throat.diffusive_conductance'``) The name of the
pore-scale transport conductance values. These are typically
calculated by a model attached to a *Physics* object associated
with the given *Phase*.
charge_conservation : string
The assumption adopted to enforce charge conservation when
performing ions transport simulations (default is
"electroneutrality").
Notes
-----
Any additional arguments are added to the ``settings`` dictionary of
the object.
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if conductance:
self.settings['conductance'] = conductance
if charge_conservation:
self.settings['charge_conservation'] = charge_conservation
super().setup(**kwargs)
def _charge_conservation_eq_source_term(self, e_alg):
# Source term for Poisson or charge conservation (electroneutrality) eq
phase = self.project.phases()[self.settings['phase']]
Ps = (self['pore.all'] * np.isnan(self['pore.bc_value']) *
np.isnan(self['pore.bc_rate']))
mod = gst.charge_conservation
phys = self.project.find_physics(phase=phase)
phys[0].add_model(propname='pore.charge_conservation', model=mod,
phase=phase, p_alg=self, e_alg=e_alg,
assumption=self.settings['charge_conservation'])
self.set_source(propname='pore.charge_conservation', pores=Ps)
| [((5, 9, 5, 36), 'openpnm.utils.logging.getLogger', 'logging.getLogger', ({(5, 27, 5, 35): '__name__'}, {}), '(__name__)', False, 'from openpnm.utils import logging\n'), ((94, 14, 94, 44), 'numpy.isnan', 'np.isnan', ({(94, 23, 94, 43): "self['pore.bc_rate']"}, {}), "(self['pore.bc_rate'])", True, 'import numpy as np\n'), ((93, 33, 93, 64), 'numpy.isnan', 'np.isnan', ({(93, 42, 93, 63): "self['pore.bc_value']"}, {}), "(self['pore.bc_value'])", True, 'import numpy as np\n')] |
Kosinkadink/jno | jno/commands/upload.py | 773806dd737c1ef0b0a89a7e4086da9c2c1260c1 | from jno.util import interpret_configs
from jno.util import run_arduino_process
from jno.util import create_build_directory
from jno.util import get_common_parameters
from jno.util import verify_arduino_dir
from jno.util import verify_and_get_port
from jno.util import JnoException
from jno.commands.command import Command
import getopt
from colorama import Fore
class Upload(Command):
help_name = "Upload"
help_usage = "jno upload [-b, --board=] boardname [-p, --ports=] port [-v, --verbose]"
help_description = "Runs build and uploads to board. Without arguments, uses board/port defined locally/globally. " \
"If port is not defined, uses first available port. With -v, more info will be displayed during upload."
def run(self,argv,location):
jno_dict = interpret_configs()
verify_arduino_dir(jno_dict)
create_build_directory(jno_dict)
arg_list = self.perform_upload(argv,jno_dict)
run_arduino_process(arg_list)
# Create argument list for arduino build
def perform_upload(self,argv,jno_dict):
# assemble command query
# GOAL: <arduino exec> --upload <script> --board <board> --port <serial>
arg_list = [jno_dict["EXEC_SCRIPT"]]
# add common params - set pref
arg_list.extend(get_common_parameters(jno_dict))
# add upload params
arg_list.append("--upload")
arg_list.append(jno_dict["SKETCH_INO"])
try:
opts,args = getopt.getopt(argv, 'b:p:v',['board=','port=','verbose'])
except getopt.GetoptError as e:
raise JnoException(str(e))
for opt, arg in opts:
if opt in ("-b","--board"):
jno_dict["board"] = arg.strip()
elif opt in ("-p","--port"):
jno_dict["port"] = arg.strip()
elif opt in ("-v","--verbose"):
arg_list.append("--verbose")
# verify port or get first available
port = verify_and_get_port(jno_dict["port"])
if not port:
if jno_dict["port"] == "DEFAULT":
raise JnoException("no ports available")
raise JnoException("port does not exist: {}".format(jno_dict["port"]))
else:
if jno_dict["port"] == "DEFAULT":
print("{1}No port provided, using available port {0}{2}".format(port,Fore.YELLOW,Fore.RESET))
# add board params
arg_list.append("--board")
arg_list.append(self.formatBoard(jno_dict["board"],jno_dict))
# add port params
arg_list.append("--port")
arg_list.append(port)
return arg_list
| [((21, 13, 21, 32), 'jno.util.interpret_configs', 'interpret_configs', ({}, {}), '()', False, 'from jno.util import interpret_configs\n'), ((22, 2, 22, 30), 'jno.util.verify_arduino_dir', 'verify_arduino_dir', ({(22, 21, 22, 29): 'jno_dict'}, {}), '(jno_dict)', False, 'from jno.util import verify_arduino_dir\n'), ((23, 2, 23, 34), 'jno.util.create_build_directory', 'create_build_directory', ({(23, 25, 23, 33): 'jno_dict'}, {}), '(jno_dict)', False, 'from jno.util import create_build_directory\n'), ((25, 2, 25, 31), 'jno.util.run_arduino_process', 'run_arduino_process', ({(25, 22, 25, 30): 'arg_list'}, {}), '(arg_list)', False, 'from jno.util import run_arduino_process\n'), ((50, 9, 50, 46), 'jno.util.verify_and_get_port', 'verify_and_get_port', ({(50, 29, 50, 45): "jno_dict['port']"}, {}), "(jno_dict['port'])", False, 'from jno.util import verify_and_get_port\n'), ((33, 18, 33, 49), 'jno.util.get_common_parameters', 'get_common_parameters', ({(33, 40, 33, 48): 'jno_dict'}, {}), '(jno_dict)', False, 'from jno.util import get_common_parameters\n'), ((39, 15, 39, 72), 'getopt.getopt', 'getopt.getopt', ({(39, 29, 39, 33): 'argv', (39, 35, 39, 42): '"""b:p:v"""', (39, 43, 39, 71): "['board=', 'port=', 'verbose']"}, {}), "(argv, 'b:p:v', ['board=', 'port=', 'verbose'])", False, 'import getopt\n'), ((53, 10, 53, 44), 'jno.util.JnoException', 'JnoException', ({(53, 23, 53, 43): '"""no ports available"""'}, {}), "('no ports available')", False, 'from jno.util import JnoException\n')] |
rizwan09/hydra-sum | modelling/inference_multi_attribute.py | 42088dde4e2b109fdb222ad4c329ca7bbfe9db2f | import argparse
import json
import logging
import os
import torch
from transformers.file_utils import ModelOutput
from typing import Dict, Optional, Tuple
from torch.utils.data import DataLoader, SequentialSampler
from transformers.modeling_outputs import Seq2SeqLMOutput
import train_seq2seq_utils
import single_head_utils
import multi_head_utils
from torch import nn
from generation_utils_multi_attribute import GenerationMixinCustomCombined
from transformers import (
PreTrainedModel,
PreTrainedTokenizer,
BartConfig,
BartTokenizer
)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {"bart_mult_heads_2": (BartConfig,
multi_head_utils.ConditionalGenerationCustomBartMultHeads,
BartTokenizer),
}
class Seq2SeqLMOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values_1: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
past_key_values_2: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
class BartModelCombined(GenerationMixinCustomCombined, nn.Module):
def __init__(self, model1, model2, config: BartConfig):
super().__init__()
self.model1 = model1
self.model2 = model2
self.config = config
self.device = model2.device
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs_1=None,
encoder_outputs_2=None,
past_key_values_1=None,
past_key_values_2=None,
inputs_embeds=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=None,
use_mixed=False,
use_head_1=0,
use_head_2=0,
gate_prob=0.5,
):
args1 = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'decoder_attention_mask': decoder_attention_mask,
'head_mask': head_mask,
'decoder_head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'encoder_outputs': encoder_outputs_1,
'past_key_values': past_key_values_1,
'inputs_embeds': inputs_embeds,
'use_cache': use_cache,
'output_attentions': False,
'output_hidden_states': False,
'return_dict': None,
'use_mixed': False,
'use_head': use_head_1,
}
out1 = self.model1(**args1)
softmax_0 = torch.exp(out1.logits)
args2 = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'decoder_attention_mask': decoder_attention_mask,
'head_mask': head_mask,
'decoder_head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'encoder_outputs': encoder_outputs_2,
'past_key_values': past_key_values_2,
'inputs_embeds': inputs_embeds,
'use_cache': use_cache,
'output_attentions': output_attentions,
'output_hidden_states': output_hidden_states,
'return_dict': None,
'use_mixed': False,
'use_head': use_head_2,
}
out2 = self.model2(**args2)
softmax_1 = torch.exp(out2.logits)
softmax_0 = softmax_0 * gate_prob
softmax_1 = softmax_1 * (1 - gate_prob)
lm_logits = torch.log(softmax_0 + softmax_1)
return_output = Seq2SeqLMOutput(
logits=lm_logits,
past_key_values_1=out1.past_key_values,
past_key_values_2=out2.past_key_values)
return return_output
# unchanged
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past_1=None,
past_2=None,
attention_mask=None,
head_mask=None,
use_cache=None,
encoder_outputs_1=None,
encoder_outputs_2=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past_1 is not None and past_2 is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs_1": encoder_outputs_1,
"encoder_outputs_2": encoder_outputs_2,
"past_key_values_1": past_1,
"past_key_values_2": past_2,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def load_model(path):
args = json.load(open(path))
config_class, model_class = BartConfig, multi_head_utils.ConditionalGenerationCustomBartMultHeads
config = config_class.from_pretrained(args['path'])
model = model_class.from_pretrained(
args['path'],
from_tf=bool(".ckpt" in args['path']),
config=config)
return model, args, config
def evaluate(args, eval_dataset, model: PreTrainedModel, args1, args2, tokenizer: PreTrainedTokenizer,
suffix="") -> Dict:
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
if args.generate:
f_out = open(os.path.join(eval_output_dir, 'test_out%s.txt' % suffix), 'w')
print(eval_output_dir)
k = 0
with torch.no_grad():
model.eval()
for batch in eval_dataloader:
batch = tuple(t.to(args.device) for t in batch)
input_ids, input_attention_mask, decoder_ids = batch[0], batch[1], batch[2]
for j in range(input_ids.shape[0]):
gold = tokenizer.decode(decoder_ids[j], skip_special_tokens=True)
input = tokenizer.decode(input_ids[j], skip_special_tokens=True)
input_args = {'input_ids': input_ids[j].unsqueeze(0),
'attention_mask': input_attention_mask[j].unsqueeze(0), 'num_beams': 6,
'length_penalty': 2, 'no_repeat_ngram_size': 3, 'max_length': 200, 'min_length': 12,
'top_k': 30, 'top_p': 0.5, 'do_sample': True,
'decoder_start_token_id': tokenizer.bos_token_id, 'num_return_sequences': 1,
'gate_prob': args.gate_probability, 'use_head_1': args1['use_head'],
'use_head_2': args2['use_head']}
gen = model.generate(**input_args)
gen = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in
gen]
# gen = gen[0]
print(gen[0].strip())
f_out.write(input + '\n')
f_out.write(gold + '\n')
for g in gen:
f_out.write(g.strip() + '\n')
f_out.write('\n')
k += 1
if k > 1000:
break
f_out.close()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_type",
default=None,
type=str,
help="base model, used to load tokenizer",
)
parser.add_argument(
"--model_1_config",
default=None,
type=str,
help="Path to model 1 config",
)
parser.add_argument(
"--model_2_config",
default=None,
type=str,
required=True,
help="Path to model 2 config",
)
parser.add_argument(
"--test_data_file",
default=None,
type=str,
required=True,
help="Evaluation data file to evaluate the perplexity on (a text file).",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--max_seq_length",
default=1024,
type=int,
help="The maximum total input sequence length after tokenization.",
)
parser.add_argument(
"--max_decoder_length",
default=128,
type=int,
help="The maximum total decoder sequence length after tokenization.",
)
parser.add_argument("--per_gpu_eval_batch_size", default=32, type=int, help="Batch size evaluation.", )
parser.add_argument("--gpu_device", type=int, default=0, help="gpu device")
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached data sets", )
# custom flags
parser.add_argument("--generate", action="store_true", help="Generate summaries for dev set", )
parser.add_argument("--dump_posteriors", action="store_true", help="Dump posterior probs at intermediate steps", )
parser.add_argument("--gate_probability", type=float, default=None, help="gate prob")
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
args.n_gpu = 1
device = torch.device("cuda", args.gpu_device)
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
filename=os.path.join(args.output_dir, 'model.log')
)
# Set seed
model1, args1, config = load_model(args.model_1_config)
model1.to(args.device)
model2, args2, _ = load_model(args.model_2_config)
model2.to(args.device)
f_out = open(os.path.join(args.output_dir, 'model_configs.json'), 'w')
json.dump(args1, f_out)
f_out.write('\n')
json.dump(args2, f_out)
f_out.write('\n')
json.dump({'gate_prob': args.gate_probability}, f_out)
f_out.write('\n')
f_out.close()
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
model = BartModelCombined(model1, model2, config)
eval_dataset = train_seq2seq_utils.load_and_cache_examples(args, tokenizer, 'test')
evaluate(args, eval_dataset, model, args1, args2, tokenizer, 'final')
logger.info("Training/evaluation parameters %s", args)
if __name__ == "__main__":
main()
| [((23, 9, 23, 36), 'logging.getLogger', 'logging.getLogger', ({(23, 27, 23, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((170, 19, 170, 50), 'torch.utils.data.SequentialSampler', 'SequentialSampler', ({(170, 37, 170, 49): 'eval_dataset'}, {}), '(eval_dataset)', False, 'from torch.utils.data import DataLoader, SequentialSampler\n'), ((171, 22, 171, 101), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, SequentialSampler\n'), ((225, 13, 225, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((290, 13, 290, 50), 'torch.device', 'torch.device', ({(290, 26, 290, 32): '"""cuda"""', (290, 34, 290, 49): 'args.gpu_device'}, {}), "('cuda', args.gpu_device)", False, 'import torch\n'), ((309, 4, 309, 27), 'json.dump', 'json.dump', ({(309, 14, 309, 19): 'args1', (309, 21, 309, 26): 'f_out'}, {}), '(args1, f_out)', False, 'import json\n'), ((311, 4, 311, 27), 'json.dump', 'json.dump', ({(311, 14, 311, 19): 'args2', (311, 21, 311, 26): 'f_out'}, {}), '(args2, f_out)', False, 'import json\n'), ((313, 4, 313, 58), 'json.dump', 'json.dump', ({(313, 14, 313, 50): "{'gate_prob': args.gate_probability}", (313, 52, 313, 57): 'f_out'}, {}), "({'gate_prob': args.gate_probability}, f_out)", False, 'import json\n'), ((317, 16, 317, 68), 'transformers.BartTokenizer.from_pretrained', 'BartTokenizer.from_pretrained', ({(317, 46, 317, 67): '"""facebook/bart-large"""'}, {}), "('facebook/bart-large')", False, 'from transformers import PreTrainedModel, PreTrainedTokenizer, BartConfig, BartTokenizer\n'), ((320, 19, 320, 87), 'train_seq2seq_utils.load_and_cache_examples', 'train_seq2seq_utils.load_and_cache_examples', ({(320, 63, 320, 67): 'args', (320, 69, 320, 78): 'tokenizer', (320, 80, 320, 86): '"""test"""'}, {}), "(args, tokenizer, 'test')", False, 'import train_seq2seq_utils\n'), ((87, 20, 87, 42), 'torch.exp', 'torch.exp', ({(87, 30, 87, 41): 'out1.logits'}, {}), '(out1.logits)', False, 'import torch\n'), ((108, 20, 108, 42), 'torch.exp', 'torch.exp', ({(108, 30, 108, 41): 'out2.logits'}, {}), '(out2.logits)', False, 'import torch\n'), ((113, 20, 113, 52), 'torch.log', 'torch.log', ({(113, 30, 113, 51): 'softmax_0 + softmax_1'}, {}), '(softmax_0 + softmax_1)', False, 'import torch\n'), ((114, 24, 117, 51), 'transformers.modeling_outputs.Seq2SeqLMOutput', 'Seq2SeqLMOutput', (), '', False, 'from transformers.modeling_outputs import Seq2SeqLMOutput\n'), ((166, 11, 166, 42), 'os.path.exists', 'os.path.exists', ({(166, 26, 166, 41): 'eval_output_dir'}, {}), '(eval_output_dir)', False, 'import os\n'), ((167, 8, 167, 36), 'os.makedirs', 'os.makedirs', ({(167, 20, 167, 35): 'eval_output_dir'}, {}), '(eval_output_dir)', False, 'import os\n'), ((286, 11, 286, 42), 'os.path.exists', 'os.path.exists', ({(286, 26, 286, 41): 'args.output_dir'}, {}), '(args.output_dir)', False, 'import os\n'), ((287, 8, 287, 36), 'os.makedirs', 'os.makedirs', ({(287, 20, 287, 35): 'args.output_dir'}, {}), '(args.output_dir)', False, 'import os\n'), ((308, 17, 308, 68), 'os.path.join', 'os.path.join', ({(308, 30, 308, 45): 'args.output_dir', (308, 47, 308, 67): '"""model_configs.json"""'}, {}), "(args.output_dir, 'model_configs.json')", False, 'import os\n'), ((179, 21, 179, 77), 'os.path.join', 'os.path.join', ({(179, 34, 179, 49): 'eval_output_dir', (179, 51, 179, 76): "'test_out%s.txt' % suffix"}, {}), "(eval_output_dir, 'test_out%s.txt' % suffix)", False, 'import os\n'), ((183, 13, 183, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((298, 17, 298, 59), 'os.path.join', 'os.path.join', ({(298, 30, 298, 45): 'args.output_dir', (298, 47, 298, 58): '"""model.log"""'}, {}), "(args.output_dir, 'model.log')", False, 'import os\n')] |
andkononykhin/plenum | stp_core/common/logging/handlers.py | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | import logging
class CallbackHandler(logging.Handler):
def __init__(self, typestr, default_tags, callback, override_tags):
"""
Initialize the handler.
"""
super().__init__()
self.callback = callback
self.tags = default_tags
self.update_tags(override_tags or {})
self.typestr = typestr
def update_tags(self, override_tags):
self.tags.update(override_tags)
def emit(self, record):
"""
Passes the log record back to the CLI for rendering
"""
should_cb = None
attr_val = None
if hasattr(record, self.typestr):
attr_val = getattr(record, self.typestr)
should_cb = bool(attr_val)
if should_cb is None and record.levelno >= logging.INFO:
should_cb = True
if hasattr(record, 'tags'):
for t in record.tags:
if t in self.tags:
if self.tags[t]:
should_cb = True
continue
else:
should_cb = False
break
if should_cb:
self.callback(record, attr_val)
class CliHandler(CallbackHandler):
def __init__(self, callback, override_tags=None):
default_tags = {
"add_replica": True
}
super().__init__(typestr="cli",
default_tags=default_tags,
callback=callback,
override_tags=override_tags)
class DemoHandler(CallbackHandler):
def __init__(self, callback, override_tags=None):
default_tags = {
"add_replica": True
}
super().__init__(typestr="demo",
default_tags=default_tags,
callback=callback,
override_tags=override_tags)
class TestingHandler(logging.Handler):
def __init__(self, tester):
"""
Initialize the handler.
"""
super().__init__()
self.tester = tester
def emit(self, record):
"""
Captures a record.
"""
self.tester(record)
| [] |
Amohammadi2/django-SPA-blog | blog/migrations/__init__.py | 5dc10894ba360569b4849cfda0c3340ea5a15fb8 | # you just need to add some information here
| [] |
iqtek/amocrn_asterisk_ng | amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/core/__init__.py | 429a8d0823b951c855a49c1d44ab0e05263c54dc | from .IFileConverter import IFileConverter
| [] |
cuenca-mx/agave | tests/blueprint/test_decorators.py | d4719bdbab8e200c98d206475df6adb275e9fdcc | from functools import wraps
from agave.blueprints.decorators import copy_attributes
def i_am_test(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.i_am_test = True
return wrapper
class TestResource:
@i_am_test
def retrieve(self) -> str:
return 'hello'
def test_copy_properties_from() -> None:
def retrieve():
...
assert not hasattr(retrieve, 'i_am_test')
retrieve = copy_attributes(TestResource)(retrieve)
assert hasattr(retrieve, 'i_am_test')
| [((7, 5, 7, 16), 'functools.wraps', 'wraps', ({(7, 11, 7, 15): 'func'}, {}), '(func)', False, 'from functools import wraps\n'), ((26, 15, 26, 44), 'agave.blueprints.decorators.copy_attributes', 'copy_attributes', ({(26, 31, 26, 43): 'TestResource'}, {}), '(TestResource)', False, 'from agave.blueprints.decorators import copy_attributes\n')] |
hanhan9449/mace | tools/python/utils/config_parser.py | 63feaf5055bab6a081d36edfab8f963a624899aa | # Copyright 2019 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import copy
import yaml
from enum import Enum
from utils.util import mace_check
from utils.util import MaceLogger
from py_proto import mace_pb2
CPP_KEYWORDS = [
'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel',
'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor',
'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t',
'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast',
'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default',
'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if',
'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace',
'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',
'private', 'protected', 'public', 'register', 'reinterpret_cast',
'requires', 'return', 'short', 'signed', 'sizeof', 'static',
'static_assert', 'static_cast', 'struct', 'switch', 'synchronized',
'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef',
'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',
'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final',
'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else',
'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include',
'line', 'error', 'pragma',
]
def sanitize_load(s):
# do not let yaml parse ON/OFF to boolean
for w in ["ON", "OFF", "on", "off"]:
s = re.sub(r":\s+" + w + "$", r": '" + w + "'", s)
# sub ${} to env value
s = re.sub(r"\${(\w+)}", lambda x: os.environ[x.group(1)], s)
return yaml.load(s)
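# Illustrative example (not part of the original file), assuming MACE_ROOT is set
# in the environment:
#     sanitize_load("model_file_path: ${MACE_ROOT}/model.pb")
# expands ${MACE_ROOT} before the string reaches yaml.load, while ON/OFF values
# are quoted so they stay strings rather than being parsed as YAML booleans.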
def parse(path):
with open(path) as f:
config = sanitize_load(f.read())
return config
def parse_device_info(path):
conf = parse(path)
return conf["devices"]
class ModelKeys(object):
platform = "platform"
runtime = "runtime"
models = 'models'
graph_optimize_options = "graph_optimize_options"
input_tensors = "input_tensors"
input_shapes = "input_shapes"
input_data_types = "input_data_types"
input_data_formats = "input_data_formats"
input_ranges = "input_ranges"
output_tensors = "output_tensors"
output_shapes = "output_shapes"
output_data_types = "output_data_types"
output_data_formats = "output_data_formats"
check_tensors = "check_tensors"
check_shapes = "check_shapes"
model_file_path = "model_file_path"
model_sha256_checksum = "model_sha256_checksum"
weight_file_path = "weight_file_path"
weight_sha256_checksum = "weight_sha256_checksum"
quantize_range_file = "quantize_range_file"
quantize = "quantize"
quantize_schema = "quantize_schema"
quantize_large_weights = "quantize_large_weights"
quantize_stat = "quantize_stat"
change_concat_ranges = "change_concat_ranges"
winograd = "winograd"
cl_mem_type = "cl_mem_type"
data_type = "data_type"
subgraphs = "subgraphs"
validation_inputs_data = "validation_inputs_data"
class DataFormat(Enum):
NONE = 0
NHWC = 1
NCHW = 2
HWIO = 100
OIHW = 101
HWOI = 102
OHWI = 103
AUTO = 1000
def parse_data_format(str):
str = str.upper()
mace_check(str in [e.name for e in DataFormat],
"unknown data format %s" % str)
return DataFormat[str]
class DeviceType(Enum):
CPU = 0
GPU = 2
HEXAGON = 3
HTA = 4
APU = 5
CPU_GPU = 100
DEVICE_MAP = {
"cpu": DeviceType.CPU,
"gpu": DeviceType.GPU,
"hexagon": DeviceType.HEXAGON,
"dsp": DeviceType.HEXAGON,
"hta": DeviceType.HTA,
"apu": DeviceType.APU,
"cpu+gpu": DeviceType.CPU_GPU
}
def parse_device_type(str):
mace_check(str in DEVICE_MAP, "unknown device %s" % str)
return DEVICE_MAP[str]
class Platform(Enum):
TENSORFLOW = 0
CAFFE = 1
ONNX = 2
MEGENGINE = 3
def parse_platform(str):
str = str.upper()
mace_check(str in [e.name for e in Platform],
"unknown platform %s" % str)
return Platform[str]
DATA_TYPE_MAP = {
'float32': mace_pb2.DT_FLOAT,
'int32': mace_pb2.DT_INT32,
}
def parse_data_type(str):
if str == "float32":
return mace_pb2.DT_FLOAT
elif str == "int32":
return mace_pb2.DT_INT32
else:
mace_check(False, "data type %s not supported" % str)
def parse_internal_data_type(str):
if str == 'fp32_fp32':
return mace_pb2.DT_FLOAT
elif str == 'bf16_fp32':
return mace_pb2.DT_BFLOAT16
else:
return mace_pb2.DT_HALF
def to_list(x):
if isinstance(x, list):
return x
else:
return [x]
def parse_int_array(xs):
    if len(xs) == 0:
return [1]
return [int(x) for x in xs.split(",")]
def parse_float_array(xs):
return [float(x) for x in xs.split(",")]
def normalize_model_config(conf):
conf = copy.deepcopy(conf)
if ModelKeys.subgraphs in conf:
subgraph = conf[ModelKeys.subgraphs][0]
del conf[ModelKeys.subgraphs]
conf.update(subgraph)
conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])
conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])
if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1:
conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT
else:
if ModelKeys.data_type in conf:
conf[ModelKeys.data_type] = parse_internal_data_type(
conf[ModelKeys.data_type])
else:
conf[ModelKeys.data_type] = mace_pb2.DT_HALF
# parse input
conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])
conf[ModelKeys.input_tensors] = [str(i) for i in
conf[ModelKeys.input_tensors]]
input_count = len(conf[ModelKeys.input_tensors])
conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.input_shapes])]
mace_check(
len(conf[ModelKeys.input_shapes]) == input_count,
"input node count and shape count do not match")
input_data_types = [parse_data_type(dt) for dt in
to_list(conf.get(ModelKeys.input_data_types,
["float32"]))]
if len(input_data_types) == 1 and input_count > 1:
input_data_types = [input_data_types[0]] * input_count
mace_check(len(input_data_types) == input_count,
"the number of input_data_types should be "
"the same as input tensors")
conf[ModelKeys.input_data_types] = input_data_types
input_data_formats = [parse_data_format(df) for df in
to_list(conf.get(ModelKeys.input_data_formats,
["NHWC"]))]
if len(input_data_formats) == 1 and input_count > 1:
input_data_formats = [input_data_formats[0]] * input_count
mace_check(len(input_data_formats) == input_count,
"the number of input_data_formats should be "
"the same as input tensors")
conf[ModelKeys.input_data_formats] = input_data_formats
input_ranges = [parse_float_array(r) for r in
to_list(conf.get(ModelKeys.input_ranges,
["-1.0,1.0"]))]
if len(input_ranges) == 1 and input_count > 1:
input_ranges = [input_ranges[0]] * input_count
mace_check(len(input_ranges) == input_count,
"the number of input_ranges should be "
"the same as input tensors")
conf[ModelKeys.input_ranges] = input_ranges
# parse output
conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])
conf[ModelKeys.output_tensors] = [str(i) for i in
conf[ModelKeys.output_tensors]]
output_count = len(conf[ModelKeys.output_tensors])
conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.output_shapes])]
    mace_check(len(conf[ModelKeys.output_shapes]) == output_count,
"output node count and shape count do not match")
output_data_types = [parse_data_type(dt) for dt in
to_list(conf.get(ModelKeys.output_data_types,
["float32"]))]
if len(output_data_types) == 1 and output_count > 1:
output_data_types = [output_data_types[0]] * output_count
mace_check(len(output_data_types) == output_count,
"the number of output_data_types should be "
"the same as output tensors")
conf[ModelKeys.output_data_types] = output_data_types
output_data_formats = [parse_data_format(df) for df in
to_list(conf.get(ModelKeys.output_data_formats,
["NHWC"]))]
if len(output_data_formats) == 1 and output_count > 1:
output_data_formats = [output_data_formats[0]] * output_count
mace_check(len(output_data_formats) == output_count,
"the number of output_data_formats should be "
"the same as output tensors")
conf[ModelKeys.output_data_formats] = output_data_formats
if ModelKeys.check_tensors in conf:
conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])
conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.check_shapes])]
mace_check(len(conf[ModelKeys.check_tensors]) == len(
conf[ModelKeys.check_shapes]),
"check tensors count and shape count do not match.")
MaceLogger.summary(conf)
return conf
| [((58, 11, 58, 23), 'yaml.load', 'yaml.load', ({(58, 21, 58, 22): 's'}, {}), '(s)', False, 'import yaml\n'), ((119, 4, 120, 46), 'utils.util.mace_check', 'mace_check', ({(119, 15, 119, 50): '(str in [e.name for e in DataFormat])', (120, 15, 120, 45): "('unknown data format %s' % str)"}, {}), "(str in [e.name for e in DataFormat], 'unknown data format %s' % str)", False, 'from utils.util import mace_check\n'), ((145, 4, 145, 60), 'utils.util.mace_check', 'mace_check', ({(145, 15, 145, 32): '(str in DEVICE_MAP)', (145, 34, 145, 59): "('unknown device %s' % str)"}, {}), "(str in DEVICE_MAP, 'unknown device %s' % str)", False, 'from utils.util import mace_check\n'), ((158, 4, 159, 43), 'utils.util.mace_check', 'mace_check', ({(158, 15, 158, 48): '(str in [e.name for e in Platform])', (159, 15, 159, 42): "('unknown platform %s' % str)"}, {}), "(str in [e.name for e in Platform], 'unknown platform %s' % str)", False, 'from utils.util import mace_check\n'), ((205, 11, 205, 30), 'copy.deepcopy', 'copy.deepcopy', ({(205, 25, 205, 29): 'conf'}, {}), '(conf)', False, 'import copy\n'), ((303, 4, 303, 28), 'utils.util.MaceLogger.summary', 'MaceLogger.summary', ({(303, 23, 303, 27): 'conf'}, {}), '(conf)', False, 'from utils.util import MaceLogger\n'), ((54, 12, 54, 58), 're.sub', 're.sub', ({(54, 19, 54, 36): "':\\\\s+' + w + '$'", (54, 38, 54, 54): '": \'" + w + "\'"', (54, 56, 54, 57): 's'}, {}), '(\':\\\\s+\' + w + \'$\', ": \'" + w + "\'", s)', False, 'import re\n'), ((175, 8, 175, 61), 'utils.util.mace_check', 'mace_check', ({(175, 19, 175, 24): '(False)', (175, 26, 175, 60): "('data type %s not supported' % str)"}, {}), "(False, 'data type %s not supported' % str)", False, 'from utils.util import mace_check\n')] |
sami-ets/DeepNormalize | main_cross_testing_iseg.py | 5ed53280d98a201d45bb9973e79736136273eaea | # -*- coding: utf-8 -*-
# Copyright 2019 Pierre-Luc Delisle. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import multiprocessing
import numpy as np
import os
import random
import torch
import torch.backends.cudnn as cudnn
from kerosene.configs.configs import RunConfiguration, DatasetConfiguration
from kerosene.configs.parsers import YamlConfigurationParser
from kerosene.loggers.visdom import PlotType, PlotFrequency
from kerosene.loggers.visdom.config import VisdomConfiguration
from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData
from kerosene.training.trainers import ModelTrainerFactory
from samitorch.inputs.utils import augmented_sample_collate
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import DataLoader
from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType
from deepNormalize.factories.customModelFactory import CustomModelFactory
from deepNormalize.factories.customTrainerFactory import TrainerFactory
from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory
from deepNormalize.nn.criterions import CustomCriterionFactory
from deepNormalize.utils.constants import *
from deepNormalize.utils.image_slicer import ImageReconstructor
cudnn.benchmark = True
cudnn.enabled = True
np.random.seed(42)
random.seed(42)
if __name__ == '__main__':
# Basic settings
logging.basicConfig(level=logging.INFO)
torch.set_num_threads(multiprocessing.cpu_count())
torch.set_num_interop_threads(multiprocessing.cpu_count())
args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args()
# Create configurations.
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level)
model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file)
if not isinstance(model_trainer_configs, list):
model_trainer_configs = [model_trainer_configs]
dataset_configs = YamlConfigurationParser.parse_section(args.config_file, "dataset")
dataset_configs = {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()}
data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, "data_augmentation")
config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())),
list(map(lambda config: config.to_html(), model_trainer_configs))]
# Prepare the data.
train_datasets = list()
valid_datasets = list()
test_datasets = list()
reconstruction_datasets = list()
iSEG_train = None
iSEG_CSV = None
MRBrainS_train = None
MRBrainS_CSV = None
ABIDE_train = None
ABIDE_CSV = None
iSEG_augmentation_strategy = None
MRBrainS_augmentation_strategy = None
ABIDE_augmentation_strategy = None
# Initialize the model trainers
model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(),
criterion_factory=CustomCriterionFactory())
model_trainers = model_trainer_factory.create(model_trainer_configs)
if not isinstance(model_trainers, list):
model_trainers = [model_trainers]
# Create datasets
if dataset_configs.get("iSEG", None) is not None:
iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["iSEG"].path,
modalities=dataset_configs["iSEG"].modalities,
dataset_id=ISEG_ID,
test_size=dataset_configs["iSEG"].validation_split,
max_subjects=dataset_configs["iSEG"].max_subjects,
max_num_patches=dataset_configs["iSEG"].max_num_patches,
augment=dataset_configs["iSEG"].augment,
patch_size=dataset_configs["iSEG"].patch_size,
step=dataset_configs["iSEG"].step,
test_patch_size=dataset_configs["iSEG"].test_patch_size,
test_step=dataset_configs["iSEG"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(iSEG_train)
valid_datasets.append(iSEG_valid)
reconstruction_datasets.append(iSEG_reconstruction)
if dataset_configs.get("MRBrainS", None) is not None:
MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["MRBrainS"].path,
modalities=dataset_configs["MRBrainS"].modalities,
dataset_id=MRBRAINS_ID,
test_size=dataset_configs["MRBrainS"].validation_split,
max_subjects=dataset_configs["MRBrainS"].max_subjects,
max_num_patches=dataset_configs["MRBrainS"].max_num_patches,
augment=dataset_configs["MRBrainS"].augment,
patch_size=dataset_configs["MRBrainS"].patch_size,
step=dataset_configs["MRBrainS"].step,
test_patch_size=dataset_configs["MRBrainS"].test_patch_size,
test_step=dataset_configs["MRBrainS"].test_step,
data_augmentation_config=data_augmentation_config)
test_datasets.append(MRBrainS_test)
reconstruction_datasets.append(MRBrainS_reconstruction)
if dataset_configs.get("ABIDE", None) is not None:
ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["ABIDE"].path,
modalities=dataset_configs["ABIDE"].modalities,
dataset_id=ABIDE_ID,
sites=dataset_configs["ABIDE"].sites,
max_subjects=dataset_configs["ABIDE"].max_subjects,
test_size=dataset_configs["ABIDE"].validation_split,
max_num_patches=dataset_configs["ABIDE"].max_num_patches,
augment=dataset_configs["ABIDE"].augment,
patch_size=dataset_configs["ABIDE"].patch_size,
step=dataset_configs["ABIDE"].step,
test_patch_size=dataset_configs["ABIDE"].test_patch_size,
test_step=dataset_configs["ABIDE"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(ABIDE_train)
valid_datasets.append(ABIDE_valid)
test_datasets.append(ABIDE_test)
reconstruction_datasets.append(ABIDE_reconstruction)
if len(list(dataset_configs.keys())) == 2:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
segment=True,
batch_size=8)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
is_ground_truth=True,
batch_size=50)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
else:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
normalize_and_segment=True,
batch_size=4)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0],
ABIDE_reconstruction._target_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
is_ground_truth=True)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
# Concat datasets.
if len(dataset_configs) > 1:
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
valid_dataset = torch.utils.data.ConcatDataset(valid_datasets)
test_dataset = torch.utils.data.ConcatDataset(test_datasets)
else:
train_dataset = train_datasets[0]
valid_dataset = valid_datasets[0]
test_dataset = test_datasets[0]
# Create loaders.
dataloaders = list(map(lambda dataset: DataLoader(dataset,
training_config.batch_size,
sampler=None,
shuffle=True,
num_workers=args.num_workers,
collate_fn=augmented_sample_collate,
drop_last=True,
pin_memory=True),
[train_dataset, valid_dataset, test_dataset]))
# Initialize the loggers.
visdom_config = VisdomConfiguration.from_yml(args.config_file, "visdom")
exp = args.config_file.split("/")[-3:]
if visdom_config.save_destination is not None:
save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1],
os.path.basename(
os.path.normpath(visdom_config.env)))
else:
save_folder = "saves/{}".format(os.path.basename(os.path.normpath(visdom_config.env)))
[os.makedirs("{}/{}".format(save_folder, model), exist_ok=True)
for model in
["Discriminator", "Generator", "Segmenter"]]
visdom_logger = VisdomLogger(visdom_config)
visdom_logger(VisdomData("Experiment", "Experiment Config", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None,
config_html))
visdom_logger(VisdomData("Experiment", "Patch count", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH,
x=[len(iSEG_train) if iSEG_train is not None else 0,
len(MRBrainS_train) if MRBrainS_train is not None else 0,
len(ABIDE_train) if ABIDE_train is not None else 0],
y=["iSEG", "MRBrainS", "ABIDE"], params={"opts": {"title": "Patch count"}}))
trainer = TrainerFactory(training_config.trainer).create(training_config,
model_trainers,
dataloaders,
reconstruction_datasets,
None,
input_reconstructor,
segmentation_reconstructor,
augmented_input_reconstructor,
None,
gt_reconstructor,
run_config,
dataset_configs,
save_folder,
visdom_logger)
trainer.train(training_config.nb_epochs)
| [((46, 0, 46, 18), 'numpy.random.seed', 'np.random.seed', ({(46, 15, 46, 17): '(42)'}, {}), '(42)', True, 'import numpy as np\n'), ((47, 0, 47, 15), 'random.seed', 'random.seed', ({(47, 12, 47, 14): '(42)'}, {}), '(42)', False, 'import random\n'), ((51, 4, 51, 43), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((57, 17, 57, 117), 'kerosene.configs.configs.RunConfiguration', 'RunConfiguration', (), '', False, 'from kerosene.configs.configs import RunConfiguration, DatasetConfiguration\n'), ((58, 45, 58, 92), 'kerosene.configs.parsers.YamlConfigurationParser.parse', 'YamlConfigurationParser.parse', ({(58, 75, 58, 91): 'args.config_file'}, {}), '(args.config_file)', False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((61, 22, 61, 88), 'kerosene.configs.parsers.YamlConfigurationParser.parse_section', 'YamlConfigurationParser.parse_section', ({(61, 60, 61, 76): 'args.config_file', (61, 78, 61, 87): '"""dataset"""'}, {}), "(args.config_file, 'dataset')", False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((63, 31, 63, 107), 'kerosene.configs.parsers.YamlConfigurationParser.parse_section', 'YamlConfigurationParser.parse_section', ({(63, 69, 63, 85): 'args.config_file', (63, 87, 63, 106): '"""data_augmentation"""'}, {}), "(args.config_file, 'data_augmentation')", False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((246, 20, 246, 76), 'kerosene.loggers.visdom.config.VisdomConfiguration.from_yml', 'VisdomConfiguration.from_yml', ({(246, 49, 246, 65): 'args.config_file', (246, 67, 246, 75): '"""visdom"""'}, {}), "(args.config_file, 'visdom')", False, 'from kerosene.loggers.visdom.config import VisdomConfiguration\n'), ((258, 20, 258, 47), 'kerosene.loggers.visdom.visdom.VisdomLogger', 'VisdomLogger', ({(258, 33, 258, 46): 'visdom_config'}, {}), '(visdom_config)', False, 'from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData\n'), ((52, 26, 52, 53), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ({}, {}), '()', False, 'import multiprocessing\n'), ((53, 34, 53, 61), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ({}, {}), '()', False, 'import multiprocessing\n'), ((62, 26, 62, 49), 'kerosene.configs.configs.DatasetConfiguration', 'DatasetConfiguration', ({(62, 47, 62, 48): 'v'}, {}), '(v)', False, 'from kerosene.configs.configs import RunConfiguration, DatasetConfiguration\n'), ((93, 65, 105, 62), 'deepNormalize.inputs.datasets.iSEGSliceDatasetFactory.create_train_valid_test', 'iSEGSliceDatasetFactory.create_train_valid_test', (), '', False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((111, 81, 123, 62), 'deepNormalize.inputs.datasets.MRBrainSSliceDatasetFactory.create_train_valid_test', 'MRBrainSSliceDatasetFactory.create_train_valid_test', (), '', False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((128, 69, 141, 62), 'deepNormalize.inputs.datasets.ABIDESliceDatasetFactory.create_train_valid_test', 'ABIDESliceDatasetFactory.create_train_valid_test', (), '', False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((148, 37, 154, 25), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (), '', False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((155, 30, 160, 26), 
'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (), '', False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((161, 27, 167, 26), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (), '', False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((183, 37, 191, 25), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (), '', False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((192, 30, 198, 26), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (), '', False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((199, 27, 206, 33), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (), '', False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((226, 24, 226, 70), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', ({(226, 55, 226, 69): 'train_datasets'}, {}), '(train_datasets)', False, 'import torch\n'), ((227, 24, 227, 70), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', ({(227, 55, 227, 69): 'valid_datasets'}, {}), '(valid_datasets)', False, 'import torch\n'), ((228, 23, 228, 68), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', ({(228, 54, 228, 67): 'test_datasets'}, {}), '(test_datasets)', False, 'import torch\n'), ((260, 18, 261, 41), 'kerosene.loggers.visdom.visdom.VisdomData', 'VisdomData', ({(260, 29, 260, 41): '"""Experiment"""', (260, 43, 260, 62): '"""Experiment Config"""', (260, 64, 260, 82): 'PlotType.TEXT_PLOT', (260, 84, 260, 109): 'PlotFrequency.EVERY_EPOCH', (260, 111, 260, 115): 'None', (261, 29, 261, 40): 'config_html'}, {}), "('Experiment', 'Experiment Config', PlotType.TEXT_PLOT,\n PlotFrequency.EVERY_EPOCH, None, config_html)", False, 'from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData\n'), ((54, 11, 54, 73), 'deepNormalize.config.parsers.ArgsParserFactory.create_parser', 'ArgsParserFactory.create_parser', ({(54, 43, 54, 72): 'ArgsParserType.MODEL_TRAINING'}, {}), '(ArgsParserType.MODEL_TRAINING)', False, 'from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType\n'), ((85, 62, 85, 82), 'deepNormalize.factories.customModelFactory.CustomModelFactory', 'CustomModelFactory', ({}, {}), '()', False, 'from deepNormalize.factories.customModelFactory import CustomModelFactory\n'), ((86, 66, 86, 90), 'deepNormalize.nn.criterions.CustomCriterionFactory', 'CustomCriterionFactory', ({}, {}), '()', False, 'from deepNormalize.nn.criterions import CustomCriterionFactory\n'), ((169, 44, 178, 83), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (), '', False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((208, 44, 218, 83), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (), '', False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((268, 14, 268, 53), 'deepNormalize.factories.customTrainerFactory.TrainerFactory', 'TrainerFactory', ({(268, 29, 268, 52): 'training_config.trainer'}, {}), '(training_config.trainer)', False, 'from deepNormalize.factories.customTrainerFactory import TrainerFactory\n'), ((235, 43, 242, 70), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data.dataloader import DataLoader\n'), ((253, 57, 253, 92), 'os.path.normpath', 'os.path.normpath', ({(253, 74, 253, 91): 'visdom_config.env'}, {}), 
'(visdom_config.env)', False, 'import os\n'), ((251, 72, 251, 107), 'os.path.normpath', 'os.path.normpath', ({(251, 89, 251, 106): 'visdom_config.env'}, {}), '(visdom_config.env)', False, 'import os\n')] |
FaBoPlatform/RobotCarAI | docs/10.level3_demo_streaming/pc_server/server.py | c89d3330a2beda0f253733d3252b2b035b153b6b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Receives OpenCV image data from the client, runs lane detection, and sends control commands
# Server: Jetson TX2
# Client: Jetson TX2/Raspberry Pi3 Docker
# 1. Run FFMPEG UDP streaming on the client. 10 FPS for AWS, 1 FPS for Jetson TX2
# 2. Start the server
# 3. Start the client
# Required code changes
# lib/camera.py: vid = cv2.VideoCapture() must be adjusted to match your environment
# lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ must be adjusted to match your environment
'''
Python 3.6
Outgoing messages must be encoded with .encode('ascii'), .encode('utf-8'), etc.
Here, the OpenCV BGR image data sent from the client is converted with 'ascii', so 'ascii' is used consistently.
'''
print("wait. launching...")
import socket, select
import time
import cv2
import numpy as np
import time
import os
import sys
import logging
import threading
import numpy as np
from lib.functions import *
from lib.object_detection import ObjectDetection
from lib.opencv_lane_detection import LaneDetection
from lib.webcam import WebcamVideoStream
# Logging configuration
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s',
)
# Flag that keeps the analysis/transmission thread running
is_analyze_running = False
sock = None
out = None
# x,y size in meters of the IPM-transformed image (including the black area)
X_METER=1.5
Y_METER=1
# Lane detection class
ld = None
# Object detection class
od = None
def do_analyze():
global is_analyze_running
global sock
global out
global X_METER
global Y_METER
global ld
global od
    # Whether to save the video
IS_SAVE = True
OUTPUT_DIR ='./'
OUTPUT_FILENAME = 'received.avi'
HANDLE_ANGLE = 42
frame_counter = 0
fourcc = None
control = None
roi_vertices = None
ipm_vertices = None
speed = None
    # Prepare the video capture
camera = WebcamVideoStream()
cols,rows,fps,fourcc = camera.init_webcam()
camera.start()
fps = 1
if IS_SAVE:
out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows)))
########################################
    # Prepare lane detection
########################################
ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows)
while is_analyze_running:
frame_start_time = time.time()
#time.sleep(0.2)
########################################
        # Capture a frame
########################################
cv_bgr = camera.read()
frame_counter += 1
########################################
        # Object detection
########################################
        # Save the frame to the avi video
if IS_SAVE:
out.write(cv_bgr)
rclasses,rscores,rbboxes = od.get_detection(cv_bgr)
print(rclasses,rscores,rbboxes)
if len(rclasses) > 0:
prediction_class = np.min(rclasses)
if prediction_class == 1:
                # Detected a stop sign
is_need_header_receive = True
control='0,0,'
sock.sendall(("CONTROL,"+ control).encode('ascii'))
continue
elif prediction_class == 2:
                # Detected a "10" speed sign
speed = 40
elif prediction_class == 3:
                # Detected a "20" speed sign
speed = 50
elif prediction_class == 4:
                # Detected a "30" speed sign
speed = 60
else:
            # No object detected
if speed is None:
speed = 40
handle_angle = 0
########################################
        # Lane detection
########################################
ld.cv_bgr = cv_bgr
        # Detect the lane lines
try:
tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \
meters_from_center = ld.lane_detection()
except:
            # Lane detection failed
is_need_header_receive = True
control='0,0,'
sock.sendall(("CONTROL,"+ control).encode('ascii'))
continue
########################################
        # Adjust the speed
########################################
#if np.abs(angle2_deg) > np.abs(angle1_deg):
# speed = 50
#else:
# speed = 60
'''
        Left/right sign conventions:
        tilt_deg: - means right, + means left
        angle_deg: + means right, - means left
        meters_from_center: - means the car is to the right, + means it is to the left
        handle_angle: + means steer right, - means steer left
'''
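        # Worked example (hypothetical values): if the car is 0.15 m to the left
        # (meters_from_center = +0.15) and the far segment curves left
        # (tilt2_deg > 0), the branching below selects handle_angle = HANDLE_ANGLE/2,
        # i.e. a gentle 21-degree turn to the right.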
########################################
        # Adjust the steering angle
########################################
handle_angle = -1*tilt1_deg
if meters_from_center >= 0:
            # The car is to the left
if np.abs(meters_from_center)*100 > 20:
                # Very far to the left: turn right at full steering angle
handle_angle=HANDLE_ANGLE
elif np.abs(meters_from_center)*100 > 10:
if tilt2_deg > 0 :
                    # Off to the left with a left curve ahead: turn slightly right
handle_angle=HANDLE_ANGLE/2
else:
                    # Off to the left with a right curve ahead: turn right at full steering angle
handle_angle=HANDLE_ANGLE
else:
            # The car is to the right
if np.abs(meters_from_center)*100 > 20:
                # Very far to the right: turn left at full steering angle
handle_angle=-1*HANDLE_ANGLE
elif np.abs(meters_from_center)*100 > 10:
if tilt2_deg < 0 :
                    # Off to the right with a right curve ahead: turn slightly left
handle_angle=-1*HANDLE_ANGLE/2
else:
                    # Off to the right with a left curve ahead: turn left at full steering angle
handle_angle=-1*HANDLE_ANGLE
        # Clamp to the operable steering range
if handle_angle > HANDLE_ANGLE:
handle_angle = HANDLE_ANGLE
if handle_angle < -1*HANDLE_ANGLE:
handle_angle = -1*HANDLE_ANGLE
        # Send the vehicle control command
control=str(speed)+','+str(handle_angle)+','
print("speed={},handle_angle={},CONTROL,{}".format(speed,handle_angle,control))
sock.sendall(("CONTROL,"+ control).encode('ascii'))
frame_end_time = time.time()
print("FPS={}".format(round(1/(frame_end_time-frame_start_time),2)))
def main():
global is_analyze_running
global sock
global out
global ld
global od
    # Communication settings
HOST = '192.168.0.77' # Server IP Address
PORT = 6666 # Server TCP Port
#HOST = 'a32158c3da9f' # AWS Docker
#PORT = 8091 # AWS TCP Port
#HOST = '2204f9b0e871' # PC Docker
#PORT = 8091 # PC TCP Port
########################################
    # Prepare communication
########################################
connected_clients_sockets = []
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen(10)
connected_clients_sockets.append(server_socket)
    # Whether a header still needs to be received. Once a header has been received, parse the incoming data without passing it through encode('ascii')
is_need_header_receive = True
########################################
    # Prepare object detection
########################################
od = ObjectDetection()
print("Server start")
try:
while True:
########################################
            # Wait for incoming data
########################################
read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets, [], [])
for sock in read_sockets:
if sock == server_socket:
sockfd, client_address = server_socket.accept()
connected_clients_sockets.append(sockfd)
else:
                    # Headers sent from the client to the server must be 4096 bytes or less
packet = sock.recv(4096)
print(type(packet))
#
if is_need_header_receive:
print('header')
packet = packet.decode('ascii')
txt = str(packet)
if packet:
print('packet True')
if packet == 'START':
is_analyze_running = True
t = threading.Thread(target=do_analyze)
t.start()
elif packet.startswith('BYE'):
print('got BYE')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
else:
print('client disconnect')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
if not is_need_header_receive:
                        # Never reached
print('body')
if packet:
print('packet True')
is_need_header_receive = True
else:
print('data finished')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
except:
import traceback
traceback.print_exc()
finally:
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
server_socket.close()
if out is not None:
out.release()
if __name__ == '__main__':
main()
print("end server")
| [((35, 0, 37, 1), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((78, 13, 78, 32), 'lib.webcam.WebcamVideoStream', 'WebcamVideoStream', ({}, {}), '()', False, 'from lib.webcam import WebcamVideoStream\n'), ((88, 9, 88, 59), 'lib.opencv_lane_detection.LaneDetection', 'LaneDetection', (), '', False, 'from lib.opencv_lane_detection import LaneDetection\n'), ((220, 20, 220, 69), 'socket.socket', 'socket.socket', ({(220, 34, 220, 48): 'socket.AF_INET', (220, 50, 220, 68): 'socket.SOCK_STREAM'}, {}), '(socket.AF_INET, socket.SOCK_STREAM)', False, 'import socket, select\n'), ((231, 9, 231, 26), 'lib.object_detection.ObjectDetection', 'ObjectDetection', ({}, {}), '()', False, 'from lib.object_detection import ObjectDetection\n'), ((91, 27, 91, 38), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((198, 25, 198, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((83, 30, 83, 71), 'os.path.join', 'os.path.join', ({(83, 43, 83, 53): 'OUTPUT_DIR', (83, 55, 83, 70): 'OUTPUT_FILENAME'}, {}), '(OUTPUT_DIR, OUTPUT_FILENAME)', False, 'import os\n'), ((107, 31, 107, 47), 'numpy.min', 'np.min', ({(107, 38, 107, 46): 'rclasses'}, {}), '(rclasses)', True, 'import numpy as np\n'), ((240, 57, 240, 105), 'select.select', 'select.select', ({(240, 71, 240, 96): 'connected_clients_sockets', (240, 98, 240, 100): '[]', (240, 102, 240, 104): '[]'}, {}), '(connected_clients_sockets, [], [])', False, 'import socket, select\n'), ((297, 8, 297, 29), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n'), ((165, 15, 165, 41), 'numpy.abs', 'np.abs', ({(165, 22, 165, 40): 'meters_from_center'}, {}), '(meters_from_center)', True, 'import numpy as np\n'), ((177, 15, 177, 41), 'numpy.abs', 'np.abs', ({(177, 22, 177, 40): 'meters_from_center'}, {}), '(meters_from_center)', True, 'import numpy as np\n'), ((168, 17, 168, 43), 'numpy.abs', 'np.abs', ({(168, 24, 168, 42): 'meters_from_center'}, {}), '(meters_from_center)', True, 'import numpy as np\n'), ((180, 17, 180, 43), 'numpy.abs', 'np.abs', ({(180, 24, 180, 42): 'meters_from_center'}, {}), '(meters_from_center)', True, 'import numpy as np\n'), ((259, 36, 259, 71), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n')] |
zackorndorff/revsync | client.py | 17255aebd281edffb3f3330c21cda00039bc51a3 | from collections import defaultdict
import json
import re
import redis
import threading
import time
import traceback
import uuid
import base64
import binascii
TTL = 2
hash_keys = ('cmd', 'user')
cmd_hash_keys = {
'comment': ('addr',),
'extra_comment': ('addr',),
'area_comment': ('addr',),
'rename': ('addr',),
'stackvar_renamed': ('addr', 'offset', 'name',),
'struc_created': ('struc_name', 'is_union',),
'struc_deleted': ('struc_name',),
'struc_renamed': ('old_name', 'new_name',),
'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',),
'struc_member_deleted': ('struc_name', 'offset',),
'struc_member_renamed': ('struc_name', 'offset', 'member_name',),
'struc_member_changed': ('struc_name', 'offset', 'size',),
}
key_dec = {
'c': 'cmd',
'a': 'addr',
'u': 'user',
't': 'text',
'i': 'uuid',
'b': 'blocks'
}
key_enc = dict((v, k) for k, v in key_dec.items())
nick_filter = re.compile(r'[^a-zA-Z0-9_\-]')
def decode(data):
d = json.loads(data)
return dict((key_dec.get(k, k), v) for k, v in d.items())
def dtokey(d):
return tuple(((k, v) for k, v in sorted(d.items()) if k not in ('user', 'ts', 'uuid')))
def remove_ttl(a):
now = time.time()
return [d for d in a if now - d[0] < TTL]
class Client:
def __init__(self, host, port, nick, password=None):
self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5)
self.r.info()
self.nick = nick_filter.sub('_', nick)
self.ps = {}
self.nolock = threading.Lock()
self.nosend = defaultdict(list)
self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii'))
def debounce(self, no, data):
dkey = dtokey(data)
now = time.time()
with self.nolock:
for data in no:
ts = data[0]
key = data[1:]
if dkey == key and now - ts < TTL:
no.remove(data)
return True
return False
def _sub_thread(self, ps, cb, key):
for item in ps.listen():
try:
if item['type'] == 'message':
data = decode(item['data'])
if 'user' in data:
data['user'] = nick_filter.sub('_', data['user'])
# reject our own messages
if data.get('uuid') == self.uuid:
continue
with self.nolock:
self.nosend[key] = remove_ttl(self.nosend[key])
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data)
elif item['type'] == 'subscribe':
decoded = []
for data in self.r.lrange(key, 0, -1):
try:
decoded.append(decode(data))
except Exception:
print('error decoding history', data)
traceback.print_exc()
state = []
dedup = set()
for data in reversed(decoded):
cmd = data.get('cmd')
if cmd:
keys = hash_keys + cmd_hash_keys.get(cmd, ())
hashkey = tuple([str(data.get(k)) for k in keys])
if all(hashkey):
if hashkey in dedup:
continue
dedup.add(hashkey)
state.append(data)
for data in reversed(state):
try:
with self.nolock:
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data, replay=True)
except Exception:
print('error replaying history', data)
traceback.print_exc()
else:
print('unknown redis push', item)
except Exception:
print('error processing item', item)
traceback.print_exc()
def join(self, key, cb):
ps = self.r.pubsub()
ps.subscribe(key)
t = threading.Thread(target=self._sub_thread, args=(ps, cb, key))
t.daemon = True
t.start()
self.ps[key] = ps
self.publish(key, {'cmd': 'join'}, perm=False)
def leave(self, key):
ps = self.ps.pop(key, None)
if ps:
ps.unsubscribe(key)
def publish(self, key, data, perm=True, send_uuid=True):
if self.debounce(self.nosend[key], data):
return
data['user'] = self.nick
data['ts'] = self.r.time()[0]
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
if perm:
self.r.rpush(key, data)
self.r.publish(key, data)
def push(self, key, data, send_uuid=True):
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
self.r.lpush(key, data)
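# Illustrative usage sketch (not part of the original module); the host, port,
# nick and key below are hypothetical:
#
#     def on_message(key, data, replay=False):
#         print(key, data)
#
#     c = Client("localhost", 6379, "alice")
#     c.join("myproject:binary_hash", on_message)
#     c.publish("myproject:binary_hash", {"cmd": "rename", "addr": 0x401000, "text": "new_name"})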
| [((38, 14, 38, 44), 're.compile', 're.compile', ({(38, 25, 38, 43): '"""[^a-zA-Z0-9_\\\\-]"""'}, {}), "('[^a-zA-Z0-9_\\\\-]')", False, 'import re\n'), ((41, 8, 41, 24), 'json.loads', 'json.loads', ({(41, 19, 41, 23): 'data'}, {}), '(data)', False, 'import json\n'), ((48, 10, 48, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((53, 17, 53, 101), 'redis.StrictRedis', 'redis.StrictRedis', (), '', False, 'import redis\n'), ((57, 22, 57, 38), 'threading.Lock', 'threading.Lock', ({}, {}), '()', False, 'import threading\n'), ((58, 22, 58, 39), 'collections.defaultdict', 'defaultdict', ({(58, 34, 58, 38): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((63, 14, 63, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((126, 12, 126, 73), 'threading.Thread', 'threading.Thread', (), '', False, 'import threading\n'), ((147, 15, 147, 70), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((156, 15, 156, 70), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((121, 16, 121, 37), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n'), ((59, 60, 59, 72), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((85, 49, 85, 60), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((94, 28, 94, 49), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n'), ((116, 28, 116, 49), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n'), ((112, 57, 112, 68), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
pinheiroo27/ontask_b | ontask/condition/urls.py | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | # -*- coding: utf-8 -*-
"""URLs to manipulate columns."""
from django.urls import path
from ontask.condition import views
app_name = 'condition'
urlpatterns = [
#
# FILTERS
#
path(
'<int:pk>/create_filter/',
views.FilterCreateView.as_view(),
name='create_filter'),
path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'),
path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'),
#
# CONDITIONS
#
path(
'<int:pk>/create_condition/',
views.ConditionCreateView.as_view(),
name='create_condition'),
path(
'<int:pk>/edit_condition/',
views.edit_condition,
name='edit_condition'),
path(
'<int:pk>/delete_condition/',
views.delete_condition,
name='delete_condition'),
# Clone the condition
path(
'<int:pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
path(
'<int:pk>/<int:action_pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
]
| [((18, 4, 18, 72), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((19, 4, 19, 78), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((28, 4, 31, 30), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((32, 4, 35, 32), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((38, 4, 41, 31), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((42, 4, 45, 31), 'django.urls.path', 'path', (), '', False, 'from django.urls import path\n'), ((16, 8, 16, 40), 'ontask.condition.views.FilterCreateView.as_view', 'views.FilterCreateView.as_view', ({}, {}), '()', False, 'from ontask.condition import views\n'), ((26, 8, 26, 43), 'ontask.condition.views.ConditionCreateView.as_view', 'views.ConditionCreateView.as_view', ({}, {}), '()', False, 'from ontask.condition import views\n')] |
googleinterns/via-content-understanding | VideoClassification/SegmentLevelClassifier/model.py | ca12ebe6aa4da16224a8ca86dc45aaaaa7cfda09 | """Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Defines the architecture of the Video Classifier.
"""
import math
import tensorflow as tf
class NetVLAD(tf.keras.layers.Layer):
"""Applies NetVLAD to the input.
Args:
num_clusters: The number of clusters to use.
input_shape: 3D tensor denoting the input shape of the NetVLAD layer.
Input Shape:
3D tensor with shape: `(batch_size, time, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, feature_dim * num_clusters)`.
"""
def __init__(self, num_clusters, input_shape, **kwargs):
super().__init__(**kwargs)
if num_clusters <= 0:
raise ValueError("`num_clusters` must be greater than 1: %i" % num_clusters)
self.num_clusters = num_clusters
feature_dim = input_shape[-1]
if not isinstance(feature_dim, int):
feature_dim = feature_dim.value
self.fc = tf.keras.layers.Dense(
units=self.num_clusters,
activation=tf.nn.softmax,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="vlad_fc" + str(num_clusters)
)
self.cluster_centers = self.add_weight(
shape=(1, feature_dim, self.num_clusters),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=1.0 / math.sqrt(feature_dim)
),
trainable=True,
name="cluster_centers" + str(num_clusters)
)
self.feature_dim = feature_dim
self.max_frames = input_shape[-2]
def call(self, frames):
"""Apply the NetVLAD module to the given frames.
Args:
frames: A tensor with shape [batch_size, max_frames, feature_dim].
Returns:
vlad_out: A tensor with shape [batch_size, feature_dim * num_clusters].
Raises:
ValueError: If the `feature_dim` of input is not defined.
"""
feature_dim = self.feature_dim
max_frames = self.max_frames
frames = tf.reshape(frames, (-1, feature_dim))
activation = self.fc(frames)
activation = tf.reshape(activation, (-1, max_frames, self.num_clusters))
activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True)
cluster_activation = activation_sum * self.cluster_centers
frames = tf.reshape(frames, (-1, max_frames, feature_dim))
activation = tf.transpose(
tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0, 2, 1)
)
vlad_out = activation - cluster_activation
vlad_out = tf.nn.l2_normalize(vlad_out, 1)
vlad_out = tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters))
vlad_out = tf.nn.l2_normalize(vlad_out, 1)
return vlad_out
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters])
def get_config(self):
config = {"num_clusters": self.num_clusters}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
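# Illustrative usage sketch (not part of the original file); the shapes are hypothetical:
#
#     frames = tf.random.uniform((4, 300, 1024))                # (batch, time, feature_dim)
#     vlad = NetVLAD(num_clusters=8, input_shape=(4, 300, 1024))
#     pooled = vlad(frames)                                      # -> shape (4, 1024 * 8)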
class ContextGating(tf.keras.layers.Layer):
"""Implements the Context Gating Layer from https://arxiv.org/abs/1706.06905
Input shape:
2D tensor with shape: `(batch_size, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, feature_dim)`.
"""
def __init__(self, input_shape, **kwargs):
super(ContextGating, self).__init__(**kwargs)
feature_dim = input_shape[-1]
if not isinstance(feature_dim, int):
feature_dim = feature_dim.value
self.fc = tf.keras.layers.Dense(
units=feature_dim,
activation=tf.nn.sigmoid,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
)
def call(self, model_input):
"""Apply the ContextGating module to the given input.
Args:
model_input: A tensor with shape [batch_size, feature_dim].
Returns:
A tensor with shape [batch_size, feature_dim].
Raises:
ValueError: If the `feature_dim` of model_input is not defined.
"""
model_input.shape.assert_has_rank(2)
feature_dim = model_input.shape.as_list()[-1]
if feature_dim is None:
raise ValueError("Last dimension must be defined.")
context_gate = self.fc(model_input)
output = tf.math.multiply(context_gate, model_input)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
return dict(list(base_config.items()))
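# In the notation of the paper cited above, the layer computes Y = sigmoid(W X + b) * X
# element-wise, i.e. the input is re-weighted by a learned gate of the same dimension.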
class MOELogistic(tf.keras.layers.Layer):
"""Implements a Mixture of Logistic Experts classifier.
Input shape:
2D tensor with shape: `(batch_size, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, num_classes)`.
"""
def __init__(self, input_shape, num_classes, num_mixtures, **kwargs):
super(MOELogistic, self).__init__(**kwargs)
self.num_classes = num_classes
self.num_mixtures = num_mixtures
self.gate_fc = tf.keras.layers.Dense(
units=num_classes*(num_mixtures+1),
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
)
self.expert_fc = tf.keras.layers.Dense(
units=num_classes*num_mixtures,
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
)
def call(self, input):
"""Apply the MoE algorithm to the given input.
Args:
input: A tensor with shape [batch_size, feature_dim].
Returns:
A tensor with shape [batch_size, num_classes].
Raises:
ValueError: If the `feature_dim` of input is not defined.
"""
gate_activations = self.gate_fc(input)
expert_activations = self.expert_fc(input)
#Calculate the distribution across mixtures
gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1]))
expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures]))
probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1)
probs = tf.reshape(probs, [-1, self.num_classes])
return probs
def compute_output_shape(self, input_shape):
return (input_shape[0], self.num_classes)
def get_config(self):
base_config = super().get_config()
    base_config.update({'number of classes': self.num_classes, 'number of mixtures': self.num_mixtures})
    return base_config
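# The classifier above follows the standard mixture-of-experts form: for each class c,
#     p(c | x) = sum_k gate_k(x) * sigmoid(expert_k(x)),
# where the gate is a softmax over (num_mixtures + 1) logits and only the first
# num_mixtures entries are mixed with the experts (the extra entry effectively acts
# as a dummy expert that lets probability mass drop out).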
class VideoClassifier:
"""The Video Classifier model, implemented according to the winning model from the Youtube-8M Challenge.
The model can be found here: https://arxiv.org/pdf/1706.06905.pdf
Arguments:
num_clusters: the number of clusters to be used for NetVLAD. The audio clusters will be num_clusters/2.
video_input_shape: shape of the input video features. Shape of [batch_size, num_samples, video_feature_dim].
    audio_input_shape: shape of the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].
Raises:
ValueError: If num_clusters is not divisible by 2.
ValueError: If the batch sizes of the audio_input_shape and video_input_shape do not match.
ValueError: If the number of samples of the audio_input_shape and video_input_shape do not match.
"""
def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs):
super(VideoClassifier, self).__init__(**kwargs)
if num_clusters % 2 != 0:
raise ValueError("num_clusters must be divisible by 2.")
batch_size = video_input_shape[0]
if audio_input_shape[0] != batch_size:
raise ValueError("audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.")
if audio_input_shape[1] != video_input_shape[1]:
raise ValueError("audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.")
self.num_frames = video_input_shape[1]
self.num_classes = num_classes
self.num_mixtures = num_mixtures
self.iterations = iterations
self.video_feature_dim = video_input_shape[2]
self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name="video_vlad")
self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name="audio_vlad")
#Relu6 is used as it is employed in the paper.
self.fc = tf.keras.layers.Dense(
units=fc_units,
activation=tf.nn.relu6,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc"
)
self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name="first_cg")
self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name="moe")
self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name="second_cg")
def build_model(self, input_shape, batch_size):
"""Perform one forward pass of the model.
Args:
model_input: input features of shape [batch_size, max_frames, video_feature_dim + audio_feature_dim].
Returns:
A tensor with shape [batch_size, num_classes].
"""
model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
video_input = model_input[:,:,:self.video_feature_dim]
audio_input = model_input[:,:,self.video_feature_dim:]
video_vlad_out = self.video_vlad(video_input)
audio_vlad_out = self.audio_vlad(audio_input)
vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)
fc_out = self.fc(vlad_out)
cg_out = self.first_cg(fc_out)
moe_out = self.moe(cg_out)
final_out = self.second_cg(moe_out)
final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out)
return final_model
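# Illustrative usage sketch for VideoClassifier, kept as a comment so nothing runs
# at import time. The concrete dimensions below are assumptions made for the
# example, not values required by the class:
#
#   batch_size, num_frames = 32, 300
#   video_dim, audio_dim = 1024, 128
#   classifier = VideoClassifier(
#       num_clusters=256,
#       video_input_shape=(batch_size, num_frames, video_dim),
#       audio_input_shape=(batch_size, num_frames, audio_dim),
#       num_classes=3862, num_mixtures=2, fc_units=2048, iterations=30)
#   model = classifier.build_model(
#       input_shape=(num_frames, video_dim + audio_dim), batch_size=batch_size)
#   model.compile(optimizer="adam", loss="binary_crossentropy")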
class SegmentClassifier:
"""The Segment Classifier model, implemented according to the winning model from the Youtube-8M Challenge.
The model can be found here: https://arxiv.org/abs/1911.08548
Arguments:
num_clusters: the number of clusters to be used for NetVLAD. The audio clusters will be num_clusters/2.
video_input_shape: shape of the input video features. Shape of [batch_size, num_samples, video_feature_dim].
    audio_input_shape: shape of the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].
    num_classes: number of target classes (stored on the instance).
    fc_units: number of units in the fully connected layer applied to the pooled features.
Raises:
ValueError: If num_clusters is not divisible by 2.
ValueError: If the batch sizes of the audio_input_shape and video_input_shape do not match.
ValueError: If the number of samples of the audio_input_shape and video_input_shape do not match.
"""
def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs):
super(SegmentClassifier, self).__init__(**kwargs)
if num_clusters % 2 != 0:
raise ValueError("num_clusters must be divisible by 2.")
batch_size = video_input_shape[0]
if audio_input_shape[0] != batch_size:
raise ValueError("audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.")
if audio_input_shape[1] != video_input_shape[1]:
raise ValueError("audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.")
self.num_frames = video_input_shape[1]
self.num_classes = num_classes
self.video_feature_dim = video_input_shape[2]
self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name="video_vlad")
self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name="audio_vlad")
#Relu6 is used as it is employed in the paper.
self.fc = tf.keras.layers.Dense(
units=fc_units,
activation=tf.nn.relu6,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc"
)
self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name="first_cg")
self.fc2 = tf.keras.layers.Dense(
units=1,
activation=tf.keras.activations.sigmoid,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc2"
)
def build_model(self, input_shape, second_input_shape, batch_size):
"""Perform one forward pass of the model.
Args:
input_shape: input shape for video features. Shape is of the form: [max_frames, video_feature_dim + audio_feature_dim].
second_input_shape: input shape of new class specific features. Shape is of the form [num_new_features]
Returns:
A tensor with shape [batch_size, num_classes].
"""
model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size)
video_input = model_input[:,:,:self.video_feature_dim]
audio_input = model_input[:,:,self.video_feature_dim:]
video_vlad_out = self.video_vlad(video_input)
audio_vlad_out = self.audio_vlad(audio_input)
vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)
vlad_out = tf.concat([vlad_out, model_input2], axis=1)
fc_out = self.fc(vlad_out)
cg_out = self.first_cg(fc_out)
final_out = self.fc2(cg_out)
final_model = tf.keras.models.Model(inputs=[model_input, model_input2], outputs=final_out)
return final_model | [((68, 13, 68, 50), 'tensorflow.reshape', 'tf.reshape', ({(68, 24, 68, 30): 'frames', (68, 32, 68, 49): '(-1, feature_dim)'}, {}), '(frames, (-1, feature_dim))', True, 'import tensorflow as tf\n'), ((70, 17, 70, 76), 'tensorflow.reshape', 'tf.reshape', ({(70, 28, 70, 38): 'activation', (70, 40, 70, 75): '(-1, max_frames, self.num_clusters)'}, {}), '(activation, (-1, max_frames, self.num_clusters))', True, 'import tensorflow as tf\n'), ((72, 21, 72, 75), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (), '', True, 'import tensorflow as tf\n'), ((75, 13, 75, 62), 'tensorflow.reshape', 'tf.reshape', ({(75, 24, 75, 30): 'frames', (75, 32, 75, 61): '(-1, max_frames, feature_dim)'}, {}), '(frames, (-1, max_frames, feature_dim))', True, 'import tensorflow as tf\n'), ((81, 15, 81, 46), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', ({(81, 34, 81, 42): 'vlad_out', (81, 44, 81, 45): '1'}, {}), '(vlad_out, 1)', True, 'import tensorflow as tf\n'), ((82, 15, 82, 74), 'tensorflow.reshape', 'tf.reshape', ({(82, 26, 82, 34): 'vlad_out', (82, 36, 82, 73): '(-1, feature_dim * self.num_clusters)'}, {}), '(vlad_out, (-1, feature_dim * self.num_clusters))', True, 'import tensorflow as tf\n'), ((83, 15, 83, 46), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', ({(83, 34, 83, 42): 'vlad_out', (83, 44, 83, 45): '1'}, {}), '(vlad_out, 1)', True, 'import tensorflow as tf\n'), ((89, 11, 89, 80), 'tensorflow.TensorShape', 'tf.TensorShape', ({(89, 26, 89, 79): '[input_shape[0], input_shape[-1] * self.num_clusters]'}, {}), '([input_shape[0], input_shape[-1] * self.num_clusters])', True, 'import tensorflow as tf\n'), ((131, 13, 131, 56), 'tensorflow.math.multiply', 'tf.math.multiply', ({(131, 30, 131, 42): 'context_gate', (131, 44, 131, 55): 'model_input'}, {}), '(context_gate, model_input)', True, 'import tensorflow as tf\n'), ((183, 12, 183, 53), 'tensorflow.reshape', 'tf.reshape', ({(183, 23, 183, 28): 'probs', (183, 30, 183, 52): '[-1, self.num_classes]'}, {}), '(probs, [-1, self.num_classes])', True, 'import tensorflow as tf\n'), ((251, 18, 251, 81), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (), '', True, 'import tensorflow as tf\n'), ((259, 15, 259, 66), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((266, 18, 266, 78), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (), '', True, 'import tensorflow as tf\n'), ((328, 18, 328, 81), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (), '', True, 'import tensorflow as tf\n'), ((329, 19, 329, 89), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (), '', True, 'import tensorflow as tf\n'), ((334, 15, 334, 66), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((335, 15, 335, 58), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((339, 18, 339, 94), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (), '', True, 'import tensorflow as tf\n'), ((179, 30, 179, 85), 'tensorflow.reshape', 'tf.reshape', ({(179, 41, 179, 57): 'gate_activations', (179, 59, 179, 84): '[-1, self.num_mixtures + 1]'}, {}), '(gate_activations, [-1, self.num_mixtures + 1])', True, 'import tensorflow as tf\n'), ((180, 32, 180, 87), 'tensorflow.reshape', 'tf.reshape', ({(180, 43, 180, 61): 'expert_activations', (180, 63, 180, 86): '[-1, self.num_mixtures]'}, {}), '(expert_activations, [-1, self.num_mixtures])', True, 'import tensorflow as tf\n'), ((182, 26, 182, 88), 'tensorflow.math.multiply', 'tf.math.multiply', ({(182, 43, 182, 
74): 'gate_dist[:, :self.num_mixtures]', (182, 76, 182, 87): 'expert_dist'}, {}), '(gate_dist[:, :self.num_mixtures], expert_dist)', True, 'import tensorflow as tf\n'), ((41, 25, 41, 55), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ({(41, 50, 41, 54): '1e-05'}, {}), '(1e-05)', True, 'import tensorflow as tf\n'), ((77, 16, 77, 56), 'tensorflow.transpose', 'tf.transpose', (), '', True, 'import tensorflow as tf\n'), ((88, 18, 88, 45), 'tensorflow.TensorShape', 'tf.TensorShape', ({(88, 33, 88, 44): 'input_shape'}, {}), '(input_shape)', True, 'import tensorflow as tf\n'), ((113, 25, 113, 55), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ({(113, 50, 113, 54): '1e-05'}, {}), '(1e-05)', True, 'import tensorflow as tf\n'), ((157, 25, 157, 55), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ({(157, 50, 157, 54): '1e-06'}, {}), '(1e-06)', True, 'import tensorflow as tf\n'), ((162, 25, 162, 55), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ({(162, 50, 162, 54): '1e-06'}, {}), '(1e-06)', True, 'import tensorflow as tf\n'), ((233, 25, 233, 55), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ({(233, 50, 233, 54): '1e-05'}, {}), '(1e-05)', True, 'import tensorflow as tf\n'), ((306, 25, 306, 55), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ({(306, 50, 306, 54): '1e-05'}, {}), '(1e-05)', True, 'import tensorflow as tf\n'), ((315, 25, 315, 55), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', ({(315, 50, 315, 54): '1e-05'}, {}), '(1e-05)', True, 'import tensorflow as tf\n'), ((47, 21, 47, 43), 'math.sqrt', 'math.sqrt', ({(47, 31, 47, 42): 'feature_dim'}, {}), '(feature_dim)', False, 'import math\n')] |
faruq2021/ivy | ivy/functional/backends/jax/old/math.py | 1b24beadbd673d6a9dd504e037c68547e5640627 | """
Collection of Jax math functions, wrapped to fit Ivy syntax and signature.
"""
# global
import jax as _jax
import jax.numpy as _jnp
tan = _jnp.tan
acos = _jnp.arccos
atan = _jnp.arctan
atan2 = _jnp.arctan2
cosh = _jnp.cosh
atanh = _jnp.arctanh
log = _jnp.log
exp = _jnp.exp
erf = _jax.scipy.special.erf
| [] |
Adoni/ZhihuCrawler | neaten_db.py | c275192ced3a344d7b93b7cfd3ebf87ed179400d | from pymongo import MongoClient
from pyltp import Segmentor
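# Copies question and user data from the raw `zhihu` MongoDB database into the
# cleaned `zhihu_network` database: question titles are word-segmented with
# pyltp (titles shorter than 3 tokens are skipped), and per-user
# follow/ask/collect/answer relations are flattened into adjacency lists.
# The path to the pyltp segmentation model (cws.model) is hard-coded below.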
def insert_questions_from_answered_question():
in_db = MongoClient().zhihu.user_answered_questions
out_db = MongoClient().zhihu_network.questions
existed_question_id = set(map(lambda q: q['_id'], out_db.find()))
segmentor = Segmentor()
segmentor.load("/Users/sunxiaofei/workspace/ltp_data/cws.model")
for u in in_db.find():
for a in u['answers']:
if a['q_id'] in existed_question_id:
continue
existed_question_id.add(a['q_id'])
if len(existed_question_id) % 1000 == 0:
print(len(existed_question_id))
words = segmentor.segment(a['title'].strip().replace(
'\n', ' ').replace('\r', ' ').replace('\b', ' '))
if len(words) < 3:
continue
out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)})
def insert_questions_from_followed_question():
in_db = MongoClient().zhihu.user_followed_questions
out_db = MongoClient().zhihu_network.questions
existed_question_id = set(map(lambda q: q['_id'], out_db.find()))
segmentor = Segmentor()
segmentor.load("/Users/sunxiaofei/workspace/ltp_data/cws.model")
for u in in_db.find():
for q in u['questions']:
if q['id'] in existed_question_id:
continue
existed_question_id.add(q['id'])
words = segmentor.segment(q['title'].strip().replace(
'\n', ' ').replace('\r', ' ').replace('\b', ' '))
if len(words) < 3:
continue
out_db.insert({'_id': q['id'], 'title': ' '.join(words)})
def insert_questions_from_asked_question():
in_db = MongoClient().zhihu.user_asked_questions
out_db = MongoClient().zhihu_network.questions
existed_question_id = set(map(lambda q: q['_id'], out_db.find()))
segmentor = Segmentor()
segmentor.load("/Users/sunxiaofei/workspace/ltp_data/cws.model")
for u in in_db.find():
for q in u['questions']:
if q['id'] in existed_question_id:
continue
existed_question_id.add(q['id'])
if len(existed_question_id) % 1000 == 0:
print(len(existed_question_id))
words = segmentor.segment(q['title'].strip().replace(
'\n', ' ').replace('\r', ' ').replace('\b', ' '))
if len(words) < 3:
continue
out_db.insert({'_id': q['id'], 'title': ' '.join(words)})
def insert_questions_from_collected_question():
in_db = MongoClient().zhihu.user_collected_questions
out_db = MongoClient().zhihu_network.questions
existed_question_id = set(map(lambda q: q['_id'], out_db.find()))
segmentor = Segmentor()
segmentor.load("/Users/sunxiaofei/workspace/ltp_data/cws.model")
for u in in_db.find():
for c_name, c_questions in u['collections'].items():
for a in c_questions:
if a['q_id'] == -1:
continue
if a['q_id'] in existed_question_id:
continue
existed_question_id.add(a['q_id'])
if len(existed_question_id) % 1000 == 0:
print(len(existed_question_id))
words = segmentor.segment(a['title'].strip().replace(
'\n', ' ').replace('\r', ' ').replace('\b', ' '))
if len(words) < 3:
continue
out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)})
def delete_noise_question():
db = MongoClient().zhihu_network.questions
id_to_delete = []
for q in db.find():
if len(q['title'].split(' ')) < 3:
id_to_delete.append(q['_id'])
print(len(id_to_delete))
for _id in id_to_delete:
db.delete_one({'_id': _id})
def remove_enger_inline():
db = MongoClient().zhihu_network.questions
for q in db.find():
if '\n' in q['title'] or '\r' in q['title'] or '\b' in q['title']:
q['title'] = q['title'].replace('\n', ' ')
q['title'] = q['title'].replace('\r', ' ')
q['title'] = q['title'].replace('\b', ' ')
db.update_one({'_id': q['_id']},
{'$set': {'title': q['title']}},
upsert=True)
def insert_user_list():
keys = ['_id', 'name', 'is_zero_user', 'gender', 'location', 'business',
'education', 'motto', 'answer_num', 'collection_num',
'followed_column_num', 'followed_topic_num', 'followee_num',
'follower_num', 'post_num', 'question_num', 'thank_num',
'upvote_num', 'photo_url', 'weibo_url']
out_db = MongoClient().zhihu_network.users
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for line in open('./user_info.data'):
line = line.strip().split('\t')
try:
assert (len(keys) == len(line))
except:
continue
user = dict(zip(keys, line))
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
for key in user:
if key.endswith('_num'):
user[key] = int(user[key])
out_db.insert(user)
def insert_user_follow_user_list():
out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for line in open('./user_followees.data'):
line = line.strip().split('\t')
user = dict()
user['_id'] = line[0]
user['neibors'] = line[1:]
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
out_db.insert(user)
def insert_user_follow_question_list():
in_db = MongoClient().zhihu.user_followed_questions
out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for user in in_db.find():
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
q_ids = [q['id'] for q in user['questions']]
out_db.insert({'_id': user['_id'], 'neibors': q_ids})
def insert_user_ask_question_list():
in_db = MongoClient().zhihu.user_asked_questions
out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for user in in_db.find():
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
q_ids = [q['id'] for q in user['questions']]
out_db.insert({'_id': user['_id'], 'neibors': q_ids})
def insert_user_collect_question_list():
in_db = MongoClient().zhihu.user_collected_questions
out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for user in in_db.find():
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
q_ids = []
for _, c in user['collections'].items():
q_ids += [q['q_id'] for q in c]
out_db.insert({'_id': user['_id'], 'neibors': q_ids})
def insert_user_answer_question_list():
in_db = MongoClient().zhihu.user_answered_questions
out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for user in in_db.find():
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
q_ids = [a['q_id'] for a in user['answers']]
out_db.insert({'_id': user['_id'], 'neibors': q_ids})
if __name__ == '__main__':
# insert_questions_from_answered_question()
# insert_questions_from_followed_question()
# insert_questions_from_asked_question()
# insert_questions_from_collected_question()
#delete_noise_question()
#remove_enger_inline()
# insert_user_list()
insert_user_follow_user_list()
# insert_user_follow_question_list()
# insert_user_ask_question_list()
# insert_user_collect_question_list()
# insert_user_answer_question_list()
| [((9, 16, 9, 27), 'pyltp.Segmentor', 'Segmentor', ({}, {}), '()', False, 'from pyltp import Segmentor\n'), ((29, 16, 29, 27), 'pyltp.Segmentor', 'Segmentor', ({}, {}), '()', False, 'from pyltp import Segmentor\n'), ((47, 16, 47, 27), 'pyltp.Segmentor', 'Segmentor', ({}, {}), '()', False, 'from pyltp import Segmentor\n'), ((67, 16, 67, 27), 'pyltp.Segmentor', 'Segmentor', ({}, {}), '()', False, 'from pyltp import Segmentor\n'), ((6, 12, 6, 25), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((7, 13, 7, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((26, 12, 26, 25), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((27, 13, 27, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((44, 12, 44, 25), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((45, 13, 45, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((64, 12, 64, 25), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((65, 13, 65, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((87, 9, 87, 22), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((98, 9, 98, 22), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((115, 13, 115, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((134, 13, 134, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((148, 12, 148, 25), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((149, 13, 149, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((160, 12, 160, 25), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((161, 13, 161, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((172, 12, 172, 25), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((173, 13, 173, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((186, 12, 186, 25), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n'), ((187, 13, 187, 26), 'pymongo.MongoClient', 'MongoClient', ({}, {}), '()', False, 'from pymongo import MongoClient\n')] |
timgates42/netcdf4-python | test/tst_vlen.py | d8b1cb11454f9beec674a29904c91f48db608c2c | import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons,object)
datas = np.empty(nlats*nlons,object)
nn = 0
for n in range(nlats*nlons):
nn = nn + 1
data[n] = np.arange(nn,dtype=VL_BASETYPE)
datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])
data = np.reshape(data,(nlats,nlons))
datas = np.reshape(datas,(nlats,nlons))
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
ragged = f.createVariable(VAR1_NAME, vlen_t,\
(DIM2_NAME,DIM1_NAME))
strings = f.createVariable(VAR2_NAME, str,
(DIM2_NAME,DIM1_NAME))
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
string_scalar = f.createVariable(VAR4_NAME,str,())
vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
ragged[:] = data
ragged[-1,-1] = data[-1,-1]
strings[:] = datas
strings[-2,-2] = datas[-2,-2]
strings_alt[:] = datas.astype(str)
string_scalar[...] = 'foo' #issue458
vlen_scalar[...] = np.array([1,2,3],np.int16)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR1_NAME]
vs = f.variables[VAR2_NAME]
vs_alt = f.variables[VAR3_NAME]
assert list(f.vltypes.keys()) == [VL_NAME]
assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
assert f.variables['string_scalar'][...] == 'foo'
assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
data2 = v[:]
data2s = vs[:]
for i in range(nlons):
for j in range(nlats):
assert_array_equal(data2[j,i], data[j,i])
assert datas[j,i] == data2s[j,i]
assert_array_equal(datas, vs_alt[:])
f.close()
class TestInvalidDataType(unittest.TestCase):
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
f.createDimension('x', 1)
# using assertRaisesRegext as a context manager
# only works with python >= 2.7 (issue #497)
#with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
# f.createVariable('foo', str, ('x',))
try:
f.createVariable('foo', str, ('x',))
except ValueError:
pass
f.close()
os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
# issue 333
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF4')
teststring = f.createVariable('teststring', str)
stringout = "yyyymmdd_hhmmss"
teststring[()] = stringout
f.close()
f = Dataset(FILE_NAME)
assert f.variables['teststring'][:] == stringout
f.close()
os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
# issue 526
def runTest(self):
strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
strtest.createDimension('tenstrings', 10)
strtest.createVariable('tenstrings', str, ['tenstrings'])
strtest['tenstrings'][np.int32(5)] = 'asdf'
strtest['tenstrings'][6.0] = 'asdf'
strtest.close()
f = Dataset(FILE_NAME)
assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
assert f.variables['tenstrings'][6.0] == 'asdf'
f.close()
os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
strings_alt[:] = datas.astype(str)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
vs_alt = f.variables[VAR3_NAME]
unicode_strings = vs_alt[:]
fancy_indexed = unicode_strings[0][[1,2,4]]
assert fancy_indexed[0] == 'abc'
assert fancy_indexed[1] == 'abcd'
assert fancy_indexed[2] == 'abcdef'
f.close()
class VlenAppendTestCase(unittest.TestCase):
def setUp(self):
import netCDF4
if netCDF4.__netcdf4libversion__ < "4.4.1":
self.skip = True
try:
self.skipTest("This test requires NetCDF 4.4.1 or later.")
except AttributeError:
# workaround for Python 2.6 (skipTest(reason) is new
# in Python 2.7)
pass
else:
self.skip = False
self.file = FILE_NAME
f = Dataset(self.file, 'w')
vlen_type = f.createVLType(np.float64, 'vltest')
f.createDimension('x', None)
v = f.createVariable('vl', vlen_type, 'x')
w = f.createVariable('vl2', np.float64, 'x')
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing appending to vlen variables (issue #527)."""
# workaround for Python 2.6
if self.skip:
return
f = Dataset(self.file, 'a')
w = f.variables["vl2"]
v = f.variables["vl"]
w[0:3] = np.arange(3, dtype=np.float64)
v[0] # sometimes crashes
v[0].tolist() # sometimes crashes
v[0].size # BOOM!
f.close()
class Vlen_ScaledInts(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
nc = Dataset(self.file, 'w')
vlen_type = nc.createVLType(np.uint8, 'vltest')
nc.createDimension('x', None)
v = nc.createVariable('vl', vlen_type, 'x')
v.scale_factor = 1./254.
v.missing_value=np.array(255,np.uint8)
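        # Floats in [0, 1] are packed into uint8 on write: with a scale factor of
        # 1/254 the codes 0..254 cover the data range and 255 is reserved as the
        # missing value, so the quantisation error stays below one scale step.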
# random lengths between 1 and 1000
ilen = np.random.randint(1,1000,size=100)
n = 0
for nlen in ilen:
data = np.random.uniform(low=0.0, high=1.0, size=nlen)
v[n] = data
if n==99: self.data = data
n += 1
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing packing float vlens as scaled integers (issue #1003)."""
nc = Dataset(self.file)
data = nc['vl'][-1]
# check max error of compression
err = np.abs(data - self.data)
assert(err.max() < nc['vl'].scale_factor)
# turn off auto-scaling
nc.set_auto_maskandscale(False)
data = nc['vl'][-1]
assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
nc.close()
if __name__ == '__main__':
unittest.main()
| [((20, 7, 20, 35), 'numpy.empty', 'np.empty', ({(20, 16, 20, 27): 'nlats * nlons', (20, 28, 20, 34): 'object'}, {}), '(nlats * nlons, object)', True, 'import numpy as np\n'), ((21, 8, 21, 36), 'numpy.empty', 'np.empty', ({(21, 17, 21, 28): 'nlats * nlons', (21, 29, 21, 35): 'object'}, {}), '(nlats * nlons, object)', True, 'import numpy as np\n'), ((27, 7, 27, 37), 'numpy.reshape', 'np.reshape', ({(27, 18, 27, 22): 'data', (27, 23, 27, 36): '(nlats, nlons)'}, {}), '(data, (nlats, nlons))', True, 'import numpy as np\n'), ((28, 8, 28, 39), 'numpy.reshape', 'np.reshape', ({(28, 19, 28, 24): 'datas', (28, 25, 28, 38): '(nlats, nlons)'}, {}), '(datas, (nlats, nlons))', True, 'import numpy as np\n'), ((9, 12, 9, 67), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (), '', False, 'import tempfile\n'), ((25, 14, 25, 45), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((228, 4, 228, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((35, 12, 35, 34), 'netCDF4.Dataset', 'Dataset', ({(35, 20, 35, 29): 'self.file', (35, 30, 35, 33): '"""w"""'}, {}), "(self.file, 'w')", False, 'from netCDF4 import Dataset\n'), ((53, 27, 53, 53), 'numpy.array', 'np.array', ({(53, 36, 53, 43): '[1, 2, 3]', (53, 44, 53, 52): 'np.int16'}, {}), '([1, 2, 3], np.int16)', True, 'import numpy as np\n'), ((58, 8, 58, 28), 'os.remove', 'os.remove', ({(58, 18, 58, 27): 'self.file'}, {}), '(self.file)', False, 'import os\n'), ((62, 12, 62, 35), 'netCDF4.Dataset', 'Dataset', ({(62, 20, 62, 29): 'self.file', (62, 31, 62, 34): '"""r"""'}, {}), "(self.file, 'r')", False, 'from netCDF4 import Dataset\n'), ((76, 8, 76, 44), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(76, 27, 76, 32): 'datas', (76, 34, 76, 43): 'vs_alt[:]'}, {}), '(datas, vs_alt[:])', False, 'from numpy.testing import assert_array_equal\n'), ((82, 12, 82, 61), 'netCDF4.Dataset', 'Dataset', (), '', False, 'from netCDF4 import Dataset\n'), ((93, 8, 93, 28), 'os.remove', 'os.remove', ({(93, 18, 93, 27): 'FILE_NAME'}, {}), '(FILE_NAME)', False, 'import os\n'), ((98, 12, 98, 53), 'netCDF4.Dataset', 'Dataset', (), '', False, 'from netCDF4 import Dataset\n'), ((103, 12, 103, 30), 'netCDF4.Dataset', 'Dataset', ({(103, 20, 103, 29): 'FILE_NAME'}, {}), '(FILE_NAME)', False, 'from netCDF4 import Dataset\n'), ((106, 8, 106, 28), 'os.remove', 'os.remove', ({(106, 18, 106, 27): 'FILE_NAME'}, {}), '(FILE_NAME)', False, 'import os\n'), ((111, 18, 111, 59), 'netCDF4.Dataset', 'Dataset', (), '', False, 'from netCDF4 import Dataset\n'), ((117, 12, 117, 30), 'netCDF4.Dataset', 'Dataset', ({(117, 20, 117, 29): 'FILE_NAME'}, {}), '(FILE_NAME)', False, 'from netCDF4 import Dataset\n'), ((121, 8, 121, 28), 'os.remove', 'os.remove', ({(121, 18, 121, 27): 'FILE_NAME'}, {}), '(FILE_NAME)', False, 'import os\n'), ((127, 12, 127, 34), 'netCDF4.Dataset', 'Dataset', ({(127, 20, 127, 29): 'self.file', (127, 30, 127, 33): '"""w"""'}, {}), "(self.file, 'w')", False, 'from netCDF4 import Dataset\n'), ((138, 8, 138, 28), 'os.remove', 'os.remove', ({(138, 18, 138, 27): 'self.file'}, {}), '(self.file)', False, 'import os\n'), ((142, 12, 142, 35), 'netCDF4.Dataset', 'Dataset', ({(142, 20, 142, 29): 'self.file', (142, 31, 142, 34): '"""r"""'}, {}), "(self.file, 'r')", False, 'from netCDF4 import Dataset\n'), ((167, 12, 167, 35), 'netCDF4.Dataset', 'Dataset', ({(167, 20, 167, 29): 'self.file', (167, 31, 167, 34): '"""w"""'}, {}), "(self.file, 'w')", False, 'from netCDF4 import Dataset\n'), ((176, 8, 176, 28), 'os.remove', 
'os.remove', ({(176, 18, 176, 27): 'self.file'}, {}), '(self.file)', False, 'import os\n'), ((184, 12, 184, 35), 'netCDF4.Dataset', 'Dataset', ({(184, 20, 184, 29): 'self.file', (184, 31, 184, 34): '"""a"""'}, {}), "(self.file, 'a')", False, 'from netCDF4 import Dataset\n'), ((187, 17, 187, 47), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((196, 13, 196, 36), 'netCDF4.Dataset', 'Dataset', ({(196, 21, 196, 30): 'self.file', (196, 32, 196, 35): '"""w"""'}, {}), "(self.file, 'w')", False, 'from netCDF4 import Dataset\n'), ((201, 24, 201, 46), 'numpy.array', 'np.array', ({(201, 33, 201, 36): '255', (201, 37, 201, 45): 'np.uint8'}, {}), '(255, np.uint8)', True, 'import numpy as np\n'), ((203, 15, 203, 49), 'numpy.random.randint', 'np.random.randint', (), '', True, 'import numpy as np\n'), ((213, 8, 213, 28), 'os.remove', 'os.remove', ({(213, 18, 213, 27): 'self.file'}, {}), '(self.file)', False, 'import os\n'), ((216, 13, 216, 31), 'netCDF4.Dataset', 'Dataset', ({(216, 21, 216, 30): 'self.file'}, {}), '(self.file)', False, 'from netCDF4 import Dataset\n'), ((219, 14, 219, 38), 'numpy.abs', 'np.abs', ({(219, 21, 219, 37): 'data - self.data'}, {}), '(data - self.data)', True, 'import numpy as np\n'), ((69, 59, 69, 85), 'numpy.array', 'np.array', ({(69, 68, 69, 75): '[1, 2, 3]', (69, 76, 69, 84): 'np.int16'}, {}), '([1, 2, 3], np.int16)', True, 'import numpy as np\n'), ((206, 19, 206, 66), 'numpy.random.uniform', 'np.random.uniform', (), '', True, 'import numpy as np\n'), ((224, 27, 224, 73), 'numpy.around', 'np.around', ({(224, 37, 224, 72): "(self.data[-1] / nc['vl'].scale_factor)"}, {}), "(self.data[-1] / nc['vl'].scale_factor)", True, 'import numpy as np\n'), ((74, 16, 74, 57), 'numpy.testing.assert_array_equal', 'assert_array_equal', ({(74, 35, 74, 45): 'data2[j, i]', (74, 47, 74, 56): 'data[j, i]'}, {}), '(data2[j, i], data[j, i])', False, 'from numpy.testing import assert_array_equal\n'), ((114, 30, 114, 41), 'numpy.int32', 'np.int32', ({(114, 39, 114, 40): '(5)'}, {}), '(5)', True, 'import numpy as np\n'), ((118, 41, 118, 52), 'numpy.int32', 'np.int32', ({(118, 50, 118, 51): '(5)'}, {}), '(5)', True, 'import numpy as np\n')] |
ScriptBox99/deepmind-sonnet | sonnet/src/once.py | 5cbfdc356962d9b6198d5b63f0826a80acfdf35b | # Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility to run functions and methods once."""
import uuid
from sonnet.src import utils
_ONCE_PROPERTY = "_snt_once"
def _check_no_output(output):
if output is not None:
raise ValueError("@snt.once decorated functions cannot return values")
def once(f):
"""Decorator which ensures a wrapped method is only ever run once.
>>> @snt.once
... def f():
... print('Hello, world!')
>>> f()
Hello, world!
>>> f()
>>> f()
If `f` is a method then it will be evaluated once per instance:
>>> class MyObject:
... @snt.once
... def f(self):
... print('Hello, world!')
>>> o = MyObject()
>>> o.f()
Hello, world!
>>> o.f()
>>> o2 = MyObject()
>>> o2.f()
Hello, world!
>>> o.f()
>>> o2.f()
If an error is raised during execution of `f` it will be raised to the user.
Next time the method is run, it will be treated as not having run before.
Args:
f: A function to wrap which should only be called once.
Returns:
Wrapped version of `f` which will only evaluate `f` the first time it is
called.
"""
# TODO(tomhennigan) Perhaps some more human friendly identifier?
once_id = uuid.uuid4()
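  # Each decorated function gets its own `once_id`; the ids that have already run
  # for a given instance are stored in a per-instance set under `_ONCE_PROPERTY`,
  # so several @once-decorated methods on one object do not interfere.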
@utils.decorator
def wrapper(wrapped, instance, args, kwargs):
"""Decorator which ensures a wrapped method is only ever run once."""
if instance is None:
# NOTE: We can't use the weakset since you can't weakref None.
if not wrapper.seen_none:
_check_no_output(wrapped(*args, **kwargs))
wrapper.seen_none = True
return
# Get or set the `seen` set for this object.
seen = getattr(instance, _ONCE_PROPERTY, None)
if seen is None:
seen = set()
setattr(instance, _ONCE_PROPERTY, seen)
if once_id not in seen:
_check_no_output(wrapped(*args, **kwargs))
seen.add(once_id)
wrapper.seen_none = False
decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none
decorated.__snt_once_wrapped__ = f
return decorated
| [((70, 12, 70, 24), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n')] |
DGarciaMedina/PiArmDiego | env.py | cb4664796aa99b0717145f9e4889bfba5190059f | import piarm
import time
import numpy as np
import cv2
import random
class MyArm2D:
def __init__(self, move_robot = False):
self.move_robot = move_robot
if self.move_robot:
self.robot = piarm.PiArm()
self.open_connection()
self.DEFAULT = [500, 500, 500, 500, 500, 500]
self.num_members = 3
self.adjustable_joints = [3,4,5]
self.initial_height = 73 # height in mm of motor 5's axle
self.lengths = {
"h_0": 73,
"a": 97.5,
"b": 96,
"c": 160
}
self.base_width = 110
self.base_height = 45
# All the angles are with respect to the vertical
self.max_angles = [90 for _ in range(self.num_members)]
self.min_angles = [-90 for _ in range(self.num_members)]
self.min_angles[0] = 0 # To prevent it from hitting the base of the arm
self.angles = 90*np.ones(self.num_members) # angles of motor 3, 4 and 5 ranging between
# min_angle and max_angle
self.member_thickness = 30
self.img_width = 1000
self.x_offset = int(self.img_width/2)
self.y_offset = self.lengths["h_0"]
self.img_height = int(sum(list(self.lengths.values())) + self.y_offset + 20)
self.img = np.zeros((self.img_height, self.img_width, 3))
self.timestep = 0
self.max_timestep = 200
# This is to check that all the joints (except for the last one) is above
# the ground
self.min_joint_heights = [20, 20, 10]
self.goal_coords = [None, None]
self.update_goal_coords()
self.joint_positions = [[0,0] for i in range(self.num_members + 1)]
self.update_positions()
self.distance2goal = None
self.update_distance_2_goal()
def __del__(self):
print("Closing connection...")
if self.move_robot:
self.close_connection()
def open_connection(self):
if self.robot.alive:
raise Exception("Robot is already switched on")
self.robot.connect("/dev/ttyS0")
if self.robot.alive:
print("Success connecting to robot")
return True
else:
print("Failed to connect to robot")
return False
def move_to_default_pos(self):
if self.robot.alive:
for ID in range(1, 7):
self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500)
return True
else:
return False
def move_to_pos(self):
        # First, convert the joint angles to servo commands between 125 and 875.
        # After subtracting 90 below, the angles lie between -90 and +90 degrees,
        # and the linear map 500 + (375/90) * angle_deg sends:
        #   internal 90 deg (angle_deg 0)   -> 500
        #   internal 0 deg  (angle_deg -90) -> 125
angles_deg = self.angles - 90
angles_deg[2] -= angles_deg[1]
angles_deg[1] -= angles_deg[0]
angles_piarm = [int(500 + (375/90)*angle_deg) for angle_deg in angles_deg]
angles_piarm[0] = 1000 - angles_piarm[0]
angles_piarm[1] = 1000 - angles_piarm[1]
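        # e.g. the default pose self.angles = [90, 90, 90] maps to [500, 500, 500],
        # matching the servo centre positions in self.DEFAULT.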
print("Angles in degrees: ", angles_deg)
print("Moving arms with angles: ", angles_piarm)
if self.robot.alive:
for ID in range(3, 6):
self.robot.servoWrite(8 - ID, int(angles_piarm[ID - 3]), 500)
time.sleep(1)
return True
else:
return False
def close_connection(self):
if not self.robot.alive:
raise Exception("Robot is already switched off")
self.robot.disconnect()
if not self.robot.alive:
print("Success disconnecting from robot")
return True
else:
print("Failed to disconnect from robot")
return False
def update_goal_coords(self):
max_length = sum(list(self.lengths.values())[1:])
r = random.uniform(0.8*max_length,max_length)
theta = random.uniform(-np.pi/4, np.pi/2)
x = r * np.sin(theta)
y = r * np.cos(theta)
self.goal_coords = [int(x), int(y)]
def update_distance_2_goal(self):
gripper_pos = self.joint_positions[-1]
self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i in range(2)]))
def update_positions(self):
"""
        Positions are with respect to the origin (0, 0), directly underneath
        motor 5; coordinates grow positive away from the origin.
"""
self.joint_positions[0] = [0, self.lengths["h_0"]]
self.joint_positions[1] = [
self.joint_positions[0][0] + self.lengths["a"] * np.sin(np.deg2rad(self.angles[0])),
self.joint_positions[0][1] + self.lengths["a"] * np.cos(np.deg2rad(self.angles[0]))
]
self.joint_positions[2] = [
self.joint_positions[1][0] + self.lengths["b"] * np.sin(np.deg2rad(self.angles[1])),
self.joint_positions[1][1] + self.lengths["b"] * np.cos(np.deg2rad(self.angles[1]))
]
self.joint_positions[3] = [
self.joint_positions[2][0] + self.lengths["c"] * np.sin(np.deg2rad(self.angles[2])),
self.joint_positions[2][1] + self.lengths["c"] * np.cos(np.deg2rad(self.angles[2]))
]
# Convert to integers
self.joint_positions = [[int(x[0]),int(x[1])] for x in self.joint_positions]
def move_arm(self, actions):
"""
The inputs are the new set of angles [theta0, theta1, theta2]
"""
for i, action in enumerate(actions):
self.angles[i:] += action
for member_index in range(1,self.num_members):
self.max_angles[member_index] = self.angles[member_index - 1] + 90
self.min_angles[member_index] = self.angles[member_index - 1] - 90
self.update_positions()
self.update_distance_2_goal()
def render(self):
self.img = np.zeros((self.img_height, self.img_width, 3))
# Render the floor
self.img = cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0), -1)
# Render the base of the arm
self.img = cv2.rectangle(self.img,
(int(self.x_offset - self.base_width/2), self.y_offset),
(int(self.x_offset - self.base_width/2 + self.base_width), self.y_offset + self.base_height),
(0, 165, 255),
-1)
goal_x, goal_y = self.goal_coords
self.img = cv2.circle(self.img, (goal_x + self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2), (128, 0, 128), 5)
for member_id in range(self.num_members):
first_joint = self.joint_positions[member_id].copy()
second_joint = self.joint_positions[member_id + 1].copy()
first_joint[0] += self.x_offset
first_joint[1] += self.y_offset
second_joint[0] += self.x_offset
second_joint[1] += self.y_offset
self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness)
self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1)
# Flip image upside down
self.img = cv2.flip(self.img, 0)
self.img = cv2.putText(self.img,
"Distance: " + str(round(self.distance2goal,2)),
(10, 30),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(255,255,255),
2)
cv2.imshow("Arm", self.img)
cv2.moveWindow("Arm",20,50)
def reset(self):
self.angles = 90*np.ones(self.num_members)
self.update_positions()
self.img = np.zeros((self.img_height, self.img_width, 3))
self.timestep = 0
self.update_goal_coords()
self.render()
if self.move_robot:
self.move_to_default_pos()
def check_arm_angles(self):
for member_index in range(self.num_members):
if self.angles[member_index] < self.min_angles[member_index]:
return False
if self.angles[member_index] > self.max_angles[member_index]:
return False
return True
def check_arm_positions(self):
for joint_index in range(1,len(self.joint_positions)):
member_pos = self.joint_positions[joint_index][1]
min_height = self.min_joint_heights[joint_index-1]
if member_pos < min_height:
return False
return True
def get_reward(self, forbidden_action):
if forbidden_action:
reward_scaling_factor = 2
else:
reward_scaling_factor = 1
return - self.distance2goal * reward_scaling_factor
def step(self, actions):
self.move_arm(actions)
forbidden_action = False
okay_angles = self.check_arm_angles()
okay_positions = self.check_arm_positions()
if not okay_angles:
print("An angle threshold was exceeded")
self.move_arm(-actions)
forbidden_action = True
if not okay_positions:
print("A position threshold was exqqceeded")
self.move_arm(-actions)
forbidden_action = True
self.render()
if self.move_robot:
self.move_to_pos()
r = self.get_reward(forbidden_action)
self.timestep += 1
is_done = self.timestep >= self.max_timestep
return self.angles, r, is_done
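# Minimal manual-run sketch (an illustration, not part of the original module):
# drive the simulated arm with small random joint deltas and render each step.
# Assumes no physical arm is attached and that a display is available for the
# OpenCV window.
if __name__ == "__main__":
    arm = MyArm2D(move_robot=False)
    arm.reset()
    done = False
    while not done:
        # Random joint-angle deltas (degrees) for the three adjustable motors.
        actions = np.random.uniform(-2.0, 2.0, size=arm.num_members)
        angles, reward, done = arm.step(actions)
        cv2.waitKey(50)
    cv2.destroyAllWindows()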
| [((46, 19, 46, 65), 'numpy.zeros', 'np.zeros', ({(46, 28, 46, 64): '(self.img_height, self.img_width, 3)'}, {}), '((self.img_height, self.img_width, 3))', True, 'import numpy as np\n'), ((131, 12, 131, 53), 'random.uniform', 'random.uniform', ({(131, 27, 131, 41): '0.8 * max_length', (131, 42, 131, 52): 'max_length'}, {}), '(0.8 * max_length, max_length)', False, 'import random\n'), ((132, 16, 132, 49), 'random.uniform', 'random.uniform', ({(132, 31, 132, 39): '-np.pi / 4', (132, 41, 132, 48): 'np.pi / 2'}, {}), '(-np.pi / 4, np.pi / 2)', False, 'import random\n'), ((186, 19, 186, 65), 'numpy.zeros', 'np.zeros', ({(186, 28, 186, 64): '(self.img_height, self.img_width, 3)'}, {}), '((self.img_height, self.img_width, 3))', True, 'import numpy as np\n'), ((189, 19, 189, 97), 'cv2.rectangle', 'cv2.rectangle', ({(189, 33, 189, 41): 'self.img', (189, 43, 189, 48): '(0, 0)', (189, 50, 189, 81): '(self.img_width, self.y_offset)', (189, 83, 189, 92): '(0, 255, 0)', (189, 94, 189, 96): '-1'}, {}), '(self.img, (0, 0), (self.img_width, self.y_offset), (0, 255, 0\n ), -1)', False, 'import cv2\n'), ((216, 19, 216, 40), 'cv2.flip', 'cv2.flip', ({(216, 28, 216, 36): 'self.img', (216, 38, 216, 39): '0'}, {}), '(self.img, 0)', False, 'import cv2\n'), ((226, 8, 226, 35), 'cv2.imshow', 'cv2.imshow', ({(226, 19, 226, 24): '"""Arm"""', (226, 26, 226, 34): 'self.img'}, {}), "('Arm', self.img)", False, 'import cv2\n'), ((227, 8, 227, 35), 'cv2.moveWindow', 'cv2.moveWindow', ({(227, 23, 227, 28): '"""Arm"""', (227, 29, 227, 31): '(20)', (227, 32, 227, 34): '(50)'}, {}), "('Arm', 20, 50)", False, 'import cv2\n'), ((235, 19, 235, 65), 'numpy.zeros', 'np.zeros', ({(235, 28, 235, 64): '(self.img_height, self.img_width, 3)'}, {}), '((self.img_height, self.img_width, 3))', True, 'import numpy as np\n'), ((14, 25, 14, 38), 'piarm.PiArm', 'piarm.PiArm', ({}, {}), '()', False, 'import piarm\n'), ((36, 25, 36, 50), 'numpy.ones', 'np.ones', ({(36, 33, 36, 49): 'self.num_members'}, {}), '(self.num_members)', True, 'import numpy as np\n'), ((111, 12, 111, 25), 'time.sleep', 'time.sleep', ({(111, 23, 111, 24): '(1)'}, {}), '(1)', False, 'import time\n'), ((134, 16, 134, 29), 'numpy.sin', 'np.sin', ({(134, 23, 134, 28): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((135, 16, 135, 29), 'numpy.cos', 'np.cos', ({(135, 23, 135, 28): 'theta'}, {}), '(theta)', True, 'import numpy as np\n'), ((231, 25, 231, 50), 'numpy.ones', 'np.ones', ({(231, 33, 231, 49): 'self.num_members'}, {}), '(self.num_members)', True, 'import numpy as np\n'), ((153, 68, 153, 94), 'numpy.deg2rad', 'np.deg2rad', ({(153, 79, 153, 93): 'self.angles[0]'}, {}), '(self.angles[0])', True, 'import numpy as np\n'), ((154, 68, 154, 94), 'numpy.deg2rad', 'np.deg2rad', ({(154, 79, 154, 93): 'self.angles[0]'}, {}), '(self.angles[0])', True, 'import numpy as np\n'), ((157, 68, 157, 94), 'numpy.deg2rad', 'np.deg2rad', ({(157, 79, 157, 93): 'self.angles[1]'}, {}), '(self.angles[1])', True, 'import numpy as np\n'), ((158, 68, 158, 94), 'numpy.deg2rad', 'np.deg2rad', ({(158, 79, 158, 93): 'self.angles[1]'}, {}), '(self.angles[1])', True, 'import numpy as np\n'), ((161, 68, 161, 94), 'numpy.deg2rad', 'np.deg2rad', ({(161, 79, 161, 93): 'self.angles[2]'}, {}), '(self.angles[2])', True, 'import numpy as np\n'), ((162, 68, 162, 94), 'numpy.deg2rad', 'np.deg2rad', ({(162, 79, 162, 93): 'self.angles[2]'}, {}), '(self.angles[2])', True, 'import numpy as np\n')] |
jonywtf/grpc | src/python/src/grpc/_adapter/_links_test.py | 124f3c5a4b65bb88f13be7c68482eb83d945ad02 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test of the GRPC-backed ForeLink and RearLink."""
import threading
import unittest
from grpc._adapter import _proto_scenarios
from grpc._adapter import _test_links
from grpc._adapter import fore
from grpc._adapter import rear
from grpc.framework.base import interfaces
from grpc.framework.base.packets import packets as tickets
from grpc.framework.foundation import logging_pool
_IDENTITY = lambda x: x
_TIMEOUT = 2
class RoundTripTest(unittest.TestCase):
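  """Tests that tickets round-trip between a rear link and a fore link.
  The fore link serves on a local port and the rear link connects to it over
  localhost, so each front-to-back ticket crosses a real channel before the
  scripted rear action replies with a back-to-front ticket.
  """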
def setUp(self):
self.fore_link_pool = logging_pool.pool(80)
self.rear_link_pool = logging_pool.pool(80)
def tearDown(self):
self.rear_link_pool.shutdown(wait=True)
self.fore_link_pool.shutdown(wait=True)
def testZeroMessageRoundTrip(self):
test_operation_id = object()
test_method = 'test method'
test_fore_link = _test_links.ForeLink(None, None)
def rear_action(front_to_back_ticket, fore_link):
if front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE):
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None)
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: None}, {test_method: None}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool, {test_method: None},
{test_method: None}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
front_to_back_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
rear_link.accept_front_to_back_ticket(front_to_back_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_fore_link.condition:
self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION)
def testEntireRoundTrip(self):
test_operation_id = object()
test_method = 'test method'
test_front_to_back_datum = b'\x07'
test_back_to_front_datum = b'\x08'
test_fore_link = _test_links.ForeLink(None, None)
rear_sequence_number = [0]
def rear_action(front_to_back_ticket, fore_link):
if front_to_back_ticket.payload is None:
payload = None
else:
payload = test_back_to_front_datum
terminal = front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
if payload is not None or terminal:
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, rear_sequence_number[0],
tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
payload)
rear_sequence_number[0] += 1
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: _IDENTITY},
{test_method: _IDENTITY}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool, {test_method: _IDENTITY},
{test_method: _IDENTITY}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
front_to_back_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
interfaces.ServicedSubscription.Kind.FULL, None,
test_front_to_back_datum, _TIMEOUT)
rear_link.accept_front_to_back_ticket(front_to_back_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_rear_link.condition:
front_to_back_payloads = tuple(
ticket.payload for ticket in test_rear_link.tickets
if ticket.payload is not None)
with test_fore_link.condition:
back_to_front_payloads = tuple(
ticket.payload for ticket in test_fore_link.tickets
if ticket.payload is not None)
self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads)
self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads)
def _perform_scenario_test(self, scenario):
test_operation_id = object()
test_method = scenario.method()
test_fore_link = _test_links.ForeLink(None, None)
rear_lock = threading.Lock()
rear_sequence_number = [0]
def rear_action(front_to_back_ticket, fore_link):
with rear_lock:
if front_to_back_ticket.payload is not None:
response = scenario.response_for_request(front_to_back_ticket.payload)
else:
response = None
terminal = front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
if response is not None or terminal:
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, rear_sequence_number[0],
tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
response)
rear_sequence_number[0] += 1
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: scenario.deserialize_request},
{test_method: scenario.serialize_response}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool,
{test_method: scenario.serialize_request},
{test_method: scenario.deserialize_response}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
commencement_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method,
interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
fore_sequence_number = 1
rear_link.accept_front_to_back_ticket(commencement_ticket)
for request in scenario.requests():
continuation_ticket = tickets.FrontToBackPacket(
test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION,
None, None, None, request, None)
fore_sequence_number += 1
rear_link.accept_front_to_back_ticket(continuation_ticket)
completion_ticket = tickets.FrontToBackPacket(
test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None,
None, None, None, None)
fore_sequence_number += 1
rear_link.accept_front_to_back_ticket(completion_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_rear_link.condition:
requests = tuple(
ticket.payload for ticket in test_rear_link.tickets
if ticket.payload is not None)
with test_fore_link.condition:
responses = tuple(
ticket.payload for ticket in test_fore_link.tickets
if ticket.payload is not None)
self.assertTrue(scenario.verify_requests(requests))
self.assertTrue(scenario.verify_responses(responses))
def testEmptyScenario(self):
self._perform_scenario_test(_proto_scenarios.EmptyScenario())
def testBidirectionallyUnaryScenario(self):
self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())
def testBidirectionallyStreamingScenario(self):
self._perform_scenario_test(
_proto_scenarios.BidirectionallyStreamingScenario())
if __name__ == '__main__':
unittest.main()
| [((249, 2, 249, 17), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((50, 26, 50, 47), 'grpc.framework.foundation.logging_pool.pool', 'logging_pool.pool', ({(50, 44, 50, 46): '80'}, {}), '(80)', False, 'from grpc.framework.foundation import logging_pool\n'), ((51, 26, 51, 47), 'grpc.framework.foundation.logging_pool.pool', 'logging_pool.pool', ({(51, 44, 51, 46): '80'}, {}), '(80)', False, 'from grpc.framework.foundation import logging_pool\n'), ((60, 21, 60, 53), 'grpc._adapter._test_links.ForeLink', '_test_links.ForeLink', ({(60, 42, 60, 46): 'None', (60, 48, 60, 52): 'None'}, {}), '(None, None)', False, 'from grpc._adapter import _test_links\n'), ((67, 21, 67, 60), 'grpc._adapter._test_links.RearLink', '_test_links.RearLink', ({(67, 42, 67, 53): 'rear_action', (67, 55, 67, 59): 'None'}, {}), '(rear_action, None)', False, 'from grpc._adapter import _test_links\n'), ((69, 16, 70, 80), 'grpc._adapter.fore.ForeLink', 'fore.ForeLink', ({(70, 8, 70, 27): 'self.fore_link_pool', (70, 29, 70, 48): '{test_method: None}', (70, 50, 70, 69): '{test_method: None}', (70, 71, 70, 75): 'None', (70, 77, 70, 79): '()'}, {}), '(self.fore_link_pool, {test_method: None}, {test_method: None},\n None, ())', False, 'from grpc._adapter import fore\n'), ((76, 16, 78, 53), 'grpc._adapter.rear.RearLink', 'rear.RearLink', ({(77, 8, 77, 19): '"""localhost"""', (77, 21, 77, 25): 'port', (77, 27, 77, 46): 'self.rear_link_pool', (77, 48, 77, 67): '{test_method: None}', (78, 8, 78, 27): '{test_method: None}', (78, 29, 78, 34): 'False', (78, 36, 78, 40): 'None', (78, 42, 78, 46): 'None', (78, 48, 78, 52): 'None'}, {}), "('localhost', port, self.rear_link_pool, {test_method: None},\n {test_method: None}, False, None, None, None)", False, 'from grpc._adapter import rear\n'), ((83, 27, 85, 72), 'grpc.framework.base.packets.packets.FrontToBackPacket', 'tickets.FrontToBackPacket', ({(84, 8, 84, 25): 'test_operation_id', (84, 27, 84, 28): '0', (84, 30, 84, 49): 'tickets.Kind.ENTIRE', (84, 51, 84, 62): 'test_method', (85, 8, 85, 49): 'interfaces.ServicedSubscription.Kind.FULL', (85, 51, 85, 55): 'None', (85, 57, 85, 61): 'None', (85, 63, 85, 71): '_TIMEOUT'}, {}), '(test_operation_id, 0, tickets.Kind.ENTIRE,\n test_method, interfaces.ServicedSubscription.Kind.FULL, None, None,\n _TIMEOUT)', True, 'from grpc.framework.base.packets import packets as tickets\n'), ((104, 21, 104, 53), 'grpc._adapter._test_links.ForeLink', '_test_links.ForeLink', ({(104, 42, 104, 46): 'None', (104, 48, 104, 52): 'None'}, {}), '(None, None)', False, 'from grpc._adapter import _test_links\n'), ((120, 21, 120, 60), 'grpc._adapter._test_links.RearLink', '_test_links.RearLink', ({(120, 42, 120, 53): 'rear_action', (120, 55, 120, 59): 'None'}, {}), '(rear_action, None)', False, 'from grpc._adapter import _test_links\n'), ((122, 16, 124, 43), 'grpc._adapter.fore.ForeLink', 'fore.ForeLink', ({(123, 8, 123, 27): 'self.fore_link_pool', (123, 29, 123, 53): '{test_method: _IDENTITY}', (124, 8, 124, 32): '{test_method: _IDENTITY}', (124, 34, 124, 38): 'None', (124, 40, 124, 42): '()'}, {}), '(self.fore_link_pool, {test_method: _IDENTITY}, {test_method:\n _IDENTITY}, None, ())', False, 'from grpc._adapter import fore\n'), ((130, 16, 132, 58), 'grpc._adapter.rear.RearLink', 'rear.RearLink', ({(131, 8, 131, 19): '"""localhost"""', (131, 21, 131, 25): 'port', (131, 27, 131, 46): 'self.rear_link_pool', (131, 48, 131, 72): '{test_method: _IDENTITY}', (132, 8, 132, 32): '{test_method: _IDENTITY}', (132, 34, 132, 39): 'False', (132, 41, 
132, 45): 'None', (132, 47, 132, 51): 'None', (132, 53, 132, 57): 'None'}, {}), "('localhost', port, self.rear_link_pool, {test_method:\n _IDENTITY}, {test_method: _IDENTITY}, False, None, None, None)", False, 'from grpc._adapter import rear\n'), ((137, 27, 140, 43), 'grpc.framework.base.packets.packets.FrontToBackPacket', 'tickets.FrontToBackPacket', ({(138, 8, 138, 25): 'test_operation_id', (138, 27, 138, 28): '0', (138, 30, 138, 49): 'tickets.Kind.ENTIRE', (138, 51, 138, 62): 'test_method', (139, 8, 139, 49): 'interfaces.ServicedSubscription.Kind.FULL', (139, 51, 139, 55): 'None', (140, 8, 140, 32): 'test_front_to_back_datum', (140, 34, 140, 42): '_TIMEOUT'}, {}), '(test_operation_id, 0, tickets.Kind.ENTIRE,\n test_method, interfaces.ServicedSubscription.Kind.FULL, None,\n test_front_to_back_datum, _TIMEOUT)', True, 'from grpc.framework.base.packets import packets as tickets\n'), ((165, 21, 165, 53), 'grpc._adapter._test_links.ForeLink', '_test_links.ForeLink', ({(165, 42, 165, 46): 'None', (165, 48, 165, 52): 'None'}, {}), '(None, None)', False, 'from grpc._adapter import _test_links\n'), ((166, 16, 166, 32), 'threading.Lock', 'threading.Lock', ({}, {}), '()', False, 'import threading\n'), ((183, 21, 183, 60), 'grpc._adapter._test_links.RearLink', '_test_links.RearLink', ({(183, 42, 183, 53): 'rear_action', (183, 55, 183, 59): 'None'}, {}), '(rear_action, None)', False, 'from grpc._adapter import _test_links\n'), ((185, 16, 187, 61), 'grpc._adapter.fore.ForeLink', 'fore.ForeLink', ({(186, 8, 186, 27): 'self.fore_link_pool', (186, 29, 186, 72): '{test_method: scenario.deserialize_request}', (187, 8, 187, 50): '{test_method: scenario.serialize_response}', (187, 52, 187, 56): 'None', (187, 58, 187, 60): '()'}, {}), '(self.fore_link_pool, {test_method: scenario.\n deserialize_request}, {test_method: scenario.serialize_response}, None, ())', False, 'from grpc._adapter import fore\n'), ((193, 16, 196, 78), 'grpc._adapter.rear.RearLink', 'rear.RearLink', ({(194, 8, 194, 19): '"""localhost"""', (194, 21, 194, 25): 'port', (194, 27, 194, 46): 'self.rear_link_pool', (195, 8, 195, 49): '{test_method: scenario.serialize_request}', (196, 8, 196, 52): '{test_method: scenario.deserialize_response}', (196, 54, 196, 59): 'False', (196, 61, 196, 65): 'None', (196, 67, 196, 71): 'None', (196, 73, 196, 77): 'None'}, {}), "('localhost', port, self.rear_link_pool, {test_method:\n scenario.serialize_request}, {test_method: scenario.\n deserialize_response}, False, None, None, None)", False, 'from grpc._adapter import rear\n'), ((201, 26, 203, 72), 'grpc.framework.base.packets.packets.FrontToBackPacket', 'tickets.FrontToBackPacket', ({(202, 8, 202, 25): 'test_operation_id', (202, 27, 202, 28): '0', (202, 30, 202, 55): 'tickets.Kind.COMMENCEMENT', (202, 57, 202, 68): 'test_method', (203, 8, 203, 49): 'interfaces.ServicedSubscription.Kind.FULL', (203, 51, 203, 55): 'None', (203, 57, 203, 61): 'None', (203, 63, 203, 71): '_TIMEOUT'}, {}), '(test_operation_id, 0, tickets.Kind.COMMENCEMENT,\n test_method, interfaces.ServicedSubscription.Kind.FULL, None, None,\n _TIMEOUT)', True, 'from grpc.framework.base.packets import packets as tickets\n'), ((212, 24, 214, 31), 'grpc.framework.base.packets.packets.FrontToBackPacket', 'tickets.FrontToBackPacket', ({(213, 8, 213, 25): 'test_operation_id', (213, 27, 213, 47): 'fore_sequence_number', (213, 49, 213, 72): 'tickets.Kind.COMPLETION', (213, 74, 213, 78): 'None', (214, 8, 214, 12): 'None', (214, 14, 214, 18): 'None', (214, 20, 214, 24): 'None', (214, 26, 214, 30): 
'None'}, {}), '(test_operation_id, fore_sequence_number, tickets.\n Kind.COMPLETION, None, None, None, None, None)', True, 'from grpc.framework.base.packets import packets as tickets\n'), ((207, 28, 209, 42), 'grpc.framework.base.packets.packets.FrontToBackPacket', 'tickets.FrontToBackPacket', ({(208, 10, 208, 27): 'test_operation_id', (208, 29, 208, 49): 'fore_sequence_number', (208, 51, 208, 76): 'tickets.Kind.CONTINUATION', (209, 10, 209, 14): 'None', (209, 16, 209, 20): 'None', (209, 22, 209, 26): 'None', (209, 28, 209, 35): 'request', (209, 37, 209, 41): 'None'}, {}), '(test_operation_id, fore_sequence_number, tickets.\n Kind.CONTINUATION, None, None, None, request, None)', True, 'from grpc.framework.base.packets import packets as tickets\n'), ((238, 32, 238, 64), 'grpc._adapter._proto_scenarios.EmptyScenario', '_proto_scenarios.EmptyScenario', ({}, {}), '()', False, 'from grpc._adapter import _proto_scenarios\n'), ((241, 32, 241, 79), 'grpc._adapter._proto_scenarios.BidirectionallyUnaryScenario', '_proto_scenarios.BidirectionallyUnaryScenario', ({}, {}), '()', False, 'from grpc._adapter import _proto_scenarios\n'), ((245, 8, 245, 59), 'grpc._adapter._proto_scenarios.BidirectionallyStreamingScenario', '_proto_scenarios.BidirectionallyStreamingScenario', ({}, {}), '()', False, 'from grpc._adapter import _proto_scenarios\n'), ((64, 31, 65, 80), 'grpc.framework.base.packets.packets.BackToFrontPacket', 'tickets.BackToFrontPacket', ({(65, 12, 65, 45): 'front_to_back_ticket.operation_id', (65, 47, 65, 48): '0', (65, 50, 65, 73): 'tickets.Kind.COMPLETION', (65, 75, 65, 79): 'None'}, {}), '(front_to_back_ticket.operation_id, 0, tickets.\n Kind.COMPLETION, None)', True, 'from grpc.framework.base.packets import packets as tickets\n'), ((114, 31, 117, 20), 'grpc.framework.base.packets.packets.BackToFrontPacket', 'tickets.BackToFrontPacket', ({(115, 12, 115, 45): 'front_to_back_ticket.operation_id', (115, 47, 115, 70): 'rear_sequence_number[0]', (116, 12, 116, 78): 'tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION', (117, 12, 117, 19): 'payload'}, {}), '(front_to_back_ticket.operation_id,\n rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else\n tickets.Kind.CONTINUATION, payload)', True, 'from grpc.framework.base.packets import packets as tickets\n'), ((177, 31, 180, 21), 'grpc.framework.base.packets.packets.BackToFrontPacket', 'tickets.BackToFrontPacket', ({(178, 12, 178, 45): 'front_to_back_ticket.operation_id', (178, 47, 178, 70): 'rear_sequence_number[0]', (179, 12, 179, 78): 'tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION', (180, 12, 180, 20): 'response'}, {}), '(front_to_back_ticket.operation_id,\n rear_sequence_number[0], tickets.Kind.COMPLETION if terminal else\n tickets.Kind.CONTINUATION, response)', True, 'from grpc.framework.base.packets import packets as tickets\n')] |
stjordanis/Hyperactive | tests/_test_progress_board.py | 5acf247d8023ff6761593b9d0954bdd912d20aed | import os, glob
import subprocess
from subprocess import DEVNULL, STDOUT
abspath = os.path.abspath(__file__)
dir_ = os.path.dirname(abspath)
files = glob.glob(dir_ + "/_progress_board_tests/_test_progress_board_*.py")
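# Note (added for clarity): the "\033[...m" prefixes below are ANSI escape codes --
# 0;33;40 = yellow on black ("Testing"), 0;31;40 = red on black ("Error in"),
# 0;32;40 = green on black (success) -- used only to colour the console output.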
for file_path in files:
file_name = str(file_path.rsplit("/", maxsplit=1)[1])
try:
print("\033[0;33;40m Testing", file_name, end="...\r")
subprocess.check_call(["pytest", file_path], stdout=DEVNULL, stderr=STDOUT)
except subprocess.CalledProcessError:
print("\033[0;31;40m Error in", file_name)
else:
print("\033[0;32;40m", file_name, "is correct")
| [((6, 10, 6, 35), 'os.path.abspath', 'os.path.abspath', ({(6, 26, 6, 34): '__file__'}, {}), '(__file__)', False, 'import os, glob\n'), ((7, 7, 7, 31), 'os.path.dirname', 'os.path.dirname', ({(7, 23, 7, 30): 'abspath'}, {}), '(abspath)', False, 'import os, glob\n'), ((9, 8, 9, 76), 'glob.glob', 'glob.glob', ({(9, 18, 9, 75): "dir_ + '/_progress_board_tests/_test_progress_board_*.py'"}, {}), "(dir_ + '/_progress_board_tests/_test_progress_board_*.py')", False, 'import os, glob\n'), ((16, 8, 16, 83), 'subprocess.check_call', 'subprocess.check_call', (), '', False, 'import subprocess\n')] |
jhalljhall/beiwe-backend | pages/forest_pages.py | 06d28926a2830c7ad53c32ec41ff49320932aeed | import csv
import datetime
from collections import defaultdict
from django.contrib import messages
from django.http.response import FileResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from authentication.admin_authentication import (authenticate_admin,
authenticate_researcher_study_access, forest_enabled)
from constants.data_access_api_constants import CHUNK_FIELDS
from constants.forest_constants import ForestTaskStatus, ForestTree
from database.data_access_models import ChunkRegistry
from database.study_models import Study
from database.tableau_api_models import ForestTask
from database.user_models import Participant
from forms.django_forms import CreateTasksForm
from libs.http_utils import easy_url
from libs.internal_types import ParticipantQuerySet, ResearcherRequest
from libs.streaming_zip import zip_generator
from libs.utils.date_utils import daterange
from middleware.abort_middleware import abort
from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer
@require_GET
@authenticate_researcher_study_access
@forest_enabled
def analysis_progress(request: ResearcherRequest, study_id=None):
study: Study = Study.objects.get(pk=study_id)
participants: ParticipantQuerySet = Participant.objects.filter(study=study_id)
# generate chart of study analysis progress logs
trackers = ForestTask.objects.filter(participant__in=participants).order_by("created_on")
start_date = (study.get_earliest_data_time_bin() or study.created_on).date()
end_date = (study.get_latest_data_time_bin() or timezone.now()).date()
# this code simultaneously builds up the chart of most recent forest results for date ranges
# by participant and tree, and tracks the metadata
params = dict()
results = defaultdict(lambda: "--")
tracker: ForestTask
for tracker in trackers:
for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True):
results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status
if tracker.status == tracker.status.success:
params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id
else:
params[(tracker.participant_id, tracker.forest_tree, date)] = None
# generate the date range for charting
dates = list(daterange(start_date, end_date, inclusive=True))
chart = []
for participant in participants:
for tree in ForestTree.values():
row = [participant.patient_id, tree] + \
[results[(participant.id, tree, date)] for date in dates]
chart.append(row)
    # ensure that within each tree, only a single set of param values is used (only the most recent runs
# are considered, and unsuccessful runs are assumed to invalidate old runs, clearing params)
params_conflict = False
for tree in set([k[1] for k in params.keys()]):
if len(set([m for k, m in params.items() if m is not None and k[1] == tree])) > 1:
params_conflict = True
break
return render(
request,
'forest/analysis_progress.html',
context=dict(
study=study,
chart_columns=["participant", "tree"] + dates,
status_choices=ForestTaskStatus,
params_conflict=params_conflict,
start_date=start_date,
end_date=end_date,
chart=chart # this uses the jinja safe filter and should never involve user input
)
)
@require_http_methods(['GET', 'POST'])
@authenticate_admin
@forest_enabled
def create_tasks(request: ResearcherRequest, study_id=None):
# Only a SITE admin can queue forest tasks
if not request.session_researcher.site_admin:
return abort(403)
try:
study = Study.objects.get(pk=study_id)
except Study.DoesNotExist:
return abort(404)
# FIXME: remove this double endpoint pattern, it is bad.
if request.method == "GET":
return render_create_tasks(request, study)
form = CreateTasksForm(data=request.POST, study=study)
if not form.is_valid():
error_messages = [
f'"{field}": {message}'
for field, messages in form.errors.items()
for message in messages
]
error_messages_string = "\n".join(error_messages)
messages.warning(request, f"Errors:\n\n{error_messages_string}")
return render_create_tasks(request, study)
form.save()
messages.success(request, "Forest tasks successfully queued!")
return redirect(easy_url("forest_pages.task_log", study_id=study_id))
@require_GET
@authenticate_researcher_study_access
@forest_enabled
def task_log(request: ResearcherRequest, study_id=None):
study = Study.objects.get(pk=study_id)
forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by("-created_on")
return render(
request,
"forest/task_log.html",
context=dict(
study=study,
is_site_admin=request.session_researcher.site_admin,
status_choices=ForestTaskStatus,
forest_log=ForestTaskSerializer(forest_tasks, many=True).data,
)
)
@require_GET
@authenticate_admin
def download_task_log(request: ResearcherRequest):
forest_tasks = ForestTask.objects.order_by("created_on")
return FileResponse(
stream_forest_task_log_csv(forest_tasks),
content_type="text/csv",
filename=f"forest_task_log_{timezone.now().isoformat()}.csv",
as_attachment=True,
)
@require_POST
@authenticate_admin
@forest_enabled
def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id):
if not request.session_researcher.site_admin:
return abort(403)
number_updated = \
ForestTask.objects.filter(
external_id=forest_task_external_id, status=ForestTaskStatus.queued
).update(
status=ForestTaskStatus.cancelled,
stacktrace=f"Canceled by {request.session_researcher.username} on {datetime.date.today()}",
)
if number_updated > 0:
messages.success(request, "Forest task successfully cancelled.")
else:
messages.warning(request, "Sorry, we were unable to find or cancel this Forest task.")
return redirect(easy_url("forest_pages.task_log", study_id=study_id))
@require_GET
@authenticate_admin
@forest_enabled
def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id):
try:
tracker: ForestTask = ForestTask.objects.get(
external_id=forest_task_external_id, participant__study_id=study_id
)
except ForestTask.DoesNotExist:
return abort(404)
chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS)
f = FileResponse(
zip_generator(chunks),
content_type="zip",
as_attachment=True,
filename=f"{tracker.get_slug()}.zip",
)
f.set_headers(None)
return f
def stream_forest_task_log_csv(forest_tasks):
buffer = CSVBuffer()
writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields)
writer.writeheader()
yield buffer.read()
for forest_task in forest_tasks:
writer.writerow(ForestTaskCsvSerializer(forest_task).data)
yield buffer.read()
def render_create_tasks(request: ResearcherRequest, study: Study):
participants = Participant.objects.filter(study=study)
try:
start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest("time_bin")
end_date = ChunkRegistry.objects.filter(participant__in=participants).latest("time_bin")
start_date = start_date.time_bin.date()
end_date = end_date.time_bin.date()
except ChunkRegistry.DoesNotExist:
start_date = study.created_on.date()
end_date = timezone.now().date()
return render(
request,
"forest/create_tasks.html",
context=dict(
study=study,
participants=list(
study.participants.order_by("patient_id").values_list("patient_id", flat=True)
),
trees=ForestTree.choices(),
start_date=start_date.strftime('%Y-%m-%d'),
end_date=end_date.strftime('%Y-%m-%d')
)
)
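# Note (added for clarity): CSVBuffer is a minimal in-memory "file" for csv.DictWriter.
# Each writeheader()/writerow() call stores exactly one line, which
# stream_forest_task_log_csv() yields right away, so the task log CSV is streamed
# to the client row by row instead of being assembled in memory first.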
class CSVBuffer:
line = ""
def read(self):
return self.line
def write(self, line):
self.line = line
| [((87, 1, 87, 38), 'django.views.decorators.http.require_http_methods', 'require_http_methods', ({(87, 22, 87, 37): "['GET', 'POST']"}, {}), "(['GET', 'POST'])", False, 'from django.views.decorators.http import require_GET, require_http_methods, require_POST\n'), ((32, 19, 32, 49), 'database.study_models.Study.objects.get', 'Study.objects.get', (), '', False, 'from database.study_models import Study\n'), ((33, 40, 33, 82), 'database.user_models.Participant.objects.filter', 'Participant.objects.filter', (), '', False, 'from database.user_models import Participant\n'), ((44, 14, 44, 39), 'collections.defaultdict', 'defaultdict', ({(44, 26, 44, 38): "lambda : '--'"}, {}), "(lambda : '--')", False, 'from collections import defaultdict\n'), ((102, 11, 102, 58), 'forms.django_forms.CreateTasksForm', 'CreateTasksForm', (), '', False, 'from forms.django_forms import CreateTasksForm\n'), ((115, 4, 115, 66), 'django.contrib.messages.success', 'messages.success', ({(115, 21, 115, 28): 'request', (115, 30, 115, 65): '"""Forest tasks successfully queued!"""'}, {}), "(request, 'Forest tasks successfully queued!')", False, 'from django.contrib import messages\n'), ((123, 12, 123, 42), 'database.study_models.Study.objects.get', 'Study.objects.get', (), '', False, 'from database.study_models import Study\n'), ((140, 19, 140, 60), 'database.tableau_api_models.ForestTask.objects.order_by', 'ForestTask.objects.order_by', ({(140, 47, 140, 59): '"""created_on"""'}, {}), "('created_on')", False, 'from database.tableau_api_models import ForestTask\n'), ((196, 13, 196, 83), 'csv.DictWriter', 'csv.DictWriter', (), '', False, 'import csv\n'), ((206, 19, 206, 58), 'database.user_models.Participant.objects.filter', 'Participant.objects.filter', (), '', False, 'from database.user_models import Participant\n'), ((47, 20, 47, 93), 'libs.utils.date_utils.daterange', 'daterange', (), '', False, 'from libs.utils.date_utils import daterange\n'), ((55, 17, 55, 64), 'libs.utils.date_utils.daterange', 'daterange', (), '', False, 'from libs.utils.date_utils import daterange\n'), ((59, 20, 59, 39), 'constants.forest_constants.ForestTree.values', 'ForestTree.values', ({}, {}), '()', False, 'from constants.forest_constants import ForestTaskStatus, ForestTree\n'), ((93, 15, 93, 25), 'middleware.abort_middleware.abort', 'abort', ({(93, 21, 93, 24): '(403)'}, {}), '(403)', False, 'from middleware.abort_middleware import abort\n'), ((95, 16, 95, 46), 'database.study_models.Study.objects.get', 'Study.objects.get', (), '', False, 'from database.study_models import Study\n'), ((111, 8, 111, 72), 'django.contrib.messages.warning', 'messages.warning', ({(111, 25, 111, 32): 'request', (111, 34, 111, 71): 'f"""Errors:\n\n{error_messages_string}"""'}, {}), '(request, f"""Errors:\n\n{error_messages_string}""")', False, 'from django.contrib import messages\n'), ((116, 20, 116, 72), 'libs.http_utils.easy_url', 'easy_url', (), '', False, 'from libs.http_utils import easy_url\n'), ((154, 15, 154, 25), 'middleware.abort_middleware.abort', 'abort', ({(154, 21, 154, 24): '(403)'}, {}), '(403)', False, 'from middleware.abort_middleware import abort\n'), ((165, 8, 165, 72), 'django.contrib.messages.success', 'messages.success', ({(165, 25, 165, 32): 'request', (165, 34, 165, 71): '"""Forest task successfully cancelled."""'}, {}), "(request, 'Forest task successfully cancelled.')", False, 'from django.contrib import messages\n'), ((167, 8, 167, 94), 'django.contrib.messages.warning', 'messages.warning', ({(167, 25, 167, 32): 'request', (167, 34, 167, 
93): '"""Sorry, we were unable to find or cancel this Forest task."""'}, {}), "(request,\n 'Sorry, we were unable to find or cancel this Forest task.')", False, 'from django.contrib import messages\n'), ((169, 20, 169, 72), 'libs.http_utils.easy_url', 'easy_url', (), '', False, 'from libs.http_utils import easy_url\n'), ((177, 30, 179, 9), 'database.tableau_api_models.ForestTask.objects.get', 'ForestTask.objects.get', (), '', False, 'from database.tableau_api_models import ForestTask\n'), ((185, 8, 185, 29), 'libs.streaming_zip.zip_generator', 'zip_generator', ({(185, 22, 185, 28): 'chunks'}, {}), '(chunks)', False, 'from libs.streaming_zip import zip_generator\n'), ((36, 15, 36, 70), 'database.tableau_api_models.ForestTask.objects.filter', 'ForestTask.objects.filter', (), '', False, 'from database.tableau_api_models import ForestTask\n'), ((97, 15, 97, 25), 'middleware.abort_middleware.abort', 'abort', ({(97, 21, 97, 24): '(404)'}, {}), '(404)', False, 'from middleware.abort_middleware import abort\n'), ((124, 19, 124, 76), 'database.tableau_api_models.ForestTask.objects.filter', 'ForestTask.objects.filter', (), '', False, 'from database.tableau_api_models import ForestTask\n'), ((157, 8, 159, 9), 'database.tableau_api_models.ForestTask.objects.filter', 'ForestTask.objects.filter', (), '', False, 'from database.tableau_api_models import ForestTask\n'), ((181, 15, 181, 25), 'middleware.abort_middleware.abort', 'abort', ({(181, 21, 181, 24): '(404)'}, {}), '(404)', False, 'from middleware.abort_middleware import abort\n'), ((183, 13, 183, 74), 'database.data_access_models.ChunkRegistry.objects.filter', 'ChunkRegistry.objects.filter', (), '', False, 'from database.data_access_models import ChunkRegistry\n'), ((39, 52, 39, 66), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((201, 24, 201, 60), 'serializers.forest_serializers.ForestTaskCsvSerializer', 'ForestTaskCsvSerializer', ({(201, 48, 201, 59): 'forest_task'}, {}), '(forest_task)', False, 'from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer\n'), ((208, 21, 208, 79), 'database.data_access_models.ChunkRegistry.objects.filter', 'ChunkRegistry.objects.filter', (), '', False, 'from database.data_access_models import ChunkRegistry\n'), ((209, 19, 209, 77), 'database.data_access_models.ChunkRegistry.objects.filter', 'ChunkRegistry.objects.filter', (), '', False, 'from database.data_access_models import ChunkRegistry\n'), ((161, 79, 161, 100), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((214, 19, 214, 33), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n'), ((223, 18, 223, 38), 'constants.forest_constants.ForestTree.choices', 'ForestTree.choices', ({}, {}), '()', False, 'from constants.forest_constants import ForestTaskStatus, ForestTree\n'), ((132, 23, 132, 68), 'serializers.forest_serializers.ForestTaskSerializer', 'ForestTaskSerializer', (), '', False, 'from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer\n'), ((144, 36, 144, 50), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n')] |
janthiemen/data_scout | data_scout/transformations/math_custom.py | 6366eedfb20ed429bc96100de4dd2c7409e5dd88 | from __future__ import division
from .transformation import Transformation
from pyparsing import (Literal, CaselessLiteral, Word, Combine, Group, Optional,
ZeroOrMore, Forward, nums, alphas, oneOf)
import math
import re
import operator
__author__ = 'Paul McGuire'
__version__ = '$Revision: 0.0 $'
__date__ = '$Date: 2009-03-20 $'
__source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py
http://pyparsing.wikispaces.com/message/view/home/15549426
'''
__note__ = '''
All I've done is rewrap Paul McGuire's fourFn.py as a class, so I can use it
more easily in other places.
'''
class Custom(Transformation):
"""
Most of this code comes from the fourFn.py pyparsing example
"""
title = "Custom equation"
key = "Math equation"
fields = {
"equation": {"name": "Equation", "type": "string", "help": "The equation to evaluate. Column values should be entered as {COLUMN NAME}",
"required": True, "input": "text", "default": ""},
"output": {"name": "Output column", "type": "string", "input": "text", "required": True,
"help": "The name of the (newly created) column that contains the results", "default": ""},
}
def __init__(self, arguments: dict, sample_size: int, example: dict = None):
"""
Initialize the transformation with the given parameters.
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
Arguments:
arguments {dict} -- The arguments
"""
super().__init__(arguments, sample_size, example)
self.equation = arguments["equation"]
self.output = arguments["output"]
point = Literal(".")
e = CaselessLiteral("E")
fnumber = Combine(Word("+-" + nums, nums) +
Optional(point + Optional(Word(nums))) +
Optional(e + Word("+-" + nums, nums)))
ident = Word(alphas, alphas + nums + "_$")
plus = Literal("+")
minus = Literal("-")
mult = Literal("*")
div = Literal("/")
mod = Literal("%")
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
addop = plus | minus
multop = mult | div | mod
expop = Literal("^")
pi = CaselessLiteral("PI")
expr = Forward()
atom = ((Optional(oneOf("- +")) +
(ident + lpar + expr + rpar | pi | e | fnumber).setParseAction(self.push_first))
| Optional(oneOf("- +")) + Group(lpar + expr + rpar)
).setParseAction(self.push_u_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of
# "atom [ ^ atom ]...", we get right-to-left exponents, instead of left-to-right
# that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor << atom + \
ZeroOrMore((expop + factor).setParseAction(self.push_first))
term = factor + \
ZeroOrMore((multop + factor).setParseAction(self.push_first))
expr << term + \
ZeroOrMore((addop + term).setParseAction(self.push_first))
# addop_term = ( addop + term ).setParseAction( self.push_first )
# general_term = term + ZeroOrMore( addop_term ) | OneOrMore( addop_term)
# expr << general_term
self.bnf = expr
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
self.opn = {"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"%": operator.mod,
"/": operator.truediv,
"^": operator.pow}
self.expr_stack = None
        self.fn = {"sin": math.sin,
                   "sinh": math.sinh,
                   "cos": math.cos,
                   "cosh": math.cosh,
                   "tan": math.tan,
                   "tanh": math.tanh,
                   "exp": math.exp,
                   "sqrt": math.sqrt,
                   "radians": math.radians,
                   "degrees": math.degrees,
                   "sign": lambda x: 0 if x == 0 else x / abs(x),
                   "log": math.log10,
                   "ln": math.log,
                   "abs": abs,
                   "trunc": lambda a: int(a),
                   "round": round,
                   "floor": math.floor,
                   "ceil": math.ceil,
                   "sgn": lambda a: 0 if abs(a) <= epsilon else (a > 0) - (a < 0)}  # cmp() was removed in Python 3
def push_first(self, strg, loc, toks):
self.expr_stack.append(toks[0])
def push_u_minus(self, strg, loc, toks):
if toks and toks[0] == '-':
self.expr_stack.append('unary -')
def evaluate_stack(self, s):
op = s.pop()
if op == 'unary -':
return -self.evaluate_stack(s)
if op in "+-*/^%":
op2 = self.evaluate_stack(s)
op1 = self.evaluate_stack(s)
return self.opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in self.fn:
return self.fn[op](self.evaluate_stack(s))
elif op[0].isalpha():
return 0
else:
return float(op)
def eval(self, num_string, parse_all=True):
self.expr_stack = []
results = self.bnf.parseString(num_string, parse_all)
val = self.evaluate_stack(self.expr_stack[:])
return val
def __call__(self, row, index: int):
"""This class is called on each row.
Arguments:
row {dict} -- The complete row
Returns:
dict -- The row, including the extra output column
"""
row[self.output] = self.eval(re.sub(r'{(\w+)}', lambda x: str(row.get(x.group(1), 0)), self.equation))
return row, index
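# Illustrative usage sketch (added for clarity; not part of the original module).
# The column names "price"/"cost" and the direct construction below are assumptions
# for demonstration only -- in Data Scout this transformation is normally built by
# the pipeline from the `fields` definition above.
if __name__ == "__main__":
    custom = Custom({"equation": "({price} - {cost}) / {price}", "output": "margin"},
                    sample_size=1, example=None)
    row, _ = custom({"price": 10.0, "cost": 4.0}, index=0)
    print(row["margin"])  # 0.6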
| [((56, 16, 56, 28), 'pyparsing.Literal', 'Literal', ({(56, 24, 56, 27): '"""."""'}, {}), "('.')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((57, 12, 57, 32), 'pyparsing.CaselessLiteral', 'CaselessLiteral', ({(57, 28, 57, 31): '"""E"""'}, {}), "('E')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((61, 16, 61, 50), 'pyparsing.Word', 'Word', ({(61, 21, 61, 27): 'alphas', (61, 29, 61, 49): "alphas + nums + '_$'"}, {}), "(alphas, alphas + nums + '_$')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((62, 15, 62, 27), 'pyparsing.Literal', 'Literal', ({(62, 23, 62, 26): '"""+"""'}, {}), "('+')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((63, 16, 63, 28), 'pyparsing.Literal', 'Literal', ({(63, 24, 63, 27): '"""-"""'}, {}), "('-')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((64, 15, 64, 27), 'pyparsing.Literal', 'Literal', ({(64, 23, 64, 26): '"""*"""'}, {}), "('*')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((65, 14, 65, 26), 'pyparsing.Literal', 'Literal', ({(65, 22, 65, 25): '"""/"""'}, {}), "('/')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((66, 14, 66, 26), 'pyparsing.Literal', 'Literal', ({(66, 22, 66, 25): '"""%"""'}, {}), "('%')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((71, 16, 71, 28), 'pyparsing.Literal', 'Literal', ({(71, 24, 71, 27): '"""^"""'}, {}), "('^')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((72, 13, 72, 34), 'pyparsing.CaselessLiteral', 'CaselessLiteral', ({(72, 29, 72, 33): '"""PI"""'}, {}), "('PI')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((73, 15, 73, 24), 'pyparsing.Forward', 'Forward', ({}, {}), '()', False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((81, 17, 81, 26), 'pyparsing.Forward', 'Forward', ({}, {}), '()', False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((67, 15, 67, 27), 'pyparsing.Literal', 'Literal', ({(67, 23, 67, 26): '"""("""'}, {}), "('(')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((68, 15, 68, 27), 'pyparsing.Literal', 'Literal', ({(68, 23, 68, 26): '""")"""'}, {}), "(')')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((58, 26, 58, 49), 'pyparsing.Word', 'Word', ({(58, 31, 58, 42): "'+-' + nums", (58, 44, 58, 48): 'nums'}, {}), "('+-' + nums, nums)", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((60, 39, 60, 
62), 'pyparsing.Word', 'Word', ({(60, 44, 60, 55): "'+-' + nums", (60, 57, 60, 61): 'nums'}, {}), "('+-' + nums, nums)", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((76, 43, 76, 68), 'pyparsing.Group', 'Group', ({(76, 49, 76, 67): 'lpar + expr + rpar'}, {}), '(lpar + expr + rpar)', False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((74, 26, 74, 38), 'pyparsing.oneOf', 'oneOf', ({(74, 32, 74, 37): '"""- +"""'}, {}), "('- +')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((76, 27, 76, 39), 'pyparsing.oneOf', 'oneOf', ({(76, 33, 76, 38): '"""- +"""'}, {}), "('- +')", False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n'), ((59, 52, 59, 62), 'pyparsing.Word', 'Word', ({(59, 57, 59, 61): 'nums'}, {}), '(nums)', False, 'from pyparsing import Literal, CaselessLiteral, Word, Combine, Group, Optional, ZeroOrMore, Forward, nums, alphas, oneOf\n')] |
cybertraining-dsc/fa19-516-171 | project/cloudmesh-storage/cloudmesh/vdir/api/manager.py | 1dba8cde09f7b05c80557ea7ae462161c590568b | #
# this manager stores directly into the db with DatabaseUpdate
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.common.console import Console
from cloudmesh.storage.Provider import Provider
import os
from datetime import datetime
class Vdir(object):
def __init__(self):
self.cm = CmDatabase()
self.col = self.cm.db['local-vdir']
self.directory = 'vdir'
def cd(self, dirname=None):
try:
if dirname is None:
if self.directory == 'vdir':
Console.error("Root directory reached.")
else:
cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
self.directory = cwd['parent']
pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
return pwd
else:
directory = self.col.find_one({'type': 'directory', 'cm.name': dirname})
if directory['parent'] == self.directory:
self.directory = dirname
pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
return pwd
else:
Console.error('Directory does not exist at this location.')
except Exception as e:
print(e)
    @DatabaseUpdate()
def mkdir(self, dirname):
try:
directory = self.col.find_one({"cm.name": dirname, 'type': 'directory'})
if directory is None:
dir_dict = dict()
dir_dict['cm'] = {
'name': dirname,
'kind': 'vdir',
'cloud': 'local'
}
dir_dict['type'] = 'directory'
dir_dict['parent'] = self.directory
dir_dict['cm']['created'] = datetime.utcnow()
dir_dict['cm']['modified'] = datetime.utcnow()
return dir_dict
else:
Console.error("Directory with that name exists.")
except Exception as e:
print(e)
def ls(self, directory=None):
try:
dash = '-' * 40
if directory is not None:
cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]})
count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]})
else:
cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})
count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})
locations = "{:<20} {:>}".format("Name", "Location") + "\n" + dash + "\n"
for i in range(0, count):
entry = cloudmesh[i]
if entry['type'] == 'fileendpoint':
location = entry['provider'] + ":" + entry['cloud_directory'] + "/" + entry['filename']
else:
if self.directory == '':
location = 'Vdir'
else:
location = self.directory
locations += "{:<20} {:>}".format(entry['cm']['name'], location) + "\n"
print(locations)
return locations
except Exception as e:
print(e)
    @DatabaseUpdate()
def add(self, endpoint, dir_and_name):
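        # Expected formats (inferred from the parsing below): `endpoint` looks like
        # "<provider>:<cloud_directory>/<filename>", while `dir_and_name` is
        # "<virtual_directory>/<filename>" inside the local vdir tree.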
try:
dirname = os.path.dirname(dir_and_name).split('/')[-1]
if dirname == '':
dirname = 'vdir'
directory = 'vdir'
else:
directory = self.col.find_one({"cm.name": dirname, 'type': 'directory'})
filename = os.path.basename(dir_and_name)
file = self.col.find_one({"cm.name": filename, 'type': 'fileendpoint'})
if directory is not None and file is None:
file_dict = dict()
file_dict['cm'] = {
'name': filename,
'kind': 'vdir',
'cloud': 'local'
}
file_dict['type'] = 'fileendpoint'
file_dict['vdirectory'] = dirname
file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1]
file_dict['filename'] = os.path.basename(endpoint)
file_dict['provider'] = os.path.dirname(endpoint).split(':')[0]
file_dict['cm']['created'] = datetime.utcnow()
file_dict['cm']['modified'] = datetime.utcnow()
return file_dict
elif directory is None:
Console.error("Virtual directory not found.")
elif file is not None:
print(file)
Console.error("File with that name already exists.")
except Exception as e:
print(e)
def get(self, name, destination=None):
try:
doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'})
if doc is not None:
self.col.update_one({'cm.name': name, 'type': 'fileendpoint'},
{'$set': {'modified': datetime.utcnow()}})
service = doc['provider']
source = os.path.join(doc['cloud_directory'], doc['filename'])
print(source)
if destination is None:
destination = '~/.cloudmesh/vdir'
p = Provider(service)
file = p.get(source, destination, False)
return file
else:
Console.error("File not found.")
except Exception as e:
print(e)
def delete(self, dir_or_name):
try:
result = self.col.find_one({'cm.name': dir_or_name})
self.col.delete_one({'cm.name': dir_or_name})
return result
except Exception as e:
print(e)
def status(self, dir_or_name):
try:
result = self.col.find_one({'cm.name': dir_or_name})
return result
except Exception as e:
print(e)
| [((40, 5, 40, 21), 'cloudmesh.mongo.DataBaseDecorator.DatabaseUpdate', 'DatabaseUpdate', ({}, {}), '()', False, 'from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate\n'), ((86, 5, 86, 21), 'cloudmesh.mongo.DataBaseDecorator.DatabaseUpdate', 'DatabaseUpdate', ({}, {}), '()', False, 'from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate\n'), ((15, 18, 15, 30), 'cloudmesh.mongo.CmDatabase.CmDatabase', 'CmDatabase', ({}, {}), '()', False, 'from cloudmesh.mongo.CmDatabase import CmDatabase\n'), ((95, 23, 95, 53), 'os.path.basename', 'os.path.basename', ({(95, 40, 95, 52): 'dir_and_name'}, {}), '(dir_and_name)', False, 'import os\n'), ((53, 44, 53, 61), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((54, 45, 54, 62), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((57, 16, 57, 65), 'cloudmesh.common.console.Console.error', 'Console.error', ({(57, 30, 57, 64): '"""Directory with that name exists."""'}, {}), "('Directory with that name exists.')", False, 'from cloudmesh.common.console import Console\n'), ((107, 40, 107, 66), 'os.path.basename', 'os.path.basename', ({(107, 57, 107, 65): 'endpoint'}, {}), '(endpoint)', False, 'import os\n'), ((109, 45, 109, 62), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((110, 46, 110, 63), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((127, 25, 127, 78), 'os.path.join', 'os.path.join', ({(127, 38, 127, 60): "doc['cloud_directory']", (127, 62, 127, 77): "doc['filename']"}, {}), "(doc['cloud_directory'], doc['filename'])", False, 'import os\n'), ((131, 20, 131, 37), 'cloudmesh.storage.Provider.Provider', 'Provider', ({(131, 29, 131, 36): 'service'}, {}), '(service)', False, 'from cloudmesh.storage.Provider import Provider\n'), ((135, 16, 135, 48), 'cloudmesh.common.console.Console.error', 'Console.error', ({(135, 30, 135, 47): '"""File not found."""'}, {}), "('File not found.')", False, 'from cloudmesh.common.console import Console\n'), ((23, 20, 23, 60), 'cloudmesh.common.console.Console.error', 'Console.error', ({(23, 34, 23, 59): '"""Root directory reached."""'}, {}), "('Root directory reached.')", False, 'from cloudmesh.common.console import Console\n'), ((36, 20, 36, 79), 'cloudmesh.common.console.Console.error', 'Console.error', ({(36, 34, 36, 78): '"""Directory does not exist at this location."""'}, {}), "('Directory does not exist at this location.')", False, 'from cloudmesh.common.console import Console\n'), ((113, 16, 113, 61), 'cloudmesh.common.console.Console.error', 'Console.error', ({(113, 30, 113, 60): '"""Virtual directory not found."""'}, {}), "('Virtual directory not found.')", False, 'from cloudmesh.common.console import Console\n'), ((89, 22, 89, 51), 'os.path.dirname', 'os.path.dirname', ({(89, 38, 89, 50): 'dir_and_name'}, {}), '(dir_and_name)', False, 'import os\n'), ((116, 16, 116, 68), 'cloudmesh.common.console.Console.error', 'Console.error', ({(116, 30, 116, 67): '"""File with that name already exists."""'}, {}), "('File with that name already exists.')", False, 'from cloudmesh.common.console import Console\n'), ((106, 47, 106, 72), 'os.path.dirname', 'os.path.dirname', ({(106, 63, 106, 71): 'endpoint'}, {}), '(endpoint)', False, 'import os\n'), ((108, 40, 108, 65), 'os.path.dirname', 'os.path.dirname', ({(108, 56, 108, 64): 'endpoint'}, {}), '(endpoint)', False, 'import os\n'), 
((125, 58, 125, 75), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n')] |
cjpit/redash | redash/query_runner/influx_db.py | 27aafdb07e3a427da8f88d55a0c0d7cc64379da2 | import json
import logging
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from influxdb import InfluxDBClusterClient
enabled = True
except ImportError:
enabled = False
def _transform_result(results):
result_columns = []
result_rows = []
for result in results:
for series in result.raw.get('series', []):
for column in series['columns']:
if column not in result_columns:
result_columns.append(column)
tags = series.get('tags', {})
for key in tags.keys():
if key not in result_columns:
result_columns.append(key)
for result in results:
for series in result.raw.get('series', []):
for point in series['values']:
result_row = {}
for column in result_columns:
tags = series.get('tags', {})
if column in tags:
result_row[column] = tags[column]
elif column in series['columns']:
index = series['columns'].index(column)
value = point[index]
result_row[column] = value
result_rows.append(result_row)
return json.dumps({
"columns": [{'name': c} for c in result_columns],
"rows": result_rows
}, cls=JSONEncoder)
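# Illustrative example of the transformation above (values are made up):
# a raw series {"columns": ["time", "value"], "tags": {"host": "a"}, "values": [[1, 42]]}
# contributes the columns ["time", "value", "host"] and the row
# {"time": 1, "value": 42, "host": "a"}; the returned JSON has the shape
# {"columns": [{"name": ...}, ...], "rows": [...]}.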
class InfluxDB(BaseQueryRunner):
noop_query = "show measurements limit 1"
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string'
}
},
'required': ['url']
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "influxdb"
def run_query(self, query, user):
client = InfluxDBClusterClient.from_DSN(self.configuration['url'])
logger.debug("influxdb url: %s", self.configuration['url'])
logger.debug("influxdb got query: %s", query)
try:
results = client.query(query)
if not isinstance(results, list):
results = [results]
json_data = _transform_result(results)
error = None
except Exception as ex:
json_data = None
            error = str(ex)  # Exception.message is unavailable in Python 3
return json_data, error
register(InfluxDB)
| [((7, 9, 7, 36), 'logging.getLogger', 'logging.getLogger', ({(7, 27, 7, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((45, 11, 48, 23), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((79, 17, 79, 74), 'influxdb.InfluxDBClusterClient.from_DSN', 'InfluxDBClusterClient.from_DSN', ({(79, 48, 79, 73): "self.configuration['url']"}, {}), "(self.configuration['url'])", False, 'from influxdb import InfluxDBClusterClient\n')] |
victorfica/utils | ics/mergeGatingSets.py | b61935a860838a0e70afde7c9ecf2c68f51a2c4b | #!/usr/bin/env python
"""
Usage examples:
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv
sbatch -n 1 -t 3-0 -c 4 -o functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv"
sbatch -n 1 -t 3-0 -c 4 -o functions_markers_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv"
sbatch -n 1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv"
sbatch -n 1 -t 3-0 -c 4 -o cell_functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv"
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
To delete all tmp files use:
find . -name \merged_tmp*.feather -type f -delete
"""
def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False):
out = []
batchList = [opj(dataFolder, bf) for bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))]
if testbatch:
batchList = batchList[:1]
matchStr = 'gs_*.feather'
if ncpus > 1 and _PARMAP:
res = parmap.map(mergeSamples,
batchList,
extractionFunc,
extractionKwargs,
matchStr,
testsamples,
metaCols,
filters,
pool=Pool(processes=ncpus))
else:
if _PARMAP:
res = parmap.map(mergeSamples,
batchList,
extractionFunc,
extractionKwargs,
matchStr,
testsamples,
metaCols,
filters,
parallel=False)
else:
func = partial(mergeSamples,
extractionFunc=extractionFunc,
extractionKwargs=extractionKwargs,
matchStr=matchStr,
test=testsamples,
metaCols=metaCols,
filters=filters)
res = list(map(func, batchList))
outFilename = mergeFeathers(res, outFile, writeCSV=1 - int(useFeather))
return outFilename
def testMatching(dataFolder):
out = []
for bf in os.listdir(dataFolder):
batchFolder = opj(dataFolder, bf)
if os.path.isdir(opj(dataFolder, bf)):
featherLU = matchSamples(batchFolder, test=False)
tmp = pd.Series(featherLU).to_frame()
tmp.loc[:, 'batch'] = bf
tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf)
out.append(tmp)
return pd.concat(out, axis=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Extract features and merge batches into one CSV.')
parser.add_argument('--folder', type=str,
help='Data folder containing all batch folders.',
default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata')
parser.add_argument('--function', type=str,
help='Name of extraction to apply ("functions")',
default='functions')
parser.add_argument('--subsets', type=str,
help='Filename listing subsets for analysis.',
default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv')
parser.add_argument('--out', type=str,
help='Output filename for CSV.',
default='merged_out.csv')
parser.add_argument('--ncpus', type=int,
help='Number of CPUs/cores to use for parallelization.',
default=1)
parser.add_argument('--testsamples', action='store_true', help='Only process two samples from each batch.')
parser.add_argument('--testbatch', action='store_true', help='Only process twp samples from one batch.')
parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to validate metadata.')
parser.add_argument('--feather', action='store_true', help='Store as feather as oposed to CSV')
parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public github.com')
args = parser.parse_args()
try:
import parmap
from multiprocessing import Pool
_PARMAP = True
except:
_PARMAP = False
print('Could not find package "parmap", parallelization not enabled.')
import itertools
import pandas as pd
import numpy as np
from os.path import join as opj
import os
from functools import partial
import time
import sys
import feather
"""Make sure the utils are on path before importing"""
sys.path.append(args.utils)
# from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples
from ics import *
if args.matchingonly:
metaDf = testMatching(args.folder)
metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out))
print('Wrote matching metadata to %s.' % opj(args.folder, 'metamatch_' + args.out))
else:
subsets, markers, functions, exclude = parseSubsets(args.subsets)
features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
mincells=5)),
'bool_functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
mincells=0)),
'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets,
functions=functions,
markers=markers,
compressions=[('ALL', 2),
(['IFNg','IL2', 'TNFa'], 2)])),
'functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
compressions=[('ALL', 1),
('ALL', 2),
(['IFNg','IL2', 'TNFa'], 1),
(['IFNg','IL2', 'TNFa'], 2),
(['IFNg','IL2'], 1)])),
'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))}
extractionFunc, extractionKwargs = features[args.function]
if args.testbatch:
print('Test: processing samples from one batch')
if args.testsamples:
print('Test: processing two samples per batch')
outFile = opj(args.folder, args.out)
if args.feather:
outFile = outFile.replace('.csv', '.feather')
wrote = mergeBatches(args.folder,
extractionFunc=extractionFunc,
extractionKwargs=extractionKwargs,
testsamples=args.testsamples,
testbatch=args.testbatch,
outFile=outFile,
metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'],
filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]},
useFeather=int(args.feather),
ncpus=args.ncpus)
if wrote == outFile:
print('Wrote extracted data to %s.' % outFile)
else:
print('Error writing file to disk: %s' % wrote) | [((69, 14, 69, 36), 'os.listdir', 'os.listdir', ({(69, 25, 69, 35): 'dataFolder'}, {}), '(dataFolder)', False, 'import os\n'), ((77, 11, 77, 33), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((81, 13, 81, 100), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((124, 4, 124, 31), 'sys.path.append', 'sys.path.append', ({(124, 20, 124, 30): 'args.utils'}, {}), '(args.utils)', False, 'import sys\n'), ((29, 17, 29, 36), 'os.path.join', 'opj', ({(29, 21, 29, 31): 'dataFolder', (29, 33, 29, 35): 'bf'}, {}), '(dataFolder, bf)', True, 'from os.path import join as opj\n'), ((70, 22, 70, 41), 'os.path.join', 'opj', ({(70, 26, 70, 36): 'dataFolder', (70, 38, 70, 40): 'bf'}, {}), '(dataFolder, bf)', True, 'from os.path import join as opj\n'), ((164, 18, 164, 44), 'os.path.join', 'opj', ({(164, 22, 164, 33): 'args.folder', (164, 35, 164, 43): 'args.out'}, {}), '(args.folder, args.out)', True, 'from os.path import join as opj\n'), ((29, 47, 29, 69), 'os.listdir', 'os.listdir', ({(29, 58, 29, 68): 'dataFolder'}, {}), '(dataFolder)', False, 'import os\n'), ((45, 18, 53, 44), 'parmap.map', 'parmap.map', (), '', False, 'import parmap\n'), ((55, 19, 61, 43), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((71, 25, 71, 44), 'os.path.join', 'opj', ({(71, 29, 71, 39): 'dataFolder', (71, 41, 71, 43): 'bf'}, {}), '(dataFolder, bf)', True, 'from os.path import join as opj\n'), ((75, 41, 75, 60), 'os.path.join', 'opj', ({(75, 45, 75, 55): 'dataFolder', (75, 57, 75, 59): 'bf'}, {}), '(dataFolder, bf)', True, 'from os.path import join as opj\n'), ((131, 22, 131, 63), 'os.path.join', 'opj', ({(131, 26, 131, 37): 'args.folder', (131, 39, 131, 62): "('metamatch_' + args.out)"}, {}), "(args.folder, 'metamatch_' + args.out)", True, 'from os.path import join as opj\n'), ((29, 87, 29, 106), 'os.path.join', 'opj', ({(29, 91, 29, 101): 'dataFolder', (29, 103, 29, 105): 'bf'}, {}), '(dataFolder, bf)', True, 'from os.path import join as opj\n'), ((42, 34, 42, 55), 'multiprocessing.Pool', 'Pool', (), '', False, 'from multiprocessing import Pool\n'), ((132, 49, 132, 90), 'os.path.join', 'opj', ({(132, 53, 132, 64): 'args.folder', (132, 66, 132, 89): "('metamatch_' + args.out)"}, {}), "(args.folder, 'metamatch_' + args.out)", True, 'from os.path import join as opj\n'), ((73, 18, 73, 38), 'pandas.Series', 'pd.Series', ({(73, 28, 73, 37): 'featherLU'}, {}), '(featherLU)', True, 'import pandas as pd\n')] |
zoni/ulauncher-meet | meeting.py | 1b76627c69dfc539645acd27e30c9b8fd8fe08ae | from dataclasses import dataclass
@dataclass
class Meeting:
name: str
id: str
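# Example (illustrative values only): Meeting(name="Daily stand-up", id="abc-defg-hij")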
| [] |
uuosio/uuosio.gscdk | setup.py | a2e364d4499c1372567aa5933e2d8e02340a8385 |
import os
import shutil
import setuptools
# from skbuild import setup
from distutils.core import setup
from distutils.sysconfig import get_python_lib
import glob
# if os.path.exists('pysrc/tinygo'):
# shutil.rmtree('pysrc/tinygo')
# shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo')
release_files = []
for root, dirs, files in os.walk("pysrc/tinygo"):
for f in files:
release_files.append(os.path.join(root.replace('pysrc/', ''), f))
# print(release_files)
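# Note (added for clarity): package_data paths are resolved relative to the package
# directory ('pysrc', see package_dir below), which is why the 'pysrc/' prefix is
# stripped from each collected file path above.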
setup(
name="gscdk",
version="0.3.5",
description="Go Smart Contract Development Kit",
author='The UUOSIO Team',
license="BSD-3-Clause",
url="https://github.com/uuosio/uuosio.gscdk",
packages=['gscdk'],
package_dir={'gscdk': 'pysrc'},
package_data={
# "": ["*"],
'gscdk': release_files,
},
setup_requires=['wheel']
# scripts=['compiler/build/release/tinygo/bin/eosio-go'],
# install_requires=[
# ],
# include_package_data=True
)
| [((16, 25, 16, 48), 'os.walk', 'os.walk', ({(16, 33, 16, 47): '"""pysrc/tinygo"""'}, {}), "('pysrc/tinygo')", False, 'import os\n'), ((22, 0, 40, 1), 'distutils.core.setup', 'setup', (), '', False, 'from distutils.core import setup\n')] |
michalurbanski/bkgames | tests/data_creator_action.py | 69b1d16ae27d3118dd78449ce7deecbd6e1b95e7 | from typing import Callable
class DataCreatorAction:
def __init__(self, func: Callable, priority_for_creation: int = 99, priority_for_removal: int = 99):
self.func = func
self.priority_for_creation = priority_for_creation
self.priority_for_removal = priority_for_removal
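# Illustrative usage (the callable name is an assumption, not part of this repo):
#   action = DataCreatorAction(create_team, priority_for_creation=1, priority_for_removal=10)
#   action.func()  # the priorities presumably control ordering during test data setup/teardown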
| [] |
brianchiang-tw/HackerRank | Python/Numpy/Min and Max/min_and_max.py | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | import numpy as np
if __name__ == '__main__':
    h, w = map(int, input().split())
    row_list = []
    for i in range(h):
        single_row = list(map(int, input().split()))
        np_row = np.array(single_row)
        row_list.append(np_row)
    min_of_each_row = np.min(row_list, axis=1)
    max_of_min = np.max(min_of_each_row)
    print(max_of_min)
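    # Worked example: for the input
    #   4 2
    #   2 5
    #   3 7
    #   1 3
    #   4 0
    # the row-wise minima (axis=1) are [2, 3, 1, 0], so the printed result is 3.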
| [((18, 22, 18, 49), 'numpy.min', 'np.min', (), '', True, 'import numpy as np\n'), ((20, 17, 20, 42), 'numpy.max', 'np.max', ({(20, 25, 20, 40): 'min_of_each_row'}, {}), '(min_of_each_row)', True, 'import numpy as np\n'), ((13, 17, 13, 39), 'numpy.array', 'np.array', ({(13, 27, 13, 37): 'single_row'}, {}), '(single_row)', True, 'import numpy as np\n')] |
allure-framework/allure-pytest | allure/pytest_plugin.py | d55180aaeb21233e7ca577ffc6f67a07837c63f2 | import uuid
import pickle
import pytest
import argparse
from collections import namedtuple
from six import text_type
from allure.common import AllureImpl, StepContext
from allure.constants import Status, AttachmentType, Severity, \
FAILED_STATUSES, Label, SKIPPED_STATUSES
from allure.utils import parent_module, parent_down_from_module, labels_of, \
all_of, get_exception_message, now, mangle_testnames
from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel
def pytest_addoption(parser):
parser.getgroup("reporting").addoption('--alluredir',
action="store",
dest="allurereportdir",
metavar="DIR",
default=None,
help="Generate Allure report in the specified directory (may not exist)")
severities = [v for (_, v) in all_of(Severity)]
def label_type(name, legal_values=set()):
"""
argparse-type factory for labelish things.
processed value is set of tuples (name, value).
:param name: of label type (for future TestLabel things)
:param legal_values: a `set` of values that are legal for this label, if any limit whatsoever
:raises ArgumentTypeError: if `legal_values` are given and there are values that fall out of that
"""
def a_label_type(string):
atoms = set(string.split(','))
if legal_values and not atoms < legal_values:
raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values)))
return set((name, v) for v in atoms)
return a_label_type
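    # For example, label_type(name=Label.FEATURE) maps the string 'auth,billing'
    # to {(Label.FEATURE, 'auth'), (Label.FEATURE, 'billing')} for the options below.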
parser.getgroup("general").addoption('--allure_severities',
action="store",
dest="allureseverities",
metavar="SEVERITIES_SET",
default={},
type=label_type(name=Label.SEVERITY, legal_values=set(severities)),
help="""Comma-separated list of severity names.
Tests only with these severities will be run.
Possible values are:%s.""" % ', '.join(severities))
parser.getgroup("general").addoption('--allure_features',
action="store",
dest="allurefeatures",
metavar="FEATURES_SET",
default={},
type=label_type(name=Label.FEATURE),
help="""Comma-separated list of feature names.
Run tests that have at least one of the specified feature labels.""")
parser.getgroup("general").addoption('--allure_stories',
action="store",
dest="allurestories",
metavar="STORIES_SET",
default={},
type=label_type(name=Label.STORY),
help="""Comma-separated list of story names.
Run tests that have at least one of the specified story labels.""")
def pytest_configure(config):
reportdir = config.option.allurereportdir
if reportdir: # we actually record something
allure_impl = AllureImpl(reportdir)
testlistener = AllureTestListener(config)
pytest.allure._allurelistener = testlistener
config.pluginmanager.register(testlistener)
if not hasattr(config, 'slaveinput'):
# on xdist-master node do all the important stuff
config.pluginmanager.register(AllureAgregatingListener(allure_impl, config))
config.pluginmanager.register(AllureCollectionListener(allure_impl))
class AllureTestListener(object):
"""
Per-test listener.
Is responsible for recording in-test data and for attaching it to the test report thing.
The per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook.
"""
def __init__(self, config):
self.config = config
self.environment = {}
self.test = None
# FIXME: that flag makes us pre-report failures in the makereport hook.
        # it is here to cope with xdist's behavior regarding -x.
# see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish
self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue("maxfail")
@pytest.mark.hookwrapper
def pytest_runtest_protocol(self, item, nextitem):
try:
# for common items
description = item.function.__doc__
except AttributeError:
            # for doctests that have no `function` attribute
description = item.reportinfo()[2]
self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])),
description=description,
start=now(),
attachments=[],
labels=labels_of(item),
status=None,
steps=[],
id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish
self.stack = [self.test]
yield
self.test = None
self.stack = []
def attach(self, title, contents, attach_type):
"""
Store attachment object in current state for later actual write in the `AllureAgregatingListener.write_attach`
"""
attach = Attach(source=contents, # we later re-save those, oh my...
title=title,
type=attach_type)
self.stack[-1].attachments.append(attach)
def dynamic_issue(self, *issues):
"""
Attaches ``issues`` to the current active case
"""
if self.test:
self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues])
def description(self, description):
"""
Sets description for the test
"""
if self.test:
self.test.description = description
def start_step(self, name):
"""
        Starts a new :py:class:`allure.structure.TestStep` with given ``name``,
pushes it to the ``self.stack`` and returns the step.
"""
step = TestStep(name=name,
title=name,
start=now(),
attachments=[],
steps=[])
self.stack[-1].steps.append(step)
self.stack.append(step)
return step
def stop_step(self):
"""
Stops the step at the top of ``self.stack``
"""
step = self.stack.pop()
step.stop = now()
def _fill_case(self, report, call, pyteststatus, status):
"""
Finalizes with important data
:param report: py.test's `TestReport`
:param call: py.test's `CallInfo`
:param pyteststatus: the failed/xfailed/xpassed thing
:param status: a :py:class:`allure.constants.Status` entry
"""
[self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()]
self.test.stop = now()
self.test.status = status
if status in FAILED_STATUSES:
self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report),
trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail)
elif status in SKIPPED_STATUSES:
skip_message = type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail
trim_msg_len = 89
short_message = skip_message.split('\n')[0][:trim_msg_len]
# FIXME: see pytest.runner.pytest_runtest_makereport
self.test.failure = Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)),
trace=status == Status.PENDING and report.longrepr or short_message != skip_message and skip_message or '')
def report_case(self, item, report):
"""
        Adds `self.test` to the `report` in a form that `AllureAgregatingListener` understands
"""
parent = parent_module(item)
        # we attach a five-tuple: (test module ID, test module name, test module doc, environment, TestCase)
report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid,
parent.module.__name__,
parent.module.__doc__ or '',
self.environment,
self.test)))
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
"""
Decides when to actually report things.
pytest runs this (naturally) three times -- with report.when being:
setup <--- fixtures are to be initialized in this one
call <--- when this finishes the main code has finished
teardown <--- tears down fixtures (that still possess important info)
`setup` and `teardown` are always called, but `call` is called only if `setup` passes.
See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas.
The "other side" (AllureAggregatingListener) expects us to send EXACTLY ONE test report (it wont break, but it will duplicate cases in the report -- which is bad.
So we work hard to decide exact moment when we call `_stop_case` to do that. This method may benefit from FSM (we keep track of what has already happened via self.test.status)
Expected behavior is:
FAILED when call fails and others OK
BROKEN when either setup OR teardown are broken (and call may be anything)
PENDING if skipped and xfailed
SKIPPED if skipped and not xfailed
"""
report = (yield).get_result()
status = self.config.hook.pytest_report_teststatus(report=report)
status = status and status[0]
if report.when == 'call':
if report.passed:
self._fill_case(report, call, status, Status.PASSED)
elif report.failed:
self._fill_case(report, call, status, Status.FAILED)
                # FIXME: this is here only to work around xdist's stupid -x thing when it exits BEFORE THE TEARDOWN test log. Meh, I should file an issue against xdist
if self._magicaldoublereport:
# to minimize ze impact
self.report_case(item, report)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'setup': # setup / teardown
if report.failed:
self._fill_case(report, call, status, Status.BROKEN)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'teardown':
# as teardown is always called for testitem -- report our status here
if not report.passed:
if self.test.status not in FAILED_STATUSES:
# if test was OK but failed at teardown => broken
self._fill_case(report, call, status, Status.BROKEN)
else:
# mark it broken so, well, someone has idea of teardown failure
# still, that's no big deal -- test has already failed
# TODO: think about that once again
self.test.status = Status.BROKEN
# if a test isn't marked as "unreported" or it has failed, add it to the report.
if not item.get_marker("unreported") or self.test.status in FAILED_STATUSES:
self.report_case(item, report)
def pytest_runtest_setup(item):
item_labels = set((l.name, l.value) for l in labels_of(item)) # see label_type
arg_labels = set().union(item.config.option.allurefeatures,
item.config.option.allurestories,
item.config.option.allureseverities)
if arg_labels and not item_labels & arg_labels:
pytest.skip('Not suitable with selected labels: %s.' % ', '.join(text_type(l) for l in sorted(arg_labels)))
class LazyInitStepContext(StepContext):
"""
This is a step context used for decorated steps.
    It makes it possible to create step decorators before pytest_configure runs, when no AllureListener has been initialized yet.
"""
def __init__(self, allure_helper, title):
self.allure_helper = allure_helper
self.title = title
self.step = None
@property
def allure(self):
listener = self.allure_helper.get_listener()
        # if the listener has a `stack`, we are inside a test;
        # record steps only in that case
# FIXME: this breaks encapsulation a lot
if hasattr(listener, 'stack'):
return listener
class AllureHelper(object):
"""
This object holds various utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach``
"""
def __init__(self):
self._allurelistener = None # FIXME: this gets injected elsewhere, like in the pytest_configure
def get_listener(self):
return self._allurelistener
def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment
"""
Attaches ``contents`` to a current context with given ``name`` and ``type``.
"""
if self._allurelistener:
self._allurelistener.attach(name, contents, type)
def label(self, name, *value):
"""
A decorator factory that returns ``pytest.mark`` for a given label.
"""
allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name))
return allure_label(*value)
def severity(self, severity):
"""
        A decorator factory that returns ``pytest.mark`` for a given allure ``severity`` level.
"""
return self.label(Label.SEVERITY, severity)
def feature(self, *features):
"""
        A decorator factory that returns ``pytest.mark`` for the given features.
"""
return self.label(Label.FEATURE, *features)
def story(self, *stories):
"""
        A decorator factory that returns ``pytest.mark`` for the given stories.
"""
return self.label(Label.STORY, *stories)
def issue(self, *issues):
"""
        A decorator factory that returns ``pytest.mark`` for the given issues.
"""
return self.label(Label.ISSUE, *issues)
def dynamic_issue(self, *issues):
"""
Mark test ``issues`` from inside.
"""
if self._allurelistener:
self._allurelistener.dynamic_issue(*issues)
def description(self, description):
"""
Sets description for the test
"""
if self._allurelistener:
self._allurelistener.description(description)
def testcase(self, *testcases):
"""
        A decorator factory that returns ``pytest.mark`` for the given testcases.
"""
return self.label(Label.TESTCASE, *testcases)
def step(self, title):
"""
A contextmanager/decorator for steps.
TODO: when moving to python 3, rework this with ``contextlib.ContextDecorator``.
Usage examples::
import pytest
def test_foo():
with pytest.allure.step('mystep'):
assert False
@pytest.allure.step('make test data')
def make_test_data_bar():
raise ValueError('No data today')
def test_bar():
assert make_test_data_bar()
@pytest.allure.step
def make_test_data_baz():
raise ValueError('No data today')
def test_baz():
assert make_test_data_baz()
@pytest.fixture()
@pytest.allure.step('test fixture')
def steppy_fixture():
return 1
def test_baz(steppy_fixture):
assert steppy_fixture
"""
if callable(title):
return LazyInitStepContext(self, title.__name__)(title)
else:
return LazyInitStepContext(self, title)
def single_step(self, text):
"""
        Writes a single line to the report.
"""
if self._allurelistener:
with self.step(text):
pass
def environment(self, **env_dict):
if self._allurelistener:
self._allurelistener.environment.update(env_dict)
@property
def attach_type(self):
return AttachmentType
@property
def severity_level(self):
return Severity
def __getattr__(self, attr):
"""
Provides fancy shortcuts for severity::
# these are the same
pytest.allure.CRITICAL
pytest.allure.severity(pytest.allure.severity_level.CRITICAL)
"""
if attr in dir(Severity) and not attr.startswith('_'):
return self.severity(getattr(Severity, attr))
else:
raise AttributeError
MASTER_HELPER = AllureHelper()
def pytest_namespace():
return {'allure': MASTER_HELPER}
class AllureAgregatingListener(object):
"""
Listens to pytest hooks to generate reports for common tests.
"""
def __init__(self, impl, config):
self.impl = impl
# module's nodeid => TestSuite object
self.suites = {}
def pytest_sessionfinish(self):
"""
We are done and have all the results in `self.suites`
        Let's write them down.
        But first we kinda-unify the test cases.
        We expect cases to come from AllureTestListener -- they have an `id` field to manifest their identity.
        Of all the test cases in a suite we keep only the LAST one with a given ID -- because a logreport can be sent MORE THAN ONCE
        (namely, if the test fails and then gets broken -- to cope with xdist's -x behavior we have to report tests even at CALL failures)
TODO: do it in a better, more efficient way
"""
for s in self.suites.values():
if s.tests: # nobody likes empty suites
s.stop = max(case.stop for case in s.tests)
known_ids = set()
refined_tests = []
for t in s.tests[::-1]:
if t.id not in known_ids:
known_ids.add(t.id)
refined_tests.append(t)
s.tests = refined_tests[::-1]
with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f:
self.impl._write_xml(f, s)
self.impl.store_environment()
def write_attach(self, attachment):
"""
        Writes an attachment object from the `AllureTestListener` to the FS, fixing its fields
:param attachment: a :py:class:`allure.structure.Attach` object
"""
# OMG, that is bad
attachment.source = self.impl._save_attach(attachment.source, attachment.type)
attachment.type = attachment.type.mime_type
def pytest_runtest_logreport(self, report):
if hasattr(report, '_allure_result'):
module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result)
report._allure_result = None # so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98
self.impl.environment.update(environment)
for a in testcase.iter_attachments():
self.write_attach(a)
self.suites.setdefault(module_id, TestSuite(name=module_name,
description=module_doc,
tests=[],
labels=[],
start=testcase.start, # first case starts the suite!
stop=None)).tests.append(testcase)
CollectFail = namedtuple('CollectFail', 'name status message trace')
class AllureCollectionListener(object):
"""
Listens to pytest collection-related hooks
to generate reports for modules that failed to collect.
"""
def __init__(self, impl):
self.impl = impl
self.fails = []
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
status = Status.BROKEN
else:
status = Status.CANCELED
self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split("::"))[-1],
status=status,
message=get_exception_message(None, None, report),
trace=report.longrepr))
def pytest_sessionfinish(self):
"""
Creates a testsuite with collection failures if there were any.
"""
if self.fails:
self.impl.start_suite(name='test_collection_phase',
title='Collection phase',
description='This is the tests collection phase. Failures are modules that failed to collect.')
for fail in self.fails:
self.impl.start_case(name=fail.name.split(".")[-1])
self.impl.stop_case(status=fail.status, message=fail.message, trace=fail.trace)
self.impl.stop_suite()
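

# --- Illustrative usage (not part of the plugin) ------------------------------
# A minimal sketch of the ``pytest.allure`` helpers defined above, as they would
# appear inside a user's test body; it relies on this module's existing ``pytest``
# import and only touches ``pytest.allure`` at call time, so nothing happens when
# the plugin module itself is imported.  Decorator-style labels (feature /
# severity / issue) work the same way but are applied at collection time.
def _example_test_body():
    with pytest.allure.step('prepare cart'):
        cart = [('apple', 2), ('pear', 3)]
    with pytest.allure.step('compute total'):
        total = sum(price for _, price in cart)
        pytest.allure.attach('cart dump', repr(cart),
                             type=pytest.allure.attach_type.TEXT)
    assert total == 5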
| [((542, 14, 542, 68), 'collections.namedtuple', 'namedtuple', ({(542, 25, 542, 38): '"""CollectFail"""', (542, 40, 542, 67): '"""name status message trace"""'}, {}), "('CollectFail', 'name status message trace')", False, 'from collections import namedtuple\n'), ((77, 22, 77, 43), 'allure.common.AllureImpl', 'AllureImpl', ({(77, 33, 77, 42): 'reportdir'}, {}), '(reportdir)', False, 'from allure.common import AllureImpl, StepContext\n'), ((136, 17, 138, 41), 'allure.structure.Attach', 'Attach', (), '', False, 'from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel\n'), ((174, 20, 174, 25), 'allure.utils.now', 'now', ({}, {}), '()', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((186, 25, 186, 30), 'allure.utils.now', 'now', ({}, {}), '()', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((205, 17, 205, 36), 'allure.utils.parent_module', 'parent_module', ({(205, 31, 205, 35): 'item'}, {}), '(item)', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((25, 34, 25, 50), 'allure.utils.all_of', 'all_of', ({(25, 41, 25, 49): 'Severity'}, {}), '(Severity)', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((525, 72, 525, 107), 'pickle.loads', 'pickle.loads', ({(525, 85, 525, 106): 'report._allure_result'}, {}), '(report._allure_result)', False, 'import pickle\n'), ((118, 35, 118, 40), 'allure.utils.now', 'now', ({}, {}), '()', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((120, 36, 120, 51), 'allure.utils.labels_of', 'labels_of', ({(120, 46, 120, 50): 'item'}, {}), '(item)', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((162, 30, 162, 35), 'allure.utils.now', 'now', ({}, {}), '()', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((207, 46, 211, 71), 'pickle.dumps', 'pickle.dumps', ({(207, 59, 211, 70): "(parent.nodeid, parent.module.__name__, parent.module.__doc__ or '', self.\n environment, self.test)"}, {}), "((parent.nodeid, parent.module.__name__, parent.module.__doc__ or\n '', self.environment, self.test))", False, 'import pickle\n'), ((281, 49, 281, 64), 'allure.utils.labels_of', 'labels_of', ({(281, 59, 281, 63): 'item'}, {}), '(item)', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((123, 36, 123, 48), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((146, 37, 146, 77), 'allure.structure.TestLabel', 'TestLabel', (), '', False, 'from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel\n'), ((190, 48, 190, 105), 'allure.utils.get_exception_message', 'get_exception_message', ({(190, 70, 190, 82): 'call.excinfo', (190, 84, 190, 96): 'pyteststatus', (190, 98, 190, 104): 'report'}, {}), '(call.excinfo, pyteststatus, report)', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), 
((288, 73, 288, 85), 'six.text_type', 'text_type', ({(288, 83, 288, 84): 'l'}, {}), '(l)', False, 'from six import text_type\n'), ((565, 50, 565, 91), 'allure.utils.get_exception_message', 'get_exception_message', ({(565, 72, 565, 76): 'None', (565, 78, 565, 82): 'None', (565, 84, 565, 90): 'report'}, {}), '(None, None, report)', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((507, 64, 507, 76), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((534, 46, 539, 66), 'allure.structure.TestSuite', 'TestSuite', (), '', False, 'from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel\n'), ((116, 77, 116, 106), 'allure.utils.parent_down_from_module', 'parent_down_from_module', ({(116, 101, 116, 105): 'item'}, {}), '(item)', False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n')] |
domlysi/django-treenode | treenode/debug.py | 86e7c76e2b2d60c071cfce6ad1493b2b51f2d304 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.db import connection
import logging
import timeit
logger = logging.getLogger(__name__)
class debug_performance(object):
def __init__(self, message_prefix=''):
super(debug_performance, self).__init__()
self.__message_prefix = message_prefix
@staticmethod
def _get_queries():
return len(connection.queries)
@staticmethod
def _get_timer():
return timeit.default_timer()
def __enter__(self):
self.__init_queries = debug_performance._get_queries()
self.__init_timer = debug_performance._get_timer()
return None
def __exit__(self, type_, value, traceback):
queries = (debug_performance._get_queries() - self.__init_queries)
timer = (debug_performance._get_timer() - self.__init_timer)
if settings.DEBUG:
message = '\r%sexecuted %s %s in %ss.' % (
self.__message_prefix,
queries,
'query' if queries == 1 else 'queries',
timer, )
print(message)
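

# --- Illustrative usage (not part of the module) -------------------------------
# A minimal sketch of how ``debug_performance`` can wrap a block of ORM work so
# the query count and elapsed time get printed when settings.DEBUG is True.
# ``model_cls`` stands in for any Django model class and is hypothetical here.
def _example_rebuild(model_cls):
    # the context manager only reads ``connection.queries`` and a timer,
    # so no extra setup is needed beyond a configured Django project
    with debug_performance(message_prefix='treenode '):
        for obj in model_cls.objects.all():
            obj.save()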
| [((10, 9, 10, 36), 'logging.getLogger', 'logging.getLogger', ({(10, 27, 10, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((25, 15, 25, 37), 'timeit.default_timer', 'timeit.default_timer', ({}, {}), '()', False, 'import timeit\n')] |
vibhorvk/BlendString | String_tool.py | 3bf62083716b3b1f4976abeb3528771eeb79e2cf | bl_info = {
"name": "STRING",
"blender": (2, 80, 0),
"category": "Object",
'Author' : 'Vibhor Gupta'
}
import bpy
import bmesh
class STRING(bpy.types.Operator):
"""My Object Moving Script""" # Use this as a tooltip for menu items and buttons.
bl_idname = "object.stringtool_ot" # Unique identifier for buttons and menu items to reference.
bl_label = "String" # Display name in the interface.
bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.
bdepth: bpy.props.FloatProperty(name = "String Thickness", min = 0.1, max = 5, precision = 2 )
def execute(self, context):
# The original script
####################
#to create an edge between two given objects
def Edgify(ob1,ob2):
loc1 = ob1.location
loc2 = ob2.location
verts = [loc1,loc2]
bpy.ops.mesh.primitive_plane_add(location = (0,0,0))
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.delete(type='VERT')
#creating the vertices using the current mesh data into bmesh
pipe = bpy.context.object.data
bm = bmesh.new()
for v in verts:
bm.verts.new(v)
bpy.ops.object.editmode_toggle()
bm.to_mesh(pipe)
bm.free()
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.edge_face_add()
bpy.ops.object.editmode_toggle()
def string(olist):
edges = []
l = len(olist)
for x in range(l):
for y in range(l):
if y != x and x < y :
Edgify(olist[x], olist[y])
edges.append(bpy.context.active_object)
return edges
def piper(xlist):
bpy.ops.object.select_all(action='DESELECT')
for x in xlist:
x.select_set(True)
bpy.ops.object.join()
bpy.ops.object.convert(target='CURVE')
def check(olist):
if len(olist) == 0:
                self.report({'INFO'}, 'NO OBJECTS SELECTED')
return 0
else:
return 1
oblist = bpy.context.selected_objects
Edgelist = string(oblist)
piper(Edgelist)
actob = bpy.context.active_object
actob.data.bevel_depth = self.bdepth
bpy.ops.object.shade_smooth()
########################
return {'FINISHED'} # Lets Blender know the operator finished successfully.
class STRING_PT(bpy.types.Panel):
bl_idname = "object_stringtool_pt"
bl_label = "String"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "newprop"
def draw(self, context):
# You can set the property values that should be used when the user
# presses the button in the UI.
layout = self.layout
props = layout.operator('object.stringtool_ot')
def register():
bpy.utils.register_class(STRING)
def unregister():
bpy.utils.unregister_class(STRING)
# This allows you to run the script directly from Blender's Text editor
# to test the add-on without having to install it.
if __name__ == "__main__":
register()
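

# --- Illustrative usage (not part of the add-on) --------------------------------
# A minimal sketch, assuming the add-on is registered inside Blender: select two
# or more objects in the viewport, then call the operator.  ``bdepth`` maps to
# the "String Thickness" property declared above; 0.5 is an arbitrary value.
def _example_invoke():
    bpy.ops.object.stringtool_ot(bdepth=0.5)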
| [((19, 12, 19, 98), 'bpy.props.FloatProperty', 'bpy.props.FloatProperty', (), '', False, 'import bpy\n'), ((110, 4, 110, 36), 'bpy.utils.register_class', 'bpy.utils.register_class', ({(110, 29, 110, 35): 'STRING'}, {}), '(STRING)', False, 'import bpy\n'), ((114, 4, 114, 38), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', ({(114, 31, 114, 37): 'STRING'}, {}), '(STRING)', False, 'import bpy\n'), ((85, 8, 85, 37), 'bpy.ops.object.shade_smooth', 'bpy.ops.object.shade_smooth', ({}, {}), '()', False, 'import bpy\n'), ((29, 12, 29, 64), 'bpy.ops.mesh.primitive_plane_add', 'bpy.ops.mesh.primitive_plane_add', (), '', False, 'import bpy\n'), ((30, 12, 30, 44), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ({}, {}), '()', False, 'import bpy\n'), ((31, 12, 31, 52), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', (), '', False, 'import bpy\n'), ((32, 12, 32, 44), 'bpy.ops.mesh.delete', 'bpy.ops.mesh.delete', (), '', False, 'import bpy\n'), ((36, 17, 36, 28), 'bmesh.new', 'bmesh.new', ({}, {}), '()', False, 'import bmesh\n'), ((41, 12, 41, 44), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ({}, {}), '()', False, 'import bpy\n'), ((44, 12, 44, 44), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ({}, {}), '()', False, 'import bpy\n'), ((45, 12, 45, 52), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', (), '', False, 'import bpy\n'), ((46, 12, 46, 40), 'bpy.ops.mesh.edge_face_add', 'bpy.ops.mesh.edge_face_add', ({}, {}), '()', False, 'import bpy\n'), ((47, 12, 47, 44), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ({}, {}), '()', False, 'import bpy\n'), ((62, 12, 62, 56), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', (), '', False, 'import bpy\n'), ((65, 12, 65, 33), 'bpy.ops.object.join', 'bpy.ops.object.join', ({}, {}), '()', False, 'import bpy\n'), ((66, 12, 66, 50), 'bpy.ops.object.convert', 'bpy.ops.object.convert', (), '', False, 'import bpy\n')] |
aricsanders/pyMez3 | Code/DataHandlers/__init__.py | 13e2b9900af2287db0cc42a0190d31da165ce174 | """
The DataHandlers subpackage is designed to manipulate data by allowing different data types to be opened,
created, saved and updated. The subpackage is further divided into modules grouped by a common theme. Classes for data
that is already on disk normally follow this pattern:
`instance=ClassName(file_path,**options)`
For example, to
open an XML file whose model you don't know, use
`xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')`
or
`xml=XMLBase('MyXML.xml')`
All data models normally have save(), str() and if appropriate show() methods.
Examples
--------
<a href="../../../Examples/How_To_Open_S2p.html"> How to open a s2p file </a>
Import Structure
----------------
DataHandlers typically import from Utils but __NOT__ from Analysis, InstrumentControl or FrontEnds
Help
-----
<a href="../index.html">`pyMez.Code`</a>
<div>
<a href="../../../pyMez_Documentation.html">Documentation Home</a> |
<a href="../../index.html">API Documentation Home</a> |
<a href="../../../Examples/html/Examples_Home.html">Examples</a> |
<a href="../../../Reference_Index.html">Index </a>
</div>
"""
| [] |
somenzz/djangomail | djangomail/backends/dummy.py | 7d4f833cd71289a51eb935757d8b628e9c9f8aa1 | """
Dummy email backend that does nothing.
"""
from djangomail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
return len(list(email_messages))
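

# --- Illustrative usage (not part of the backend) ---------------------------------
# A minimal sketch: the dummy backend never delivers anything, it simply reports
# how many messages it was handed, which is handy in tests.  Plain placeholder
# strings are enough because send_messages() only counts its input; this assumes
# BaseEmailBackend needs no required constructor arguments (as in Django).
def _example_send_count():
    backend = EmailBackend()
    return backend.send_messages(['message-1', 'message-2'])  # -> 2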
| [] |
Avinesh/awx | awx/plugins/library/scan_services.py | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | #!/usr/bin/env python
import re
from ansible.module_utils.basic import * # noqa
DOCUMENTATION = '''
---
module: scan_services
short_description: Return service state information as fact data
description:
- Return service state information as fact data for various service management utilities
version_added: "1.9"
options:
requirements: [ ]
author: Matthew Jones
'''
EXAMPLES = '''
- monit: scan_services
# Example fact output:
# host | success >> {
# "ansible_facts": {
# "services": {
# "network": {
# "source": "sysv",
# "state": "running",
# "name": "network"
# },
# "arp-ethers.service": {
# "source": "systemd",
# "state": "stopped",
# "name": "arp-ethers.service"
# }
# }
# }
'''
class BaseService(object):
def __init__(self, module):
self.module = module
self.incomplete_warning = False
class ServiceScanService(BaseService):
def gather_services(self):
services = {}
service_path = self.module.get_bin_path("service")
if service_path is None:
return None
initctl_path = self.module.get_bin_path("initctl")
chkconfig_path = self.module.get_bin_path("chkconfig")
# sysvinit
if service_path is not None and chkconfig_path is None:
rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) < 4:
continue # Skipping because we expected more data
service_name = " ".join(line_data[3:])
if line_data[1] == "+":
service_state = "running"
else:
service_state = "stopped"
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
# Upstart
if initctl_path is not None and chkconfig_path is None:
p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
real_stdout = stdout.replace("\r","")
for line in real_stdout.split("\n"):
m = p.match(line)
if not m:
continue
service_name = m.group('name')
service_goal = m.group('goal')
service_state = m.group('state')
if m.group('pid'):
pid = m.group('pid')
else:
pid = None # NOQA
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
services[service_name] = payload
# RH sysvinit
elif chkconfig_path is not None:
#print '%s --status-all | grep -E "is (running|stopped)"' % service_path
p = re.compile(
r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
# Check for special cases where stdout does not fit pattern
match_any = False
for line in stdout.split('\n'):
if p.match(line):
match_any = True
if not match_any:
p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
match_any = False
for line in stdout.split('\n'):
if p_simple.match(line):
match_any = True
if match_any:
# Try extra flags " -l --allservices" needed for SLES11
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
elif '--list' in stderr:
# Extra flag needed for RHEL5
rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
for line in stdout.split('\n'):
m = p.match(line)
if m:
service_name = m.group('service')
service_state = 'stopped'
if m.group('rl3') == 'on':
rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
service_state = rc
if rc in (0,):
service_state = 'running'
#elif rc in (1,3):
else:
if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
self.incomplete_warning = True
continue
else:
service_state = 'stopped'
service_data = {"name": service_name, "state": service_state, "source": "sysv"}
services[service_name] = service_data
return services
class SystemctlScanService(BaseService):
def systemd_enabled(self):
# Check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError:
# If comm doesn't exist, old kernel, no systemd
return False
for line in f:
if 'systemd' in line:
return True
return False
def gather_services(self):
services = {}
if not self.systemd_enabled():
return None
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
if systemctl_path is None:
return None
rc, stdout, stderr = self.module.run_command("%s list-unit-files --type=service | tail -n +2 | head -n -2" % systemctl_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) != 2:
continue
if line_data[1] == "enabled":
state_val = "running"
else:
state_val = "stopped"
services[line_data[0]] = {"name": line_data[0], "state": state_val, "source": "systemd"}
return services
def main():
module = AnsibleModule(argument_spec = dict()) # noqa
service_modules = (ServiceScanService, SystemctlScanService)
all_services = {}
incomplete_warning = False
for svc_module in service_modules:
svcmod = svc_module(module)
svc = svcmod.gather_services()
if svc is not None:
all_services.update(svc)
if svcmod.incomplete_warning:
incomplete_warning = True
if len(all_services) == 0:
results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
else:
results = dict(ansible_facts=dict(services=all_services))
if incomplete_warning:
results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
module.exit_json(**results)
main()
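

# --- Illustrative output handling (not part of the module) -------------------------
# A minimal sketch of consuming the facts this module registers (for example from a
# later task's hostvars); the key layout follows the EXAMPLES block above, and the
# helper name is hypothetical.
def _example_running_services(ansible_facts):
    services = ansible_facts.get('services', {})
    return [name for name, info in services.items() if info.get('state') == 'running']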
| [((72, 16, 72, 113), 're.compile', 're.compile', ({(72, 27, 72, 112): '"""^\\\\s?(?P<name>.*)\\\\s(?P<goal>\\\\w+)\\\\/(?P<state>\\\\w+)(\\\\,\\\\sprocess\\\\s(?P<pid>[0-9]+))?\\\\s*$"""'}, {}), "(\n '^\\\\s?(?P<name>.*)\\\\s(?P<goal>\\\\w+)\\\\/(?P<state>\\\\w+)(\\\\,\\\\sprocess\\\\s(?P<pid>[0-9]+))?\\\\s*$'\n )", False, 'import re\n'), ((92, 16, 94, 113), 're.compile', 're.compile', ({(93, 16, 94, 112): '"""(?P<service>.*?)\\\\s+[0-9]:(?P<rl0>on|off)\\\\s+[0-9]:(?P<rl1>on|off)\\\\s+[0-9]:(?P<rl2>on|off)\\\\s+[0-9]:(?P<rl3>on|off)\\\\s+[0-9]:(?P<rl4>on|off)\\\\s+[0-9]:(?P<rl5>on|off)\\\\s+[0-9]:(?P<rl6>on|off)"""'}, {}), "(\n '(?P<service>.*?)\\\\s+[0-9]:(?P<rl0>on|off)\\\\s+[0-9]:(?P<rl1>on|off)\\\\s+[0-9]:(?P<rl2>on|off)\\\\s+[0-9]:(?P<rl3>on|off)\\\\s+[0-9]:(?P<rl4>on|off)\\\\s+[0-9]:(?P<rl5>on|off)\\\\s+[0-9]:(?P<rl6>on|off)'\n )", False, 'import re\n'), ((102, 27, 102, 76), 're.compile', 're.compile', ({(102, 38, 102, 75): '"""(?P<service>.*?)\\\\s+(?P<rl0>on|off)"""'}, {}), "('(?P<service>.*?)\\\\s+(?P<rl0>on|off)')", False, 'import re\n')] |
duckm8795/runscope-circleci | app.py | 2fd42e64bddb4b8f34c437c2d834b92369c9a2bf | import requests
import sys
import time
import os
def main():
trigger_url = sys.argv[1]
trigger_resp = requests.get(trigger_url)
if trigger_resp.ok:
trigger_json = trigger_resp.json().get("data", {})
test_runs = trigger_json.get("runs", [])
print ("Started {} test runs.".format(len(test_runs)))
results = {}
while len(results.keys()) < len(test_runs):
time.sleep(1)
for run in test_runs:
test_run_id = run.get("test_run_id")
if not test_run_id in results:
result = _get_result(run)
if result.get("result") in ["pass", "fail"]:
results[test_run_id] = result
pass_count = sum([r.get("result") == "pass" for r in results.values()])
fail_count = sum([r.get("result") == "fail" for r in results.values()])
if fail_count > 0:
print ("{} test runs passed. {} test runs failed.".format(pass_count, fail_count))
exit(1)
print ("All test runs passed.")
def _get_result(test_run):
# generate Personal Access Token at https://www.runscope.com/applications
if not "RUNSCOPE_ACCESS_TOKEN" in os.environ:
print ("Please set the environment variable RUNSCOPE_ACCESS_TOKEN. You can get an access token by going to https://www.runscope.com/applications")
exit(1)
API_TOKEN = os.environ["RUNSCOPE_ACCESS_TOKEN"]
opts = {
"base_url": "https://api.runscope.com",
"bucket_key": test_run.get("bucket_key"),
"test_id": test_run.get("test_id"),
"test_run_id": test_run.get("test_run_id")
}
result_url = "{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}".format(**opts)
print ("Getting result: {}".format(result_url))
headers = {
"Authorization": "Bearer {}".format(API_TOKEN),
"User-Agent": "python-trigger-sample"
}
result_resp = requests.get(result_url, headers=headers)
if result_resp.ok:
return result_resp.json().get("data")
return None
if __name__ == '__main__':
main() | [((9, 19, 9, 44), 'requests.get', 'requests.get', ({(9, 32, 9, 43): 'trigger_url'}, {}), '(trigger_url)', False, 'import requests\n'), ((60, 18, 60, 59), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((20, 12, 20, 25), 'time.sleep', 'time.sleep', ({(20, 23, 20, 24): '(1)'}, {}), '(1)', False, 'import time\n')] |
fabaff/spyse-python | spyse/client.py | f286514ac052ebe6fa98f877d251d8f3cd4db1c4 | import requests
from typing import List, Optional
from .models import AS, Domain, IP, CVE, Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord
from .response import Response
from .search_query import SearchQuery
from limiter import get_limiter, limit
class DomainsSearchResults:
def __init__(self, results: List[Domain], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[Domain] = results
class AutonomousSystemsSearchResults:
def __init__(self, results: List[AS], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[AS] = results
class IPSearchResults:
def __init__(self, results: List[IP], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[IP] = results
class CertificatesSearchResults:
def __init__(self, results: List[Certificate], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[Certificate] = results
class CVESearchResults:
def __init__(self, results: List[CVE], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[CVE] = results
class EmailsSearchResults:
def __init__(self, results: List[Email], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[Email] = results
class HistoricalDNSSearchResults:
def __init__(self, results: List[DNSHistoricalRecord], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[DNSHistoricalRecord] = results
class HistoricalWHOISSearchResults:
def __init__(self, results: List[WHOISHistoricalRecord], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[WHOISHistoricalRecord] = results
class Client:
DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data'
MAX_LIMIT = 100
SEARCH_RESULTS_LIMIT = 10000
RATE_LIMIT_FRAME_IN_SECONDS = 1
def __init__(self, api_token, base_url=DEFAULT_BASE_URL):
self.session = requests.Session()
self.session.headers.update({'Authorization': 'Bearer ' + api_token})
self.session.headers.update({'User-Agent': 'spyse-python'})
self.base_url = base_url
self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1)
self.account = self.get_quotas()
self.limiter._capacity = self.account.requests_rate_limit
def __get(self, endpoint: str) -> Response:
with limit(self.limiter, consume=1):
return Response.from_dict(self.session.get(endpoint).json())
def __search(self, endpoint, query: SearchQuery, lim: int = MAX_LIMIT, offset: int = 0) -> Response:
with limit(self.limiter, consume=1):
return Response.from_dict(self.session.post(endpoint,
json={"search_params": query.get(), "limit": lim,
"offset": offset}).json())
def __scroll(self, endpoint, query: SearchQuery, search_id: Optional[str] = None) -> Response:
with limit(self.limiter, consume=1):
if search_id:
body = {"search_params": query.get(), "search_id": search_id}
else:
body = {"search_params": query.get()}
return Response.from_dict(self.session.post(endpoint, json=body).json())
def set_user_agent(self, s: str):
self.session.headers.update({'User-Agent': s})
def get_quotas(self) -> Optional[Account]:
"""Returns details about your account quotas."""
response = self.__get('{}/account/quota'.format(self.base_url))
response.check_errors()
return Account.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def get_autonomous_system_details(self, asn: int) -> Optional[AS]:
"""Returns details about an autonomous system by AS number."""
response = self.__get('{}/as/{}'.format(self.base_url, asn))
response.check_errors()
return AS.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def count_autonomous_systems(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/as/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def search_autonomous_systems(self, query: SearchQuery, limit: int = MAX_LIMIT,
offset: int = 0) -> AutonomousSystemsSearchResults:
"""
Returns a list of autonomous systems that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/as/search'.format(self.base_url), query, limit, offset)
response.check_errors()
as_list = list()
for r in response.data.items:
as_list.append(AS.from_dict(r))
return AutonomousSystemsSearchResults(as_list, response.data.total_items)
def scroll_autonomous_systems(self, query: SearchQuery, scroll_id: str = None) -> AutonomousSystemsSearchResults:
"""
Returns a list of autonomous systems that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
as_list = list()
for r in response.data.items:
as_list.append(AS.from_dict(r))
return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id)
def get_domain_details(self, domain_name: str) -> Optional[Domain]:
"""Returns details about domain"""
response = self.__get('{}/domain/{}'.format(self.base_url, domain_name))
response.check_errors()
return Domain.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_domains(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) -> DomainsSearchResults:
"""
Returns a list of domains that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/domain/search'.format(self.base_url), query, limit, offset)
response.check_errors()
domains = list()
for r in response.data.items:
domains.append(Domain.from_dict(r))
return DomainsSearchResults(domains, response.data.total_items)
def count_domains(self, query: SearchQuery):
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/domain/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_domains(self, query: SearchQuery, scroll_id: str = None) -> DomainsSearchResults:
"""
Returns a list of domains that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
domains = list()
for r in response.data.items:
domains.append(Domain.from_dict(r))
return DomainsSearchResults(domains, search_id=response.data.search_id)
def get_ip_details(self, ip: str) -> Optional[IP]:
"""Returns details about IP"""
response = self.__get('{}/ip/{}'.format(self.base_url, ip))
response.check_errors()
return IP.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_ip(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) -> IPSearchResults:
"""
Returns a list of IPv4 hosts that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/ip/search'.format(self.base_url), query, limit, offset)
response.check_errors()
ips = list()
for r in response.data.items:
ips.append(IP.from_dict(r))
return IPSearchResults(ips, response.data.total_items)
def count_ip(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/ip/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_ip(self, query: SearchQuery, scroll_id: str = None) -> IPSearchResults:
"""
Returns a list of IPv4 hosts that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
ips = list()
for r in response.data.items:
ips.append(IP.from_dict(r))
return IPSearchResults(ips, search_id=response.data.search_id)
def get_certificate_details(self, fingerprint_sha256: str) -> Optional[Certificate]:
"""Returns details about SSL/TLS certificate"""
response = self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256))
response.check_errors()
return Certificate.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_certificate(self, query: SearchQuery, limit: int = MAX_LIMIT,
offset: int = 0) -> CertificatesSearchResults:
"""
        Returns a list of SSL/TLS certificates that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset)
response.check_errors()
certs = list()
for r in response.data.items:
certs.append(Certificate.from_dict(r))
return CertificatesSearchResults(certs, response.data.total_items)
def count_certificate(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/certificate/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_certificate(self, query: SearchQuery, scroll_id: str = None) -> CertificatesSearchResults:
"""
Returns a list of SSL/TLS certificates that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
certs = list()
for r in response.data.items:
certs.append(Certificate.from_dict(r))
return CertificatesSearchResults(certs, search_id=response.data.search_id)
def get_cve_details(self, cve_id: str) -> Optional[CVE]:
"""Returns details about CVE"""
response = self.__get('{}/cve/{}'.format(self.base_url, cve_id))
response.check_errors()
return CVE.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_cve(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) -> CVESearchResults:
"""
Returns a list of CVE that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/cve/search'.format(self.base_url), query, limit, offset)
response.check_errors()
cve_list = list()
for r in response.data.items:
cve_list.append(CVE.from_dict(r))
return CVESearchResults(cve_list, response.data.total_items)
def count_cve(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/cve/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_cve(self, query: SearchQuery, scroll_id: str = None) -> CVESearchResults:
"""
Returns a list of CVEs that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
cve_list = list()
for r in response.data.items:
cve_list.append(CVE.from_dict(r))
return CVESearchResults(cve_list, search_id=response.data.search_id)
def get_email_details(self, email: str) -> Optional[Email]:
"""Returns details about email"""
response = self.__get('{}/email/{}'.format(self.base_url, email))
response.check_errors()
return Email.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_emails(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) -> EmailsSearchResults:
"""
Returns a list of emails that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/email/search'.format(self.base_url), query, limit, offset)
response.check_errors()
emails = list()
for r in response.data.items:
emails.append(Email.from_dict(r))
return EmailsSearchResults(emails, response.data.total_items)
def count_emails(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
        response = self.__search('{}/email/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_emails(self, query: SearchQuery, scroll_id: str = None) -> EmailsSearchResults:
"""
Returns a list of emails that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
emails = list()
for r in response.data.items:
emails.append(Email.from_dict(r))
return EmailsSearchResults(emails, search_id=response.data.search_id)
def search_historical_dns(self, dns_type, domain_name: str, limit: int = MAX_LIMIT, offset: int = 0) \
-> HistoricalDNSSearchResults:
"""
Returns the historical DNS records about the given domain name.
"""
response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}')
response.check_errors()
records = list()
for r in response.data.items:
records.append(DNSHistoricalRecord.from_dict(r))
return HistoricalDNSSearchResults(records, response.data.total_items)
def search_historical_whois(self, domain_name: str, limit: int = MAX_LIMIT, offset: int = 0) \
-> HistoricalWHOISSearchResults:
"""
Returns the historical WHOIS records for the given domain name.
"""
response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}')
response.check_errors()
records = list()
for r in response.data.items:
records.append(WHOISHistoricalRecord.from_dict(r))
return HistoricalWHOISSearchResults(records, response.data.total_items)
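

# --- Illustrative usage (not part of the client) -----------------------------------
# A minimal sketch of the paged-search and scroll flows implemented above.  How the
# SearchQuery is built is out of scope here, so it is taken as an argument; the API
# token must be valid because Client.__init__ fetches the account quotas right away.
def _example_domain_search(api_token: str, query: SearchQuery):
    client = Client(api_token)
    page = client.search_domains(query, limit=Client.MAX_LIMIT, offset=0)
    print('matched:', page.total_items)
    # to page past the 10,000-result cap of search_domains, switch to scrolling:
    # scroll = client.scroll_domains(query)                      # first call
    # scroll = client.scroll_domains(query, scroll.search_id)    # subsequent calls
    return page.results  # list of Domain models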
| [((73, 23, 73, 41), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n'), ((77, 23, 77, 85), 'limiter.get_limiter', 'get_limiter', (), '', False, 'from limiter import get_limiter, limit\n'), ((82, 13, 82, 43), 'limiter.limit', 'limit', (), '', False, 'from limiter import get_limiter, limit\n'), ((86, 13, 86, 43), 'limiter.limit', 'limit', (), '', False, 'from limiter import get_limiter, limit\n'), ((92, 13, 92, 43), 'limiter.limit', 'limit', (), '', False, 'from limiter import get_limiter, limit\n')] |
jfcaballero/Tutorial-sobre-scikit-learn-abreviado | talleres_inov_docente/figures/plot_helpers.py | 1e2aa1f9132c277162135a5463068801edab8d15 | from matplotlib.colors import ListedColormap
cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50'])
cm2 = ListedColormap(['#0000aa', '#ff2020'])
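

# --- Illustrative usage (not part of the module) ------------------------------------
# A minimal sketch (illustration only): the two-colour map is intended for
# binary-class scatter plots in the tutorial figures; the data here is random.
def _example_scatter():
    import numpy as np
    import matplotlib.pyplot as plt
    X = np.random.randn(50, 2)
    y = (X[:, 0] > 0).astype(int)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cm2)
    plt.show()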
| [((3, 6, 3, 55), 'matplotlib.colors.ListedColormap', 'ListedColormap', ({(3, 21, 3, 54): "['#0000aa', '#ff2020', '#50ff50']"}, {}), "(['#0000aa', '#ff2020', '#50ff50'])", False, 'from matplotlib.colors import ListedColormap\n'), ((4, 6, 4, 44), 'matplotlib.colors.ListedColormap', 'ListedColormap', ({(4, 21, 4, 43): "['#0000aa', '#ff2020']"}, {}), "(['#0000aa', '#ff2020'])", False, 'from matplotlib.colors import ListedColormap\n')] |
cemac-ccs/FlaskMWE | Applications/FlaskApp/errorpages.py | e8ce3cbca0d402bd9fdb1feb10290f2e7b11907b | from flask import render_template
# Error Pages ----------------------------------------------------------------
def page_not_found(e):
# note that we set the 404 status explicitly
return render_template('404.html.j2'), 404
def page_not_allowed(e):
# note that we set the 403 status explicitly
return render_template('403.html.j2'), 403
def internal_error(error):
    current_app.logger.error('Server Error: %s', error)
return render_template('500.html.j2'), 500
def unhandled_exception(e):
    current_app.logger.error('Unhandled Exception: %s', e)
return render_template('500.html.j2'), 501
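

# --- Illustrative wiring (not part of this module) ----------------------------------
# A minimal sketch of how these handlers are typically attached to the Flask app
# object created elsewhere in the application; the function name is hypothetical.
def register_error_pages(app):
    app.register_error_handler(404, page_not_found)
    app.register_error_handler(403, page_not_allowed)
    app.register_error_handler(500, internal_error)
    app.register_error_handler(Exception, unhandled_exception)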
| [((6, 11, 6, 41), 'flask.render_template', 'render_template', ({(6, 27, 6, 40): '"""404.html.j2"""'}, {}), "('404.html.j2')", False, 'from flask import render_template\n'), ((11, 11, 11, 41), 'flask.render_template', 'render_template', ({(11, 27, 11, 40): '"""403.html.j2"""'}, {}), "('403.html.j2')", False, 'from flask import render_template\n'), ((16, 11, 16, 41), 'flask.render_template', 'render_template', ({(16, 27, 16, 40): '"""500.html.j2"""'}, {}), "('500.html.j2')", False, 'from flask import render_template\n'), ((21, 11, 21, 41), 'flask.render_template', 'render_template', ({(21, 27, 21, 40): '"""500.html.j2"""'}, {}), "('500.html.j2')", False, 'from flask import render_template\n')] |
anhydrous99/cppgym | cppgym/ToyText/BlackJack.py | 0b1009a74faebfe5a31bcfd6a86c74cf13464d56 | from .._BlackJack import BlackJackCPP
import gym
import ctypes
import numpy as np
from gym import spaces
class BlackJack(gym.Env):
def __init__(self, natural=False):
self.env = BlackJackCPP(natural)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Tuple((
spaces.Discrete(32),
spaces.Discrete(11),
spaces.Discrete(2)
))
self.state = None
self.natural = natural
def seed(self, seed=None):
if seed is None:
return [self.env.get_seed()]
else:
if not isinstance(seed, ctypes.c_uint32):
seed = ctypes.c_uint32(seed).value
self.env.set_seed(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
state, reward, done = self.env.step(action)
self.state = np.array(state)
return self.state, reward, done, {}
def render(self, mode='human'):
return None
def reset(self):
self.state = np.array(self.env.reset())
return self.state
| [((12, 28, 12, 46), 'gym.spaces.Discrete', 'spaces.Discrete', ({(12, 44, 12, 45): '2'}, {}), '(2)', False, 'from gym import spaces\n'), ((33, 21, 33, 36), 'numpy.array', 'np.array', ({(33, 30, 33, 35): 'state'}, {}), '(state)', True, 'import numpy as np\n'), ((14, 12, 14, 31), 'gym.spaces.Discrete', 'spaces.Discrete', ({(14, 28, 14, 30): '32'}, {}), '(32)', False, 'from gym import spaces\n'), ((15, 12, 15, 31), 'gym.spaces.Discrete', 'spaces.Discrete', ({(15, 28, 15, 30): '11'}, {}), '(11)', False, 'from gym import spaces\n'), ((16, 12, 16, 30), 'gym.spaces.Discrete', 'spaces.Discrete', ({(16, 28, 16, 29): '2'}, {}), '(2)', False, 'from gym import spaces\n'), ((26, 23, 26, 44), 'ctypes.c_uint32', 'ctypes.c_uint32', ({(26, 39, 26, 43): 'seed'}, {}), '(seed)', False, 'import ctypes\n')] |
ajothomas/beam | sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py | 4774c1caf3dac3b6a7dd161f82559a26fa380920 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for consumer_tracking_pipeline_visitor."""
# pytype: skip-file
import logging
import unittest
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.pipeline import Pipeline
from apache_beam.pvalue import AsList
from apache_beam.runners.direct import DirectRunner
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor
from apache_beam.transforms import CoGroupByKey
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Flatten
from apache_beam.transforms import ParDo
# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
# pylint: disable=pointless-statement
class ConsumerTrackingPipelineVisitorTest(unittest.TestCase):
def setUp(self):
self.pipeline = Pipeline(DirectRunner())
self.visitor = ConsumerTrackingPipelineVisitor()
def test_root_transforms(self):
root_read = beam.Impulse()
root_flatten = Flatten(pipeline=self.pipeline)
pbegin = pvalue.PBegin(self.pipeline)
pcoll_read = pbegin | 'read' >> root_read
pcoll_read | FlatMap(lambda x: x)
[] | 'flatten' >> root_flatten
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertCountEqual(root_transforms, [root_read, root_flatten])
pbegin_consumers = [
c.transform for c in self.visitor.value_to_consumers[pbegin]
]
self.assertCountEqual(pbegin_consumers, [root_read])
self.assertEqual(len(self.visitor.step_names), 3)
def test_side_inputs(self):
class SplitNumbersFn(DoFn):
def process(self, element):
if element < 0:
yield pvalue.TaggedOutput('tag_negative', element)
else:
yield element
class ProcessNumbersFn(DoFn):
def process(self, element, negatives):
yield element
def _process_numbers(pcoll, negatives):
first_output = (
pcoll
| 'process numbers step 1' >> ParDo(ProcessNumbersFn(), negatives))
second_output = (
first_output
| 'process numbers step 2' >> ParDo(ProcessNumbersFn(), negatives))
output_pc = ((first_output, second_output)
| 'flatten results' >> beam.Flatten())
return output_pc
root_read = beam.Impulse()
result = (
self.pipeline
| 'read' >> root_read
| ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive'))
positive, negative = result
_process_numbers(positive, AsList(negative))
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertEqual(root_transforms, [root_read])
self.assertEqual(len(self.visitor.step_names), 5)
self.assertEqual(len(self.visitor.views), 1)
self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList))
def test_co_group_by_key(self):
emails = self.pipeline | 'email' >> Create([('joe', '[email protected]')])
phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')])
{'emails': emails, 'phones': phones} | CoGroupByKey()
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertEqual(len(root_transforms), 2)
self.assertGreater(
len(self.visitor.step_names), 3) # 2 creates + expanded CoGBK
self.assertEqual(len(self.visitor.views), 0)
def test_visitor_not_sorted(self):
p = Pipeline()
# pylint: disable=expression-not-assigned
from apache_beam.testing.test_stream import TestStream
p | TestStream().add_elements(['']) | beam.Map(lambda _: _)
original_graph = p.to_runner_api(return_context=False)
out_of_order_graph = p.to_runner_api(return_context=False)
root_id = out_of_order_graph.root_transform_ids[0]
root = out_of_order_graph.components.transforms[root_id]
tmp = root.subtransforms[0]
root.subtransforms[0] = root.subtransforms[1]
root.subtransforms[1] = tmp
p = beam.Pipeline().from_runner_api(
out_of_order_graph, runner='BundleBasedDirectRunner', options=None)
v_out_of_order = ConsumerTrackingPipelineVisitor()
p.visit(v_out_of_order)
p = beam.Pipeline().from_runner_api(
original_graph, runner='BundleBasedDirectRunner', options=None)
v_original = ConsumerTrackingPipelineVisitor()
p.visit(v_original)
# Convert to string to assert they are equal.
out_of_order_labels = {
str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]]
for k in v_out_of_order.value_to_consumers
}
original_labels = {
str(k): [str(t) for t in v_original.value_to_consumers[k]]
for k in v_original.value_to_consumers
}
self.assertDictEqual(out_of_order_labels, original_labels)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| [((163, 2, 163, 17), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((45, 19, 45, 52), 'apache_beam.runners.direct.consumer_tracking_pipeline_visitor.ConsumerTrackingPipelineVisitor', 'ConsumerTrackingPipelineVisitor', ({}, {}), '()', False, 'from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor\n'), ((48, 16, 48, 30), 'apache_beam.Impulse', 'beam.Impulse', ({}, {}), '()', True, 'import apache_beam as beam\n'), ((49, 19, 49, 50), 'apache_beam.transforms.Flatten', 'Flatten', (), '', False, 'from apache_beam.transforms import Flatten\n'), ((51, 13, 51, 41), 'apache_beam.pvalue.PBegin', 'pvalue.PBegin', ({(51, 27, 51, 40): 'self.pipeline'}, {}), '(self.pipeline)', False, 'from apache_beam import pvalue\n'), ((93, 16, 93, 30), 'apache_beam.Impulse', 'beam.Impulse', ({}, {}), '()', True, 'import apache_beam as beam\n'), ((124, 8, 124, 18), 'apache_beam.pipeline.Pipeline', 'Pipeline', ({}, {}), '()', False, 'from apache_beam.pipeline import Pipeline\n'), ((140, 21, 140, 54), 'apache_beam.runners.direct.consumer_tracking_pipeline_visitor.ConsumerTrackingPipelineVisitor', 'ConsumerTrackingPipelineVisitor', ({}, {}), '()', False, 'from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor\n'), ((145, 17, 145, 50), 'apache_beam.runners.direct.consumer_tracking_pipeline_visitor.ConsumerTrackingPipelineVisitor', 'ConsumerTrackingPipelineVisitor', ({}, {}), '()', False, 'from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor\n'), ((44, 29, 44, 43), 'apache_beam.runners.direct.DirectRunner', 'DirectRunner', ({}, {}), '()', False, 'from apache_beam.runners.direct import DirectRunner\n'), ((53, 17, 53, 37), 'apache_beam.transforms.FlatMap', 'FlatMap', ({(53, 25, 53, 36): '(lambda x: x)'}, {}), '(lambda x: x)', False, 'from apache_beam.transforms import FlatMap\n'), ((100, 31, 100, 47), 'apache_beam.pvalue.AsList', 'AsList', ({(100, 38, 100, 46): 'negative'}, {}), '(negative)', False, 'from apache_beam.pvalue import AsList\n'), ((113, 43, 113, 57), 'apache_beam.transforms.CoGroupByKey', 'CoGroupByKey', ({}, {}), '()', False, 'from apache_beam.transforms import CoGroupByKey\n'), ((127, 42, 127, 63), 'apache_beam.Map', 'beam.Map', ({(127, 51, 127, 62): '(lambda _: _)'}, {}), '(lambda _: _)', True, 'import apache_beam as beam\n'), ((162, 2, 162, 21), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((111, 40, 111, 76), 'apache_beam.transforms.Create', 'Create', ({(111, 47, 111, 75): "[('joe', '[email protected]')]"}, {}), "([('joe', '[email protected]')])", False, 'from apache_beam.transforms import Create\n'), ((112, 40, 112, 74), 'apache_beam.transforms.Create', 'Create', ({(112, 47, 112, 73): "[('mary', '111-222-3333')]"}, {}), "([('mary', '111-222-3333')])", False, 'from apache_beam.transforms import Create\n'), ((138, 8, 138, 23), 'apache_beam.Pipeline', 'beam.Pipeline', ({}, {}), '()', True, 'import apache_beam as beam\n'), ((143, 8, 143, 23), 'apache_beam.Pipeline', 'beam.Pipeline', ({}, {}), '()', True, 'import apache_beam as beam\n'), ((90, 42, 90, 56), 'apache_beam.Flatten', 'beam.Flatten', ({}, {}), '()', True, 'import apache_beam as beam\n'), ((72, 16, 72, 60), 'apache_beam.pvalue.TaggedOutput', 'pvalue.TaggedOutput', ({(72, 36, 72, 50): '"""tag_negative"""', (72, 52, 72, 59): 'element'}, {}), "('tag_negative', element)", False, 'from apache_beam import pvalue\n'), ((127, 8, 127, 
20), 'apache_beam.testing.test_stream.TestStream', 'TestStream', ({}, {}), '()', False, 'from apache_beam.testing.test_stream import TestStream\n')] |
scudette/rekall-agent-server | gluon/main.py | e553f1ae5279f75a8f5b0c0c4847766b60ed86eb | #!/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The gluon wsgi application
---------------------------
"""
from __future__ import print_function
if False: import import_all # DO NOT REMOVE PART OF FREEZE PROCESS
import gc
import os
import re
import copy
import sys
import time
import datetime
import signal
import socket
import random
import string
from gluon._compat import Cookie, urllib2
#from thread import allocate_lock
from gluon.fileutils import abspath, write_file
from gluon.settings import global_settings
from gluon.utils import web2py_uuid
from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders
from gluon.globals import current
# Remarks:
# calling script has inserted path to script directory into sys.path
# applications_parent (path to applications/, site-packages/ etc)
# defaults to that directory set sys.path to
# ("", gluon_parent/site-packages, gluon_parent, ...)
#
# this is wrong:
# web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# because we do not want the path to this file which may be Library.zip
# gluon_parent is the directory containing gluon, web2py.py, logging.conf
# and the handlers.
# applications_parent (web2py_path) is the directory containing applications/
# and routes.py
# The two are identical unless web2py_path is changed via the web2py.py -f folder option
# main.web2py_path is the same as applications_parent (for backward compatibility)
web2py_path = global_settings.applications_parent # backward compatibility
create_missing_folders()
# set up logging for subsequent imports
import logging
import logging.config
# This needed to prevent exception on Python 2.5:
# NameError: name 'gluon' is not defined
# See http://bugs.python.org/issue1436
# attention!, the import Tkinter in messageboxhandler, changes locale ...
import gluon.messageboxhandler
logging.gluon = gluon
# so we must restore it! Thanks ozancag
import locale
locale.setlocale(locale.LC_CTYPE, "C") # IMPORTANT, web2py requires locale "C"
exists = os.path.exists
pjoin = os.path.join
try:
logging.config.fileConfig(abspath("logging.conf"))
except: # fails on GAE or when logfile is missing
logging.basicConfig()
logger = logging.getLogger("web2py")
from gluon.restricted import RestrictedError
from gluon.http import HTTP, redirect
from gluon.globals import Request, Response, Session
from gluon.compileapp import build_environment, run_models_in, \
run_controller_in, run_view_in
from gluon.contenttype import contenttype
from pydal.base import BaseAdapter
from gluon.validators import CRYPT
from gluon.html import URL, xmlescape
from gluon.utils import is_valid_ip_address, getipaddrinfo
from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread, \
try_rewrite_on_error, fixup_missing_path_info
from gluon import newcron
__all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer']
requests = 0 # gc timer
# Security Checks: validate URL and session_id here,
# accept_language is validated in languages
# pattern used to validate client address
regex_client = re.compile(r'[\w\-:]+(\.[\w\-]+)*\.?') # ## to account for IPV6
try:
version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r')
raw_version_string = version_info.read().split()[-1].strip()
version_info.close()
global_settings.web2py_version = raw_version_string
web2py_version = global_settings.web2py_version
except:
raise RuntimeError("Cannot determine web2py version")
try:
from gluon import rocket
except:
if not global_settings.web2py_runtime_gae:
logger.warn('unable to import Rocket')
load_routes()
HTTPS_SCHEMES = set(('https', 'HTTPS'))
def get_client(env):
"""
    Guesses the client address from the environment variables.
    First tries 'http_x_forwarded_for', then 'remote_addr';
    if both fail, assumes '127.0.0.1' or '::1' (running locally)
"""
eget = env.get
g = regex_client.search(eget('http_x_forwarded_for', ''))
client = (g.group() or '').split(',')[0] if g else None
if client in (None, '', 'unknown'):
g = regex_client.search(eget('remote_addr', ''))
if g:
client = g.group()
elif env.http_host.startswith('['): # IPv6
client = '::1'
else:
client = '127.0.0.1' # IPv4
if not is_valid_ip_address(client):
raise HTTP(400, "Bad Request (request.client=%s)" % client)
return client
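# For illustration (addresses below are made-up examples, not part of the original
# module): a request carrying http_x_forwarded_for = '203.0.113.7, 10.0.0.1' resolves
# to client '203.0.113.7' (first entry wins); one carrying only remote_addr =
# '192.0.2.10' resolves to '192.0.2.10'; otherwise a loopback address is assumed.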
def serve_controller(request, response, session):
"""
This function is used to generate a dynamic page.
It first runs all models, then runs the function in the controller,
and then tries to render the output using a view/template.
    This function must run from the [application] folder.
    A typical example would be a call to the URL
    /[application]/[controller]/[function], which would result in a call
    to [function]() in applications/[application]/[controller].py,
    rendered by applications/[application]/views/[controller]/[function].html
"""
# ##################################################
# build environment for controller and view
# ##################################################
environment = build_environment(request, response, session)
# set default view, controller can override it
response.view = '%s/%s.%s' % (request.controller,
request.function,
request.extension)
# also, make sure the flash is passed through
# ##################################################
# process models, controller and view (if required)
# ##################################################
run_models_in(environment)
response._view_environment = copy.copy(environment)
page = run_controller_in(request.controller, request.function, environment)
if isinstance(page, dict):
response._vars = page
response._view_environment.update(page)
page = run_view_in(response._view_environment)
# logic to garbage collect after exec, not always, once every 100 requests
global requests
requests = ('requests' in globals()) and (requests + 1) % 100 or 0
if not requests:
gc.collect()
# end garbage collection logic
# ##################################################
    # set default headers if not set
# ##################################################
default_headers = [
('Content-Type', contenttype('.' + request.extension)),
('Cache-Control',
'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime())),
('Pragma', 'no-cache')]
for key, value in default_headers:
response.headers.setdefault(key, value)
raise HTTP(response.status, page, **response.headers)
class LazyWSGI(object):
def __init__(self, environ, request, response):
self.wsgi_environ = environ
self.request = request
self.response = response
@property
def environ(self):
if not hasattr(self, '_environ'):
new_environ = self.wsgi_environ
new_environ['wsgi.input'] = self.request.body
new_environ['wsgi.version'] = 1
self._environ = new_environ
return self._environ
def start_response(self, status='200', headers=[], exec_info=None):
"""
        In a controller you can use:
        - request.wsgi.environ
        - request.wsgi.start_response
        to call third-party WSGI applications.
"""
self.response.status = str(status).split(' ', 1)[0]
self.response.headers = dict(headers)
return lambda *args, **kargs: \
self.response.write(escape=False, *args, **kargs)
def middleware(self, *middleware_apps):
"""
        In your controller use::
            @request.wsgi.middleware(middleware1, middleware2, ...)
        to decorate actions with WSGI middleware. Actions must return strings.
        Uses a simulated environment, so it may have weird behavior in some cases.
"""
def middleware(f):
def app(environ, start_response):
data = f()
start_response(self.response.status,
self.response.headers.items())
if isinstance(data, list):
return data
return [data]
for item in middleware_apps:
app = item(app)
def caller(app):
return app(self.environ, self.start_response)
return lambda caller=caller, app=app: caller(app)
return middleware
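# For illustration (not part of the original module): per the docstring above, inside an
# application controller an action can be wrapped as
#     @request.wsgi.middleware(my_middleware1, my_middleware2)
#     def index():
#         return 'hello from a WSGI-wrapped action'
# where my_middleware1/my_middleware2 are assumed WSGI middleware factories.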
def wsgibase(environ, responder):
"""
The gluon wsgi application. The first function called when a page
is requested (static or dynamic). It can be called by paste.httpserver
or by apache mod_wsgi (or any WSGI-compatible server).
- fills request with info
- the environment variables, replacing '.' with '_'
- adds web2py path and version info
- compensates for fcgi missing path_info and query_string
- validates the path in url
The url path must be either:
1. for static pages:
- /<application>/static/<file>
2. for dynamic pages:
- /<application>[/<controller>[/<function>[/<sub>]]][.<extension>]
The naming conventions are:
- application, controller, function and extension may only contain
`[a-zA-Z0-9_]`
- file and sub may also contain '-', '=', '.' and '/'
"""
eget = environ.get
current.__dict__.clear()
request = Request(environ)
response = Response()
session = Session()
env = request.env
#env.web2py_path = global_settings.applications_parent
env.web2py_version = web2py_version
#env.update(global_settings)
static_file = False
http_response = None
try:
try:
try:
# ##################################################
# handle fcgi missing path_info and query_string
# select rewrite parameters
# rewrite incoming URL
# parse rewritten header variables
# parse rewritten URL
# serve file if static
# ##################################################
fixup_missing_path_info(environ)
(static_file, version, environ) = url_in(request, environ)
response.status = env.web2py_status_code or response.status
if static_file:
if eget('QUERY_STRING', '').startswith('attachment'):
response.headers['Content-Disposition'] \
= 'attachment'
if version:
response.headers['Cache-Control'] = 'max-age=315360000'
response.headers[
'Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT'
response.stream(static_file, request=request)
# ##################################################
# fill in request items
# ##################################################
app = request.application # must go after url_in!
if not global_settings.local_hosts:
local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1'])
if not global_settings.web2py_runtime_gae:
try:
fqdn = socket.getfqdn()
local_hosts.add(socket.gethostname())
local_hosts.add(fqdn)
local_hosts.update([
addrinfo[4][0] for addrinfo
in getipaddrinfo(fqdn)])
if env.server_name:
local_hosts.add(env.server_name)
local_hosts.update([
addrinfo[4][0] for addrinfo
in getipaddrinfo(env.server_name)])
except (socket.gaierror, TypeError):
pass
global_settings.local_hosts = list(local_hosts)
else:
local_hosts = global_settings.local_hosts
client = get_client(env)
x_req_with = str(env.http_x_requested_with).lower()
cmd_opts = global_settings.cmd_options
request.update(
client = client,
folder = abspath('applications', app) + os.sep,
ajax = x_req_with == 'xmlhttprequest',
cid = env.http_web2py_component_element,
is_local = (env.remote_addr in local_hosts and
client == env.remote_addr),
is_shell = False,
is_scheduler = False,
is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \
request.env.http_x_forwarded_proto in HTTPS_SCHEMES \
or env.https == 'on'
)
request.url = environ['PATH_INFO']
# ##################################################
# access the requested application
# ##################################################
disabled = pjoin(request.folder, 'DISABLED')
if not exists(request.folder):
if app == rwthread.routes.default_application \
and app != 'welcome':
redirect(URL('welcome', 'default', 'index'))
elif rwthread.routes.error_handler:
_handler = rwthread.routes.error_handler
redirect(URL(_handler['application'],
_handler['controller'],
_handler['function'],
args=app))
else:
raise HTTP(404, rwthread.routes.error_message
% 'invalid request',
web2py_error='invalid application')
elif not request.is_local and exists(disabled):
five0three = os.path.join(request.folder,'static','503.html')
if os.path.exists(five0three):
                        raise HTTP(503, open(five0three, 'r').read())
else:
raise HTTP(503, "<html><body><h1>Temporarily down for maintenance</h1></body></html>")
# ##################################################
# build missing folders
# ##################################################
create_missing_app_folders(request)
# ##################################################
# get the GET and POST data
# ##################################################
#parse_get_post_vars(request, environ)
# ##################################################
# expose wsgi hooks for convenience
# ##################################################
request.wsgi = LazyWSGI(environ, request, response)
# ##################################################
# load cookies
# ##################################################
if env.http_cookie:
for single_cookie in env.http_cookie.split(';'):
single_cookie = single_cookie.strip()
if single_cookie:
try:
request.cookies.load(single_cookie)
except Cookie.CookieError:
pass # single invalid cookie ignore
# ##################################################
# try load session or create new session file
# ##################################################
if not env.web2py_disable_session:
session.connect(request, response)
# ##################################################
# run controller
# ##################################################
if global_settings.debugging and app != "admin":
import gluon.debug
# activate the debugger
gluon.debug.dbg.do_debug(mainpyfile=request.folder)
serve_controller(request, response, session)
except HTTP as hr:
http_response = hr
if static_file:
return http_response.to(responder, env=env)
if request.body:
request.body.close()
if hasattr(current, 'request'):
# ##################################################
# on success, try store session in database
# ##################################################
if not env.web2py_disable_session:
session._try_store_in_db(request, response)
# ##################################################
# on success, commit database
# ##################################################
if response.do_not_commit is True:
BaseAdapter.close_all_instances(None)
elif response.custom_commit:
BaseAdapter.close_all_instances(response.custom_commit)
else:
BaseAdapter.close_all_instances('commit')
# ##################################################
# if session not in db try store session on filesystem
# this must be done after trying to commit database!
# ##################################################
if not env.web2py_disable_session:
session._try_store_in_cookie_or_file(request, response)
# Set header so client can distinguish component requests.
if request.cid:
http_response.headers.setdefault(
'web2py-component-content', 'replace')
if request.ajax:
if response.flash:
http_response.headers['web2py-component-flash'] = \
urllib2.quote(xmlescape(response.flash).replace(b'\n', b''))
if response.js:
http_response.headers['web2py-component-command'] = \
urllib2.quote(response.js.replace('\n', ''))
# ##################################################
# store cookies in headers
# ##################################################
session._fixup_before_save()
http_response.cookies2headers(response.cookies)
ticket = None
except RestrictedError as e:
if request.body:
request.body.close()
# ##################################################
# on application error, rollback database
# ##################################################
# log tickets before rollback if not in DB
if not request.tickets_db:
ticket = e.log(request) or 'unknown'
# rollback
if response._custom_rollback:
response._custom_rollback()
else:
BaseAdapter.close_all_instances('rollback')
# if tickets in db, reconnect and store it in db
if request.tickets_db:
ticket = e.log(request) or 'unknown'
http_response = \
HTTP(500, rwthread.routes.error_message_ticket %
dict(ticket=ticket),
web2py_error='ticket %s' % ticket)
except:
if request.body:
request.body.close()
# ##################################################
# on application error, rollback database
# ##################################################
try:
if response._custom_rollback:
response._custom_rollback()
else:
BaseAdapter.close_all_instances('rollback')
except:
pass
e = RestrictedError('Framework', '', '', locals())
ticket = e.log(request) or 'unrecoverable'
http_response = \
HTTP(500, rwthread.routes.error_message_ticket
% dict(ticket=ticket),
web2py_error='ticket %s' % ticket)
finally:
if response and hasattr(response, 'session_file') \
and response.session_file:
response.session_file.close()
session._unlock(response)
http_response, new_environ = try_rewrite_on_error(
http_response, request, environ, ticket)
if not http_response:
return wsgibase(new_environ, responder)
if global_settings.web2py_crontype == 'soft':
newcron.softcron(global_settings.applications_parent).start()
return http_response.to(responder, env=env)
def save_password(password, port):
"""
Used by main() to save the password in the parameters_port.py file.
"""
password_file = abspath('parameters_%i.py' % port)
if password == '<random>':
# make up a new password
        chars = string.ascii_letters + string.digits
password = ''.join([random.choice(chars) for _ in range(8)])
cpassword = CRYPT()(password)[0]
print('******************* IMPORTANT!!! ************************')
print('your admin password is "%s"' % password)
print('*********************************************************')
elif password == '<recycle>':
# reuse the current password if any
if exists(password_file):
return
else:
password = ''
elif password.startswith('<pam_user:'):
# use the pam password for specified user
cpassword = password[1:-1]
else:
# use provided password
cpassword = CRYPT()(password)[0]
fp = open(password_file, 'w')
if password:
fp.write('password="%s"\n' % cpassword)
else:
fp.write('password=None\n')
fp.close()
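# For illustration (values are made up): save_password('admin123', 8000) writes a
# parameters_8000.py containing a single line of the form
#     password="<hash returned by CRYPT()('admin123')[0]>"
# while save_password('', 8000) writes
#     password=None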
def appfactory(wsgiapp=wsgibase,
logfilename='httpserver.log',
profiler_dir=None,
profilerfilename=None):
"""
    Generates a WSGI application that does logging and profiling and calls
    wsgibase.
Args:
wsgiapp: the base application
logfilename: where to store apache-compatible requests log
profiler_dir: where to store profile files
"""
if profilerfilename is not None:
raise BaseException("Deprecated API")
if profiler_dir:
profiler_dir = abspath(profiler_dir)
logger.warn('profiler is on. will use dir %s', profiler_dir)
if not os.path.isdir(profiler_dir):
try:
os.makedirs(profiler_dir)
except:
raise BaseException("Can't create dir %s" % profiler_dir)
filepath = pjoin(profiler_dir, 'wtest')
try:
filehandle = open( filepath, 'w' )
filehandle.close()
os.unlink(filepath)
except IOError:
raise BaseException("Unable to write to dir %s" % profiler_dir)
def app_with_logging(environ, responder):
"""
a wsgi app that does logging and profiling and calls wsgibase
"""
status_headers = []
def responder2(s, h):
"""
wsgi responder app
"""
status_headers.append(s)
status_headers.append(h)
return responder(s, h)
time_in = time.time()
ret = [0]
if not profiler_dir:
ret[0] = wsgiapp(environ, responder2)
else:
import cProfile
prof = cProfile.Profile()
prof.enable()
ret[0] = wsgiapp(environ, responder2)
prof.disable()
destfile = pjoin(profiler_dir, "req_%s.prof" % web2py_uuid())
prof.dump_stats(destfile)
try:
line = '%s, %s, %s, %s, %s, %s, %f\n' % (
environ['REMOTE_ADDR'],
datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
environ['REQUEST_METHOD'],
environ['PATH_INFO'].replace(',', '%2C'),
environ['SERVER_PROTOCOL'],
(status_headers[0])[:3],
time.time() - time_in,
)
if not logfilename:
sys.stdout.write(line)
elif isinstance(logfilename, str):
write_file(logfilename, line, 'a')
else:
logfilename.write(line)
except:
pass
return ret[0]
return app_with_logging
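# Illustrative sketch (not part of the original module): the callable returned by
# appfactory() is a plain WSGI application, so it can be mounted on any WSGI-compliant
# server; the host, port and log filename below are assumptions for illustration only.
def _example_serve_with_wsgiref():
    from wsgiref.simple_server import make_server
    application = appfactory(wsgibase, logfilename='httpserver.log')
    make_server('127.0.0.1', 8000, application).serve_forever()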
class HttpServer(object):
"""
the web2py web server (Rocket)
"""
def __init__(
self,
ip='127.0.0.1',
port=8000,
password='',
pid_filename='httpserver.pid',
log_filename='httpserver.log',
profiler_dir=None,
ssl_certificate=None,
ssl_private_key=None,
ssl_ca_certificate=None,
min_threads=None,
max_threads=None,
server_name=None,
request_queue_size=5,
timeout=10,
socket_timeout=1,
shutdown_timeout=None, # Rocket does not use a shutdown timeout
path=None,
interfaces=None # Rocket is able to use several interfaces - must be list of socket-tuples as string
):
"""
starts the web server.
"""
if interfaces:
# if interfaces is specified, it must be tested for rocket parameter correctness
# not necessarily completely tested (e.g. content of tuples or ip-format)
import types
if isinstance(interfaces, list):
for i in interfaces:
if not isinstance(i, tuple):
                        raise ValueError("Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/")
else:
                raise ValueError("Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/")
if path:
# if a path is specified change the global variables so that web2py
# runs from there instead of cwd or os.environ['web2py_path']
global web2py_path
path = os.path.normpath(path)
web2py_path = path
global_settings.applications_parent = path
os.chdir(path)
load_routes()
for p in (path, abspath('site-packages'), ""):
add_path_first(p)
if exists("logging.conf"):
logging.config.fileConfig("logging.conf")
save_password(password, port)
self.pid_filename = pid_filename
if not server_name:
server_name = socket.gethostname()
logger.info('starting web server...')
rocket.SERVER_NAME = server_name
rocket.SOCKET_TIMEOUT = socket_timeout
sock_list = [ip, port]
if not ssl_certificate or not ssl_private_key:
logger.info('SSL is off')
elif not rocket.ssl:
logger.warning('Python "ssl" module unavailable. SSL is OFF')
elif not exists(ssl_certificate):
logger.warning('unable to open SSL certificate. SSL is OFF')
elif not exists(ssl_private_key):
logger.warning('unable to open SSL private key. SSL is OFF')
else:
sock_list.extend([ssl_private_key, ssl_certificate])
if ssl_ca_certificate:
sock_list.append(ssl_ca_certificate)
logger.info('SSL is ON')
app_info = {'wsgi_app': appfactory(wsgibase,
log_filename,
profiler_dir)}
self.server = rocket.Rocket(interfaces or tuple(sock_list),
method='wsgi',
app_info=app_info,
min_threads=min_threads,
max_threads=max_threads,
queue_size=int(request_queue_size),
timeout=int(timeout),
handle_signals=False,
)
def start(self):
"""
start the web server
"""
try:
signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop())
signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop())
except:
pass
write_file(self.pid_filename, str(os.getpid()))
self.server.start()
def stop(self, stoplogging=False):
"""
stop cron and the web server
"""
newcron.stopcron()
self.server.stop(stoplogging)
try:
os.unlink(self.pid_filename)
except:
pass
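# Illustrative sketch (not part of the original module): web2py.py normally constructs
# HttpServer from parsed command-line options; the ip, port and password below are
# assumptions for illustration only.
def _example_run_http_server():
    server = HttpServer(ip='127.0.0.1', port=8000, password='<random>')
    server.start()  # serves until stop() runs (SIGINT/SIGTERM handlers are set in start())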
| [((55, 0, 55, 24), 'gluon.admin.create_missing_folders', 'create_missing_folders', ({}, {}), '()', False, 'from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders\n'), ((70, 0, 70, 38), 'locale.setlocale', 'locale.setlocale', ({(70, 17, 70, 32): 'locale.LC_CTYPE', (70, 34, 70, 37): '"""C"""'}, {}), "(locale.LC_CTYPE, 'C')", False, 'import locale\n'), ((79, 9, 79, 36), 'logging.getLogger', 'logging.getLogger', ({(79, 27, 79, 35): '"""web2py"""'}, {}), "('web2py')", False, 'import logging\n'), ((103, 15, 103, 52), 're.compile', 're.compile', ({(103, 26, 103, 51): '"""[\\\\w\\\\-:]+(\\\\.[\\\\w\\\\-]+)*\\\\.?"""'}, {}), "('[\\\\w\\\\-:]+(\\\\.[\\\\w\\\\-]+)*\\\\.?')", False, 'import re\n'), ((120, 0, 120, 13), 'gluon.rewrite.load', 'load_routes', ({}, {}), '()', True, 'from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread, try_rewrite_on_error, fixup_missing_path_info\n'), ((164, 18, 164, 63), 'gluon.compileapp.build_environment', 'build_environment', ({(164, 36, 164, 43): 'request', (164, 45, 164, 53): 'response', (164, 55, 164, 62): 'session'}, {}), '(request, response, session)', False, 'from gluon.compileapp import build_environment, run_models_in, run_controller_in, run_view_in\n'), ((177, 4, 177, 30), 'gluon.compileapp.run_models_in', 'run_models_in', ({(177, 18, 177, 29): 'environment'}, {}), '(environment)', False, 'from gluon.compileapp import build_environment, run_models_in, run_controller_in, run_view_in\n'), ((178, 33, 178, 55), 'copy.copy', 'copy.copy', ({(178, 43, 178, 54): 'environment'}, {}), '(environment)', False, 'import copy\n'), ((179, 11, 179, 79), 'gluon.compileapp.run_controller_in', 'run_controller_in', ({(179, 29, 179, 47): 'request.controller', (179, 49, 179, 65): 'request.function', (179, 67, 179, 78): 'environment'}, {}), '(request.controller, request.function, environment)', False, 'from gluon.compileapp import build_environment, run_models_in, run_controller_in, run_view_in\n'), ((206, 10, 206, 57), 'gluon.http.HTTP', 'HTTP', ({(206, 15, 206, 30): 'response.status', (206, 32, 206, 36): 'page'}, {}), '(response.status, page, **response.headers)', False, 'from gluon.http import HTTP, redirect\n'), ((292, 4, 292, 28), 'gluon.globals.current.__dict__.clear', 'current.__dict__.clear', ({}, {}), '()', False, 'from gluon.globals import current\n'), ((293, 14, 293, 30), 'gluon.globals.Request', 'Request', ({(293, 22, 293, 29): 'environ'}, {}), '(environ)', False, 'from gluon.globals import Request, Response, Session\n'), ((294, 15, 294, 25), 'gluon.globals.Response', 'Response', ({}, {}), '()', False, 'from gluon.globals import Request, Response, Session\n'), ((295, 14, 295, 23), 'gluon.globals.Session', 'Session', ({}, {}), '()', False, 'from gluon.globals import Request, Response, Session\n'), ((558, 33, 559, 48), 'gluon.rewrite.try_rewrite_on_error', 'try_rewrite_on_error', ({(559, 8, 559, 21): 'http_response', (559, 23, 559, 30): 'request', (559, 32, 559, 39): 'environ', (559, 41, 559, 47): 'ticket'}, {}), '(http_response, request, environ, ticket)', False, 'from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread, try_rewrite_on_error, fixup_missing_path_info\n'), ((572, 20, 572, 54), 'gluon.fileutils.abspath', 'abspath', ({(572, 28, 572, 53): "'parameters_%i.py' % port"}, {}), "('parameters_%i.py' % port)", False, 'from gluon.fileutils import abspath, write_file\n'), ((76, 30, 76, 53), 'gluon.fileutils.abspath', 'abspath', ({(76, 38, 76, 52): '"""logging.conf"""'}, {}), 
"('logging.conf')", False, 'from gluon.fileutils import abspath, write_file\n'), ((78, 4, 78, 25), 'logging.basicConfig', 'logging.basicConfig', ({}, {}), '()', False, 'import logging\n'), ((143, 11, 143, 38), 'gluon.utils.is_valid_ip_address', 'is_valid_ip_address', ({(143, 31, 143, 37): 'client'}, {}), '(client)', False, 'from gluon.utils import is_valid_ip_address, getipaddrinfo\n'), ((144, 14, 144, 67), 'gluon.http.HTTP', 'HTTP', ({(144, 19, 144, 22): '(400)', (144, 24, 144, 66): "('Bad Request (request.client=%s)' % client)"}, {}), "(400, 'Bad Request (request.client=%s)' % client)", False, 'from gluon.http import HTTP, redirect\n'), ((183, 15, 183, 54), 'gluon.compileapp.run_view_in', 'run_view_in', ({(183, 27, 183, 53): 'response._view_environment'}, {}), '(response._view_environment)', False, 'from gluon.compileapp import build_environment, run_models_in, run_controller_in, run_view_in\n'), ((189, 8, 189, 20), 'gc.collect', 'gc.collect', ({}, {}), '()', False, 'import gc\n'), ((618, 23, 618, 44), 'gluon.fileutils.abspath', 'abspath', ({(618, 31, 618, 43): 'profiler_dir'}, {}), '(profiler_dir)', False, 'from gluon.fileutils import abspath, write_file\n'), ((647, 18, 647, 29), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((790, 8, 790, 26), 'gluon.newcron.stopcron', 'newcron.stopcron', ({}, {}), '()', False, 'from gluon import newcron\n'), ((197, 25, 197, 61), 'gluon.contenttype.contenttype', 'contenttype', ({(197, 37, 197, 60): "('.' + request.extension)"}, {}), "('.' + request.extension)", False, 'from gluon.contenttype import contenttype\n'), ((620, 15, 620, 42), 'os.path.isdir', 'os.path.isdir', ({(620, 29, 620, 41): 'profiler_dir'}, {}), '(profiler_dir)', False, 'import os\n'), ((629, 12, 629, 31), 'os.unlink', 'os.unlink', ({(629, 22, 629, 30): 'filepath'}, {}), '(filepath)', False, 'import os\n'), ((653, 19, 653, 37), 'cProfile.Profile', 'cProfile.Profile', ({}, {}), '()', False, 'import cProfile\n'), ((728, 19, 728, 41), 'os.path.normpath', 'os.path.normpath', ({(728, 36, 728, 40): 'path'}, {}), '(path)', False, 'import os\n'), ((731, 12, 731, 26), 'os.chdir', 'os.chdir', ({(731, 21, 731, 25): 'path'}, {}), '(path)', False, 'import os\n'), ((732, 12, 732, 25), 'gluon.rewrite.load', 'load_routes', ({}, {}), '()', True, 'from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread, try_rewrite_on_error, fixup_missing_path_info\n'), ((741, 26, 741, 46), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n'), ((793, 12, 793, 40), 'os.unlink', 'os.unlink', ({(793, 22, 793, 39): 'self.pid_filename'}, {}), '(self.pid_filename)', False, 'import os\n'), ((201, 34, 201, 47), 'time.gmtime', 'time.gmtime', ({}, {}), '()', False, 'import time\n'), ((314, 16, 314, 48), 'gluon.rewrite.fixup_missing_path_info', 'fixup_missing_path_info', ({(314, 40, 314, 47): 'environ'}, {}), '(environ)', False, 'from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread, try_rewrite_on_error, fixup_missing_path_info\n'), ((315, 50, 315, 74), 'gluon.rewrite.url_in', 'url_in', ({(315, 57, 315, 64): 'request', (315, 66, 315, 73): 'environ'}, {}), '(request, environ)', False, 'from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread, try_rewrite_on_error, fixup_missing_path_info\n'), ((402, 16, 402, 51), 'gluon.admin.create_missing_app_folders', 'create_missing_app_folders', ({(402, 43, 402, 50): 'request'}, {}), '(request)', False, 'from gluon.admin import add_path_first, create_missing_folders, 
create_missing_app_folders\n'), ((563, 8, 563, 61), 'gluon.newcron.softcron', 'newcron.softcron', ({(563, 25, 563, 60): 'global_settings.applications_parent'}, {}), '(global_settings.applications_parent)', False, 'from gluon import newcron\n'), ((576, 28, 576, 48), 'random.choice', 'random.choice', ({(576, 42, 576, 47): 'chars'}, {}), '(chars)', False, 'import random\n'), ((577, 20, 577, 27), 'gluon.validators.CRYPT', 'CRYPT', ({}, {}), '()', False, 'from gluon.validators import CRYPT\n'), ((622, 16, 622, 41), 'os.makedirs', 'os.makedirs', ({(622, 28, 622, 40): 'profiler_dir'}, {}), '(profiler_dir)', False, 'import os\n'), ((671, 16, 671, 38), 'sys.stdout.write', 'sys.stdout.write', ({(671, 33, 671, 37): 'line'}, {}), '(line)', False, 'import sys\n'), ((733, 28, 733, 52), 'gluon.fileutils.abspath', 'abspath', ({(733, 36, 733, 51): '"""site-packages"""'}, {}), "('site-packages')", False, 'from gluon.fileutils import abspath, write_file\n'), ((734, 16, 734, 33), 'gluon.admin.add_path_first', 'add_path_first', ({(734, 31, 734, 32): 'p'}, {}), '(p)', False, 'from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders\n'), ((736, 16, 736, 57), 'logging.config.fileConfig', 'logging.config.fileConfig', ({(736, 42, 736, 56): '"""logging.conf"""'}, {}), "('logging.conf')", False, 'import logging\n'), ((783, 42, 783, 53), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((657, 59, 657, 72), 'gluon.utils.web2py_uuid', 'web2py_uuid', ({}, {}), '()', False, 'from gluon.utils import web2py_uuid\n'), ((673, 16, 673, 50), 'gluon.fileutils.write_file', 'write_file', ({(673, 27, 673, 38): 'logfilename', (673, 40, 673, 44): 'line', (673, 46, 673, 49): '"""a"""'}, {}), "(logfilename, line, 'a')", False, 'from gluon.fileutils import abspath, write_file\n'), ((392, 33, 392, 81), 'os.path.join', 'os.path.join', ({(392, 46, 392, 60): 'request.folder', (392, 61, 392, 69): '"""static"""', (392, 70, 392, 80): '"""503.html"""'}, {}), "(request.folder, 'static', '503.html')", False, 'import os\n'), ((393, 23, 393, 49), 'os.path.exists', 'os.path.exists', ({(393, 38, 393, 48): 'five0three'}, {}), '(five0three)', False, 'import os\n'), ((519, 20, 519, 63), 'pydal.base.BaseAdapter.close_all_instances', 'BaseAdapter.close_all_instances', ({(519, 52, 519, 62): '"""rollback"""'}, {}), "('rollback')", False, 'from pydal.base import BaseAdapter\n'), ((542, 20, 542, 63), 'pydal.base.BaseAdapter.close_all_instances', 'BaseAdapter.close_all_instances', ({(542, 52, 542, 62): '"""rollback"""'}, {}), "('rollback')", False, 'from pydal.base import BaseAdapter\n'), ((592, 20, 592, 27), 'gluon.validators.CRYPT', 'CRYPT', ({}, {}), '()', False, 'from gluon.validators import CRYPT\n'), ((668, 16, 668, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((338, 35, 338, 51), 'socket.getfqdn', 'socket.getfqdn', ({}, {}), '()', False, 'import socket\n'), ((360, 29, 360, 57), 'gluon.fileutils.abspath', 'abspath', ({(360, 37, 360, 51): '"""applications"""', (360, 53, 360, 56): 'app'}, {}), "('applications', app)", False, 'from gluon.fileutils import abspath, write_file\n'), ((380, 33, 380, 67), 'gluon.html.URL', 'URL', ({(380, 37, 380, 46): '"""welcome"""', (380, 48, 380, 57): '"""default"""', (380, 59, 380, 66): '"""index"""'}, {}), "('welcome', 'default', 'index')", False, 'from gluon.html import URL, xmlescape\n'), ((388, 30, 390, 70), 'gluon.http.HTTP', 'HTTP', (), '', False, 'from gluon.http import HTTP, redirect\n'), ((396, 30, 396, 110), 'gluon.http.HTTP', 'HTTP', 
({(396, 35, 396, 38): '(503)', (396, 40, 396, 109): '"""<html><body><h1>Temporarily down for maintenance</h1></body></html>"""'}, {}), "(503, '<html><body><h1>Temporarily down for maintenance</h1></body></html>'\n )", False, 'from gluon.http import HTTP, redirect\n'), ((468, 24, 468, 61), 'pydal.base.BaseAdapter.close_all_instances', 'BaseAdapter.close_all_instances', ({(468, 56, 468, 60): 'None'}, {}), '(None)', False, 'from pydal.base import BaseAdapter\n'), ((663, 16, 663, 41), 'datetime.datetime.today', 'datetime.datetime.today', ({}, {}), '()', False, 'import datetime\n'), ((339, 44, 339, 64), 'socket.gethostname', 'socket.gethostname', ({}, {}), '()', False, 'import socket\n'), ((383, 33, 386, 46), 'gluon.html.URL', 'URL', (), '', False, 'from gluon.html import URL, xmlescape\n'), ((470, 24, 470, 79), 'pydal.base.BaseAdapter.close_all_instances', 'BaseAdapter.close_all_instances', ({(470, 56, 470, 78): 'response.custom_commit'}, {}), '(response.custom_commit)', False, 'from pydal.base import BaseAdapter\n'), ((472, 24, 472, 65), 'pydal.base.BaseAdapter.close_all_instances', 'BaseAdapter.close_all_instances', ({(472, 56, 472, 64): '"""commit"""'}, {}), "('commit')", False, 'from pydal.base import BaseAdapter\n'), ((343, 35, 343, 54), 'gluon.utils.getipaddrinfo', 'getipaddrinfo', ({(343, 49, 343, 53): 'fqdn'}, {}), '(fqdn)', False, 'from gluon.utils import is_valid_ip_address, getipaddrinfo\n'), ((348, 43, 348, 73), 'gluon.utils.getipaddrinfo', 'getipaddrinfo', ({(348, 57, 348, 72): 'env.server_name'}, {}), '(env.server_name)', False, 'from gluon.utils import is_valid_ip_address, getipaddrinfo\n'), ((489, 46, 489, 71), 'gluon.html.xmlescape', 'xmlescape', ({(489, 56, 489, 70): 'response.flash'}, {}), '(response.flash)', False, 'from gluon.html import URL, xmlescape\n')] |
marc4gov/tokenspice2 | agents/EWPublisherAgent.py | 1993383674f35b20e11e54606b3dac8e4c05c0f9 | import logging
log = logging.getLogger('marketagents')
from enforce_typing import enforce_types # type: ignore[import]
import random
from agents.PublisherAgent import PublisherAgent
from agents.PoolAgent import PoolAgent
from util import constants
from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN
from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens
from web3tools.web3util import toBase18
@enforce_types
class EWPublisherAgent(PublisherAgent):
def __init__(self, name: str, USD: float, OCEAN: float):
super().__init__(name, USD, OCEAN)
self._s_since_create = 0
self._s_between_create = 7 * constants.S_PER_DAY #magic number
self._s_since_unstake = 0
self._s_between_unstake = 3 * constants.S_PER_DAY #magic number
def takeStep(self, state) -> None:
self._s_since_create += state.ss.time_step
self._s_since_unstake += state.ss.time_step
if self._doCreatePool():
self._s_since_create = 0
self._createPoolAgent(state)
if self._doUnstakeOCEAN(state):
self._s_since_unstake = 0
self._unstakeOCEANsomewhere(state)
def _doCreatePool(self) -> bool:
if self.OCEAN() < 200.0: #magic number
return False
return self._s_since_create >= self._s_between_create
def _createPoolAgent(self, state) -> PoolAgent:
assert self.OCEAN() > 0.0, "should not call if no OCEAN"
wallet = self._wallet._web3wallet
OCEAN = globaltokens.OCEANtoken()
#name
pool_i = len(state.agents.filterToPool())
dt_name = f'DT{pool_i}'
pool_agent_name = f'pool{pool_i}'
#new DT
DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic number
#new pool
pool_address = bfactory.BFactory().newBPool(from_wallet=wallet)
pool = bpool.BPool(pool_address)
#bind tokens & add initial liquidity
OCEAN_bind_amt = self.OCEAN() #magic number: use all the OCEAN
DT_bind_amt = 20.0 #magic number
DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet)
OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet)
pool.bind(DT.address, toBase18(DT_bind_amt),
toBase18(POOL_WEIGHT_DT), from_wallet=wallet)
pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt),
toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet)
pool.finalize(from_wallet=wallet)
#create agent
pool_agent = PoolAgent(pool_agent_name, pool)
state.addAgent(pool_agent)
return pool_agent
def _doUnstakeOCEAN(self, state) -> bool:
if not state.agents.filterByNonzeroStake(self):
return False
return self._s_since_unstake >= self._s_between_unstake
def _unstakeOCEANsomewhere(self, state):
"""Choose what pool to unstake and by how much. Then do the action."""
pool_agents = state.agents.filterByNonzeroStake(self)
pool_agent = random.choice(list(pool_agents.values()))
BPT = self.BPT(pool_agent.pool)
BPT_unstake = 0.10 * BPT #magic number
self.unstakeOCEAN(BPT_unstake, pool_agent.pool)
def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken:
"""Create datatoken contract and mint DTs to self."""
wallet = self._wallet._web3wallet
DT_address = dtfactory.DTFactory().createToken(
'', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet)
DT = datatoken.Datatoken(DT_address)
DT.mint(wallet.address, toBase18(mint_amt), from_wallet=wallet)
return DT
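# Illustrative sketch (not part of the original module): the agent is built with a name
# and starting USD/OCEAN balances and is stepped once per simulation tick by the engine.
# The balances and the `state` object below are assumptions for illustration only.
def _example_step_publisher(state):
    agent = EWPublisherAgent("ew_publisher", USD=0.0, OCEAN=1000.0)
    agent.takeStep(state)  # may create a pool or unstake OCEAN, depending on elapsed time
    return agent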
| [((2, 6, 2, 39), 'logging.getLogger', 'logging.getLogger', ({(2, 24, 2, 38): '"""marketagents"""'}, {}), "('marketagents')", False, 'import logging\n'), ((45, 16, 45, 41), 'web3engine.globaltokens.OCEANtoken', 'globaltokens.OCEANtoken', ({}, {}), '()', False, 'from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens\n'), ((57, 15, 57, 40), 'web3engine.bpool.BPool', 'bpool.BPool', ({(57, 27, 57, 39): 'pool_address'}, {}), '(pool_address)', False, 'from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens\n'), ((74, 21, 74, 53), 'agents.PoolAgent.PoolAgent', 'PoolAgent', ({(74, 31, 74, 46): 'pool_agent_name', (74, 48, 74, 52): 'pool'}, {}), '(pool_agent_name, pool)', False, 'from agents.PoolAgent import PoolAgent\n'), ((97, 13, 97, 44), 'web3engine.datatoken.Datatoken', 'datatoken.Datatoken', ({(97, 33, 97, 43): 'DT_address'}, {}), '(DT_address)', False, 'from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens\n'), ((63, 33, 63, 54), 'web3tools.web3util.toBase18', 'toBase18', ({(63, 42, 63, 53): 'DT_bind_amt'}, {}), '(DT_bind_amt)', False, 'from web3tools.web3util import toBase18\n'), ((64, 36, 64, 60), 'web3tools.web3util.toBase18', 'toBase18', ({(64, 45, 64, 59): 'OCEAN_bind_amt'}, {}), '(OCEAN_bind_amt)', False, 'from web3tools.web3util import toBase18\n'), ((66, 30, 66, 51), 'web3tools.web3util.toBase18', 'toBase18', ({(66, 39, 66, 50): 'DT_bind_amt'}, {}), '(DT_bind_amt)', False, 'from web3tools.web3util import toBase18\n'), ((67, 18, 67, 42), 'web3tools.web3util.toBase18', 'toBase18', ({(67, 27, 67, 41): 'POOL_WEIGHT_DT'}, {}), '(POOL_WEIGHT_DT)', False, 'from web3tools.web3util import toBase18\n'), ((68, 33, 68, 57), 'web3tools.web3util.toBase18', 'toBase18', ({(68, 42, 68, 56): 'OCEAN_bind_amt'}, {}), '(OCEAN_bind_amt)', False, 'from web3tools.web3util import toBase18\n'), ((69, 18, 69, 45), 'web3tools.web3util.toBase18', 'toBase18', ({(69, 27, 69, 44): 'POOL_WEIGHT_OCEAN'}, {}), '(POOL_WEIGHT_OCEAN)', False, 'from web3tools.web3util import toBase18\n'), ((96, 34, 96, 52), 'web3tools.web3util.toBase18', 'toBase18', ({(96, 43, 96, 51): 'mint_amt'}, {}), '(mint_amt)', False, 'from web3tools.web3util import toBase18\n'), ((98, 32, 98, 50), 'web3tools.web3util.toBase18', 'toBase18', ({(98, 41, 98, 49): 'mint_amt'}, {}), '(mint_amt)', False, 'from web3tools.web3util import toBase18\n'), ((56, 23, 56, 42), 'web3engine.bfactory.BFactory', 'bfactory.BFactory', ({}, {}), '()', False, 'from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens\n'), ((95, 21, 95, 42), 'web3engine.dtfactory.DTFactory', 'dtfactory.DTFactory', ({}, {}), '()', False, 'from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens\n')] |
slaclab/lcls-orbit | lcls_orbit/__init__.py | e2b8738c4af2dfed40fce4b898bf9b2a820d5f56 | import numpy as np
from . import _version
__version__ = _version.get_versions()['version']
HXR_COLORS = ("#000000", "#02004a", "#030069", "#04008f", "#0500b3", "#0700ff")
SXR_COLORS = ("#000000", "#330000", "#520000", "#850000", "#ad0000", "#ff0000")
HXR_AREAS = {
"GUN" : [2017.911, 2018.712],
"L0" : [2018.712, 2024.791],
"DL1_1": [2024.791, 2031.992],
"DL1_2": [2031.992, 2035.035],
"L1": [2035.035, 2044.167],
"BC1": [2044.167, 2059.733],
"L2": [2059.733, 2410.698],
"BC2": [2410.698, 2438.400],
"L3": [2438.400, 3042.005],
"CLTH_0": [3042.005, 3050.512],
"CLTH_1": [3050.512, 3058.457],
"CLTH_2": [3058.457, 3110.961],
"BSYH_1": [3110.961, 3117.409],
"BSYH_2": [3117.409, 3224.022],
"LTUH": [3224.022, 3562.739],
"UNDH": [3562.739, 3718.483],
"DMPH_1": [3718.483, 3734.407],
"DMPH_2": [3734.407, 3765.481]
}
HXR_AREAS = {np.mean(value): key for key, value in HXR_AREAS.items()}
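# Illustrative helper sketch (not part of the original module): after the inversion
# above, HXR_AREAS maps each section's mean z-position to its name, so the section
# nearest an arbitrary z can be looked up as below; the helper name is an assumption.
def _nearest_hxr_area(z):
    return HXR_AREAS[min(HXR_AREAS, key=lambda z0: abs(z0 - z))]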
SXR_AREAS = {
"GUN" : [2017.911, 2017.911],
"L0" : [2018.712, 2024.791],
"DL1_1": [2024.791, 2031.992],
"DL1_2": [2031.992, 2035.035],
"L1": [2035.035, 2044.167],
"BC1": [2044.167, 2059.733],
"L2": [2059.733, 2410.698],
"BC2": [2410.698, 2438.400],
"L3": [2438.400, 3042.005],
"CLTH_0": [3042.005, 3050.512],
"CLTH_1": [3050.512, 3058.457],
"CLTS": [3177.650, 3224.022],
"BSYS": [3224.022, 3565.656],
"LTUS": [3565.656, 3718.483],
"UNDS": [3718.483, 3734.407],
"DMPS_1": [3734.407, 3734.407],
"DMPS_2": [3734.407, 3765.481]
}
SXR_AREAS = {np.mean(value): key for key, value in SXR_AREAS.items()} | [((31, 13, 31, 27), 'numpy.mean', 'np.mean', ({(31, 21, 31, 26): 'value'}, {}), '(value)', True, 'import numpy as np\n'), ((53, 13, 53, 27), 'numpy.mean', 'np.mean', ({(53, 21, 53, 26): 'value'}, {}), '(value)', True, 'import numpy as np\n')] |
OverLordGoldDragon/dummy | tests/test_optimizers_v2/test_optimizers_v2.py | 5192b91c57721f37b906f670ad954a46f98bf5b5 | import os
import tempfile
import numpy as np
import tensorflow as tf
from time import time
from termcolor import cprint
from unittest import TestCase
from .. import K
from .. import Input, Dense, GRU, Bidirectional, Embedding
from .. import Model, load_model
from .. import l2
from .. import maxnorm
from .. import Adam, Nadam, SGD
from .. import AdamW, NadamW, SGDW
from .. import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval
print("TF version: %s" % tf.__version__)
tf_eager = bool(os.environ["TF_EAGER"] == "True")
if tf_eager:
print("TF running eagerly")
else:
tf.compat.v1.disable_eager_execution()
print("TF running in graph mode")
class TestOptimizers(TestCase):
def test_all(self): # Save/Load, Warm Restarts (w/ cosine annealing)
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
num_batches, num_epochs = 25, 4
batch_size, timesteps, num_channels = 16, 8, 4
batch_shape = (batch_size, timesteps, num_channels)
total_iterations = num_batches # due to warm restarts
self.model = self._make_model(batch_shape, total_iterations)
optimizer = self._make_optimizer(optimizer_name, self.model,
total_iterations)
self.model.compile(optimizer, loss='binary_crossentropy')
self.assertTrue(self._valid_weight_decays(self.model))
self.model._make_train_function() # else K.eval before train may fail
X, Y = self._make_data(num_batches, *batch_shape)
self.eta_history = [] # for stop-introspection
self.t_cur_history = [] # for stop-introspection
for epoch in range(num_epochs):
for batch_num in range(num_batches):
self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)]
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.model.train_on_batch(X[batch_num], Y[batch_num])
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.eta_history.pop(-(1 + int(tf_eager)))
K.set_value(self.model.optimizer.t_cur, 0)
self.assertTrue(self._valid_cosine_annealing(self.eta_history,
total_iterations, num_epochs))
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MAIN TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MAIN TESTS PASSED >>\n", 'green')
def test_misc(self): # tests of non-main features to improve coverage
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
optimizer_kw = {'total_iterations': 0, 'decay': 1e-3,
'amsgrad': optimizer_name == 'AdamW',
'nesterov': optimizer_name == 'SGDW'}
num_batches = 4
batch_size, timesteps = 16, 8
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
self.model = self._make_model(batch_shape, total_iterations,
embed_input_dim=embed_input_dim,
dense_constraint=1, l2_reg=1e-4,
bidirectional=False, sparse=True)
optimizer = self._make_optimizer(optimizer_name, self.model,
**optimizer_kw)
self.model.compile(optimizer, loss='sparse_categorical_crossentropy')
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
for batch_num in range(num_batches):
self.model.train_on_batch(X[batch_num], Y[batch_num])
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MISC TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MISC TESTS PASSED >>\n", 'green')
def test_control(self): # tests losses against original optimizers'
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
pass_txt = "Control Test Passed"
if optimizer_name == 'AdamW':
for amsgrad in [True, False]:
self._test_control(optimizer_name, amsgrad=amsgrad)
print("\n>> AdamW amsgrad={} {}".format(amsgrad, pass_txt))
elif optimizer_name == 'NadamW':
self._test_control(optimizer_name)
elif optimizer_name == 'SGDW':
for nesterov in [True, False]:
self._test_control(optimizer_name, nesterov=nesterov)
print("\n>> SGDW nesterov={} {}".format(nesterov, pass_txt))
o_name = optimizer_name
cprint("\n<< {} {} >>\n".format(o_name, pass_txt.upper()), 'green')
cprint("\n<< ALL CONTROL TESTS PASSED >>\n", 'green')
def _test_control(self, optimizer_name, amsgrad=False, nesterov=False):
optimizer_kw = dict(total_iterations=0, decay=1e-3,
amsgrad=amsgrad, nesterov=nesterov,
control_mode=True)
num_batches = 100
batch_size, timesteps = 16, 32
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
model_kw = dict(batch_shape=batch_shape, dense_constraint=1,
total_iterations=total_iterations,
embed_input_dim=embed_input_dim, l2_reg=0,
bidirectional=False, sparse=True)
loss_name = 'sparse_categorical_crossentropy'
reset_seeds(verbose=0)
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_custom = self._make_model(**model_kw)
optimizer_custom = self._make_optimizer(optimizer_name,
self.model_custom,
**optimizer_kw)
self.model_custom.compile(optimizer_custom, loss=loss_name)
self.loss_custom = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_custom += [self.model_custom.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_custom -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_control = self._make_model(**model_kw)
optimizer_control = self._make_optimizer(optimizer_name[:-1],
self.model_control,
**optimizer_kw)
self.model_control.compile(optimizer_control, loss=loss_name)
self.loss_control = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_control += [self.model_control.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_control -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
loss_diff = np.abs(np.array(self.loss_custom) -
np.array(self.loss_control))
print("%s max loss diff: %e" % (optimizer_name, np.max(loss_diff)))
self.assertTrue(np.allclose(self.loss_custom, self.loss_control,
rtol=0, atol=1e-3))
# cleanup
del self.model_custom, self.model_control
del optimizer_custom, optimizer_control
reset_seeds(reset_graph_with_backend=K, verbose=0)
def _test_save_load(self, model, X, optimizer_name, optimizer):
saved_model_preds = model.predict(X[0])
saved_model_weights = K.batch_get_value(model.trainable_weights)
saved_optim_weights = K.batch_get_value(model.optimizer.weights)
        test_name = 'test__{}.h5'.format(np.random.random())
modelpath = os.path.join(tempfile.gettempdir(), test_name)
model.save(modelpath)
del model
model = load_model(modelpath, custom_objects={optimizer_name: optimizer})
loaded_model_preds = model.predict(X[0])
loaded_model_weights = K.batch_get_value(model.trainable_weights)
loaded_optim_weights = K.batch_get_value(model.optimizer.weights)
self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds,
rtol=0, atol=1e-8))
for smw, lmw in zip(saved_model_weights, loaded_model_weights):
self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8))
for sow, low in zip(saved_optim_weights, loaded_optim_weights):
self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8))
@staticmethod
def _make_data(num_batches, batch_size, timesteps, num_channels=None,
embed_input_dim=None, sparse=False):
if sparse:
X = np.random.randint(0, embed_input_dim,
(num_batches, batch_size, timesteps))
else:
X = np.random.randn(num_batches, batch_size, timesteps, num_channels)
Y = np.random.randint(0, 2, (num_batches, batch_size))
return X, Y
@staticmethod
def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True,
dense_constraint=None, embed_input_dim=None, sparse=False):
if dense_constraint is not None:
dense_constraint = maxnorm(dense_constraint)
ipt = Input(batch_shape=batch_shape)
if sparse:
x = Embedding(embed_input_dim, embed_input_dim*3 + 1,
mask_zero=True)(ipt)
else:
x = ipt
gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg))
if bidirectional:
x = Bidirectional(gru)(x)
else:
x = gru(x)
x = Dense(2, kernel_regularizer=l2(l2_reg),
kernel_constraint=dense_constraint)(x)
if sparse:
out = Dense(2, activation='softmax')(x)
else:
out = Dense(1, activation='sigmoid')(x)
return Model(ipt, out)
@staticmethod
def _make_optimizer(optimizer_name, model, total_iterations, decay=0,
amsgrad=False, nesterov=False, control_mode=False):
optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW,
'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD}
optimizer = optimizer_dict[optimizer_name]
optimizer_kw = {}
if 'Adam' in optimizer_name:
optimizer_kw = {'amsgrad': amsgrad}
elif 'SGD' in optimizer_name:
optimizer_kw = {'nesterov': nesterov, 'momentum': .9}
if 'Nadam' not in optimizer_name:
optimizer_kw.update({'decay': decay})
if not control_mode:
wd_dict = get_weight_decays(model)
l2_extra = [2e-5]*(len(wd_dict) - 3)
wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra)
lr_m = {'gru': 0.5}
use_cosine_annealing = True
else:
wd, lr_m = None, None
use_cosine_annealing = False
if not any([optimizer_name == name for name in ('Adam', 'Nadam', 'SGD')]):
return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m,
use_cosine_annealing=use_cosine_annealing, t_cur=0,
total_iterations=total_iterations, **optimizer_kw)
else:
return optimizer(lr=1e-4, **optimizer_kw)
@staticmethod
def _valid_weight_decays(model):
weight_decays = get_weight_decays(model)
trues = 0
for wd in weight_decays.values():
trues += (wd != 0)
return (trues == 0)
@staticmethod
def _valid_cosine_annealing(eta_history, total_iterations, num_epochs):
eta_history_simul = []
for epoch in range(num_epochs):
for iteration in range(0, total_iterations):
eta_history_simul.append(0.5 * (
1 + np.cos(np.pi*iteration / total_iterations)))
return np.allclose(eta_history, eta_history_simul, rtol=0, atol=2e-7)
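# For reference (not part of the original tests): the schedule that _valid_cosine_annealing
# reproduces is eta_t = 0.5 * (1 + cos(pi * t_cur / total_iterations)), restarting from 1
# whenever t_cur is reset to 0; for example
#     total_iterations = 25
#     eta = [0.5 * (1 + np.cos(np.pi * t / total_iterations)) for t in range(total_iterations)]
# gives eta[0] == 1.0 decaying toward 0 as t approaches total_iterations.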
| [((25, 4, 25, 42), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((70, 8, 70, 58), 'termcolor.cprint', 'cprint', ({(70, 15, 70, 48): '"""\n<< ALL MAIN TESTS PASSED >>\n"""', (70, 50, 70, 57): '"""green"""'}, {}), '("""\n<< ALL MAIN TESTS PASSED >>\n""", \'green\')', False, 'from termcolor import cprint\n'), ((106, 8, 106, 58), 'termcolor.cprint', 'cprint', ({(106, 15, 106, 48): '"""\n<< ALL MISC TESTS PASSED >>\n"""', (106, 50, 106, 57): '"""green"""'}, {}), '("""\n<< ALL MISC TESTS PASSED >>\n""", \'green\')', False, 'from termcolor import cprint\n'), ((128, 8, 128, 61), 'termcolor.cprint', 'cprint', ({(128, 15, 128, 51): '"""\n<< ALL CONTROL TESTS PASSED >>\n"""', (128, 53, 128, 60): '"""green"""'}, {}), '("""\n<< ALL CONTROL TESTS PASSED >>\n""", \'green\')', False, 'from termcolor import cprint\n'), ((156, 13, 156, 19), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((170, 13, 170, 19), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((218, 12, 218, 62), 'numpy.random.randint', 'np.random.randint', ({(218, 30, 218, 31): '0', (218, 33, 218, 34): '2', (218, 36, 218, 61): '(num_batches, batch_size)'}, {}), '(0, 2, (num_batches, batch_size))', True, 'import numpy as np\n'), ((295, 15, 295, 77), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((181, 24, 182, 54), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((193, 43, 193, 61), 'numpy.random.random', 'np.random.random', ({}, {}), '()', True, 'import numpy as np\n'), ((194, 33, 194, 54), 'tempfile.gettempdir', 'tempfile.gettempdir', ({}, {}), '()', False, 'import tempfile\n'), ((203, 24, 204, 54), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((214, 16, 215, 71), 'numpy.random.randint', 'np.random.randint', ({(214, 34, 214, 35): '0', (214, 37, 214, 52): 'embed_input_dim', (215, 34, 215, 70): '(num_batches, batch_size, timesteps)'}, {}), '(0, embed_input_dim, (num_batches, batch_size, timesteps))', True, 'import numpy as np\n'), ((217, 16, 217, 81), 'numpy.random.randn', 'np.random.randn', ({(217, 32, 217, 43): 'num_batches', (217, 45, 217, 55): 'batch_size', (217, 57, 217, 66): 'timesteps', (217, 68, 217, 80): 'num_channels'}, {}), '(num_batches, batch_size, timesteps, num_channels)', True, 'import numpy as np\n'), ((177, 27, 177, 53), 'numpy.array', 'np.array', ({(177, 36, 177, 52): 'self.loss_custom'}, {}), '(self.loss_custom)', True, 'import numpy as np\n'), ((178, 27, 178, 54), 'numpy.array', 'np.array', ({(178, 36, 178, 53): 'self.loss_control'}, {}), '(self.loss_control)', True, 'import numpy as np\n'), ((206, 28, 206, 68), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((208, 28, 208, 68), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((179, 56, 179, 73), 'numpy.max', 'np.max', ({(179, 63, 179, 72): 'loss_diff'}, {}), '(loss_diff)', True, 'import numpy as np\n'), ((161, 64, 161, 70), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((175, 65, 175, 71), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((294, 28, 294, 70), 'numpy.cos', 'np.cos', ({(294, 35, 294, 69): '(np.pi * iteration / total_iterations)'}, {}), '(np.pi * iteration / total_iterations)', True, 'import numpy as np\n')] |
Vasyka/koku | koku/reporting/migrations/0099_ocp_performance.py | b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614 | # Generated by Django 2.2.10 on 2020-02-18 12:51
import django.contrib.postgres.indexes
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("reporting", "0098_auto_20200221_2034")]
operations = [
migrations.RunSQL(
"""
drop materialized view if exists reporting_ocpallcostlineitem_daily_summary;
drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;
"""
),
migrations.RemoveIndex(model_name="ocpawscostlineitemdailysummary", name="cost_summary_node_idx"),
migrations.RemoveIndex(
model_name="ocpawscostlineitemprojectdailysummary", name="cost__proj_sum_namespace_idx"
),
migrations.RemoveIndex(model_name="ocpawscostlineitemprojectdailysummary", name="cost_proj_sum_node_idx"),
migrations.RemoveIndex(model_name="ocpazurecostlineitemdailysummary", name="ocpazure_node_idx"),
migrations.RemoveIndex(
model_name="ocpazurecostlineitemprojectdailysummary", name="ocpazure_proj_namespace_idx"
),
migrations.RemoveIndex(model_name="ocpazurecostlineitemprojectdailysummary", name="ocpazure_proj_node_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdaily", name="namespace_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdaily", name="node_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdailysummary", name="summary_namespace_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdailysummary", name="summary_node_idx"),
migrations.AlterField(
model_name="ocpawscostlineitemprojectdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpawscostlineitemprojectdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemprojectdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemprojectdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(model_name="ocpstoragelineitemdaily", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpstoragelineitemdaily", name="usage_start", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="total_seconds", field=models.IntegerField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="usage_start", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdailysummary", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdailysummary", name="usage_start", field=models.DateField()),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["node"], name="cost_summary_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(
fields=["namespace"], name="cost__proj_sum_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="cost_proj_sum_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["node"], name="ocpazure_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(
fields=["namespace"], name="ocpazure_proj_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="ocpazure_proj_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpstoragelineitemdaily",
index=models.Index(
fields=["namespace"], name="ocp_storage_li_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpstoragelineitemdaily",
index=models.Index(fields=["node"], name="ocp_storage_li_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdaily",
index=models.Index(fields=["namespace"], name="namespace_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdaily",
index=models.Index(fields=["node"], name="node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["namespace"], name="summary_namespace_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["node"], name="summary_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AlterField(model_name="costsummary", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="costsummary", name="usage_start", field=models.DateField()),
migrations.AddIndex(
model_name="costsummary", index=models.Index(fields=["usage_start"], name="ocpcostsum_usage_start_idx")
),
migrations.AddIndex(
model_name="costsummary",
index=models.Index(
fields=["namespace"], name="ocpcostsum_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="costsummary",
index=models.Index(fields=["node"], name="ocpcostsum_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="costsummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["pod_labels"], name="ocpcostsum_pod_labels_idx"),
),
# This extension will help specifically with "col LIKE %val%"
# operations. (As long as val is at least 3 characters)
migrations.RunSQL(
"""
create extension if not exists pg_trgm schema public;
"""
),
# Create indexes to aid with text searching.
# These cases will specifically help with case-insensitive
# and contains (vs startswith) searches
# ocp usage line item daily
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_namespace_idx
on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_node_idx
on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);
"""
),
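        # Illustrative note (not part of the generated migration): the trigram GIN
        # indexes above and below speed up case-insensitive "contains" filters of
        # the form
        #   SELECT * FROM reporting_ocpusagelineitem_daily
        #    WHERE UPPER(namespace) LIKE UPPER('%openshift%');
        # '%openshift%' is a hypothetical search value used only as an example.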
# ocp usage line item daily summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_summary_namespace_like_idx
on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_summary_node_like_idx
on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpstoragelineitem_daily
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_storage_li_namespace_like_idx
on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_storage_li_node_like_idx
on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);
"""
),
# ocp azure cost
migrations.RunSQL(
"""
/* add node index for like trigram ops */
create index if not exists ocpazure_node_like_idx
on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# ocp azure project cost
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocpazure_proj_namespace_like_idx
on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocpazure_proj_node_like_idx
on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpawscostlineitem_daily_summary
migrations.RunSQL(
"""
/* add node index for like trigram ops */
create index if not exists cost_summary_node_like_idx
on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpawscostlineitem_project_daily_summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists cost__proj_sum_namespace_like_idx
on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists cost__proj_sum_node_like_idx
on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpcosts_summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocpcostsum_namespace_like_idx
on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocpcostsum_node_like_idx
on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
migrations.RunSQL(
"""
drop materialized view if exists reporting_ocpallcostlineitem_daily_summary;
create materialized view if not exists reporting_ocpallcostlineitem_daily_summary as
SELECT row_number() OVER () AS id,
lids.source_type,
lids.cluster_id,
lids.cluster_alias,
lids.namespace,
lids.node,
lids.resource_id,
lids.usage_start,
lids.usage_end,
lids.usage_account_id,
lids.account_alias_id,
lids.product_code,
lids.product_family,
lids.instance_type,
lids.region,
lids.availability_zone,
lids.tags,
lids.usage_amount,
lids.unit,
lids.unblended_cost,
lids.markup_cost,
lids.currency_code,
lids.shared_projects,
lids.project_costs
FROM ( SELECT 'AWS'::text AS source_type,
reporting_ocpawscostlineitem_daily_summary.cluster_id,
reporting_ocpawscostlineitem_daily_summary.cluster_alias,
reporting_ocpawscostlineitem_daily_summary.namespace,
reporting_ocpawscostlineitem_daily_summary.node,
reporting_ocpawscostlineitem_daily_summary.resource_id,
reporting_ocpawscostlineitem_daily_summary.usage_start::date,
reporting_ocpawscostlineitem_daily_summary.usage_end::date,
reporting_ocpawscostlineitem_daily_summary.usage_account_id,
reporting_ocpawscostlineitem_daily_summary.account_alias_id,
reporting_ocpawscostlineitem_daily_summary.product_code,
reporting_ocpawscostlineitem_daily_summary.product_family,
reporting_ocpawscostlineitem_daily_summary.instance_type,
reporting_ocpawscostlineitem_daily_summary.region,
reporting_ocpawscostlineitem_daily_summary.availability_zone,
reporting_ocpawscostlineitem_daily_summary.tags,
reporting_ocpawscostlineitem_daily_summary.usage_amount,
reporting_ocpawscostlineitem_daily_summary.unit,
reporting_ocpawscostlineitem_daily_summary.unblended_cost,
reporting_ocpawscostlineitem_daily_summary.markup_cost,
reporting_ocpawscostlineitem_daily_summary.currency_code,
reporting_ocpawscostlineitem_daily_summary.shared_projects,
reporting_ocpawscostlineitem_daily_summary.project_costs
FROM reporting_ocpawscostlineitem_daily_summary
WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)
UNION
SELECT 'Azure'::text AS source_type,
reporting_ocpazurecostlineitem_daily_summary.cluster_id,
reporting_ocpazurecostlineitem_daily_summary.cluster_alias,
reporting_ocpazurecostlineitem_daily_summary.namespace,
reporting_ocpazurecostlineitem_daily_summary.node,
reporting_ocpazurecostlineitem_daily_summary.resource_id,
reporting_ocpazurecostlineitem_daily_summary.usage_start::date,
reporting_ocpazurecostlineitem_daily_summary.usage_end::date,
reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id,
NULL::integer AS account_alias_id,
reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code,
NULL::character varying AS product_family,
reporting_ocpazurecostlineitem_daily_summary.instance_type,
reporting_ocpazurecostlineitem_daily_summary.resource_location AS region,
NULL::character varying AS availability_zone,
reporting_ocpazurecostlineitem_daily_summary.tags,
reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount,
reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit,
reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost,
reporting_ocpazurecostlineitem_daily_summary.markup_cost,
reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code,
reporting_ocpazurecostlineitem_daily_summary.shared_projects,
reporting_ocpazurecostlineitem_daily_summary.project_costs
FROM reporting_ocpazurecostlineitem_daily_summary
WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids
with no data;
create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix
on reporting_ocpallcostlineitem_daily_summary using gin (namespace);
create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix
on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix
on reporting_ocpallcostlineitem_daily_summary (usage_start);
drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;
create materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as
SELECT row_number() OVER () AS id,
lids.source_type,
lids.cluster_id,
lids.cluster_alias,
lids.data_source,
lids.namespace,
lids.node,
lids.pod_labels,
lids.resource_id,
lids.usage_start,
lids.usage_end,
lids.usage_account_id,
lids.account_alias_id,
lids.product_code,
lids.product_family,
lids.instance_type,
lids.region,
lids.availability_zone,
lids.usage_amount,
lids.unit,
lids.unblended_cost,
lids.project_markup_cost,
lids.pod_cost,
lids.currency_code
FROM ( SELECT 'AWS'::text AS source_type,
reporting_ocpawscostlineitem_project_daily_summary.cluster_id,
reporting_ocpawscostlineitem_project_daily_summary.cluster_alias,
reporting_ocpawscostlineitem_project_daily_summary.data_source,
reporting_ocpawscostlineitem_project_daily_summary.namespace,
reporting_ocpawscostlineitem_project_daily_summary.node,
reporting_ocpawscostlineitem_project_daily_summary.pod_labels,
reporting_ocpawscostlineitem_project_daily_summary.resource_id,
reporting_ocpawscostlineitem_project_daily_summary.usage_start::date,
reporting_ocpawscostlineitem_project_daily_summary.usage_end::date,
reporting_ocpawscostlineitem_project_daily_summary.usage_account_id,
reporting_ocpawscostlineitem_project_daily_summary.account_alias_id,
reporting_ocpawscostlineitem_project_daily_summary.product_code,
reporting_ocpawscostlineitem_project_daily_summary.product_family,
reporting_ocpawscostlineitem_project_daily_summary.instance_type,
reporting_ocpawscostlineitem_project_daily_summary.region,
reporting_ocpawscostlineitem_project_daily_summary.availability_zone,
reporting_ocpawscostlineitem_project_daily_summary.usage_amount,
reporting_ocpawscostlineitem_project_daily_summary.unit,
reporting_ocpawscostlineitem_project_daily_summary.unblended_cost,
reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost,
reporting_ocpawscostlineitem_project_daily_summary.pod_cost,
reporting_ocpawscostlineitem_project_daily_summary.currency_code
FROM reporting_ocpawscostlineitem_project_daily_summary
WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)
UNION
SELECT 'Azure'::text AS source_type,
reporting_ocpazurecostlineitem_project_daily_summary.cluster_id,
reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias,
reporting_ocpazurecostlineitem_project_daily_summary.data_source,
reporting_ocpazurecostlineitem_project_daily_summary.namespace,
reporting_ocpazurecostlineitem_project_daily_summary.node,
reporting_ocpazurecostlineitem_project_daily_summary.pod_labels,
reporting_ocpazurecostlineitem_project_daily_summary.resource_id,
reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date,
reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date,
reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id,
NULL::integer AS account_alias_id,
reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code,
NULL::character varying AS product_family,
reporting_ocpazurecostlineitem_project_daily_summary.instance_type,
reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region,
NULL::character varying AS availability_zone,
reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount,
reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit,
reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost,
reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost,
reporting_ocpazurecostlineitem_project_daily_summary.pod_cost,
reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code
FROM reporting_ocpazurecostlineitem_project_daily_summary
WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids
with no data;
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix
on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix
on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix
on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix
on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix
on reporting_ocpallcostlineitem_project_daily_summary (usage_start);
"""
),
migrations.RunSQL(
"""
refresh materialized view reporting_ocpallcostlineitem_daily_summary;
refresh materialized view reporting_ocpallcostlineitem_project_daily_summary;
"""
),
]
| [((12, 8, 17, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(13, 12, 16, 15): '"""\ndrop materialized view if exists reporting_ocpallcostlineitem_daily_summary;\ndrop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;\n """'}, {}), '(\n """\ndrop materialized view if exists reporting_ocpallcostlineitem_daily_summary;\ndrop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;\n """\n )', False, 'from django.db import migrations\n'), ((18, 8, 18, 105), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((19, 8, 21, 9), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((22, 8, 22, 113), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((23, 8, 23, 103), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((24, 8, 26, 9), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((27, 8, 27, 115), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((28, 8, 28, 88), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((29, 8, 29, 83), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((30, 8, 30, 103), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((31, 8, 31, 98), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', (), '', False, 'from django.db import migrations\n'), ((132, 8, 136, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(133, 12, 135, 15): '"""\ncreate extension if not exists pg_trgm schema public;\n """'}, {}), '(\n """\ncreate extension if not exists pg_trgm schema public;\n """)', False, 'from django.db import migrations\n'), ((141, 8, 151, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(142, 12, 150, 15): '"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_namespace_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_node_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n """'}, {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_namespace_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_node_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n """\n )', False, 'from django.db import migrations\n'), ((153, 8, 163, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(154, 12, 162, 15): '"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_summary_namespace_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_summary_node_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'}, {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if 
not exists ocp_summary_namespace_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_summary_node_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )', False, 'from django.db import migrations\n'), ((165, 8, 175, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(166, 12, 174, 15): '"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_storage_li_namespace_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_storage_li_node_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n """'}, {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_storage_li_namespace_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_storage_li_node_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n """\n )', False, 'from django.db import migrations\n'), ((177, 8, 183, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(178, 12, 182, 15): '"""\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_node_like_idx\n on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'}, {}), '(\n """\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_node_like_idx\n on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )', False, 'from django.db import migrations\n'), ((185, 8, 195, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(186, 12, 194, 15): '"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpazure_proj_namespace_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_proj_node_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'}, {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpazure_proj_namespace_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_proj_node_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )', False, 'from django.db import migrations\n'), ((197, 8, 203, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(198, 12, 202, 15): '"""\n/* add node index for like trigram ops */\ncreate index if not exists cost_summary_node_like_idx\n on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'}, {}), '(\n """\n/* add node index for like trigram ops */\ncreate index if not exists cost_summary_node_like_idx\n on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )', False, 'from django.db import migrations\n'), ((205, 8, 215, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(206, 12, 214, 15): '"""\n/* add namespace index for like trigram ops */\ncreate index if not exists 
cost__proj_sum_namespace_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists cost__proj_sum_node_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'}, {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists cost__proj_sum_namespace_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists cost__proj_sum_node_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )', False, 'from django.db import migrations\n'), ((217, 8, 227, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(218, 12, 226, 15): '"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpcostsum_namespace_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpcostsum_node_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);\n """'}, {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpcostsum_namespace_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpcostsum_node_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )', False, 'from django.db import migrations\n'), ((228, 8, 409, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(229, 12, 408, 15): '"""\ndrop materialized view if exists reporting_ocpallcostlineitem_daily_summary;\n\ncreate materialized view if not exists reporting_ocpallcostlineitem_daily_summary as\n SELECT row_number() OVER () AS id,\n lids.source_type,\n lids.cluster_id,\n lids.cluster_alias,\n lids.namespace,\n lids.node,\n lids.resource_id,\n lids.usage_start,\n lids.usage_end,\n lids.usage_account_id,\n lids.account_alias_id,\n lids.product_code,\n lids.product_family,\n lids.instance_type,\n lids.region,\n lids.availability_zone,\n lids.tags,\n lids.usage_amount,\n lids.unit,\n lids.unblended_cost,\n lids.markup_cost,\n lids.currency_code,\n lids.shared_projects,\n lids.project_costs\n FROM ( SELECT \'AWS\'::text AS source_type,\n reporting_ocpawscostlineitem_daily_summary.cluster_id,\n reporting_ocpawscostlineitem_daily_summary.cluster_alias,\n reporting_ocpawscostlineitem_daily_summary.namespace,\n reporting_ocpawscostlineitem_daily_summary.node,\n reporting_ocpawscostlineitem_daily_summary.resource_id,\n reporting_ocpawscostlineitem_daily_summary.usage_start::date,\n reporting_ocpawscostlineitem_daily_summary.usage_end::date,\n reporting_ocpawscostlineitem_daily_summary.usage_account_id,\n reporting_ocpawscostlineitem_daily_summary.account_alias_id,\n reporting_ocpawscostlineitem_daily_summary.product_code,\n reporting_ocpawscostlineitem_daily_summary.product_family,\n reporting_ocpawscostlineitem_daily_summary.instance_type,\n reporting_ocpawscostlineitem_daily_summary.region,\n reporting_ocpawscostlineitem_daily_summary.availability_zone,\n reporting_ocpawscostlineitem_daily_summary.tags,\n reporting_ocpawscostlineitem_daily_summary.usage_amount,\n reporting_ocpawscostlineitem_daily_summary.unit,\n 
reporting_ocpawscostlineitem_daily_summary.unblended_cost,\n reporting_ocpawscostlineitem_daily_summary.markup_cost,\n reporting_ocpawscostlineitem_daily_summary.currency_code,\n reporting_ocpawscostlineitem_daily_summary.shared_projects,\n reporting_ocpawscostlineitem_daily_summary.project_costs\n FROM reporting_ocpawscostlineitem_daily_summary\n WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)\n UNION\n SELECT \'Azure\'::text AS source_type,\n reporting_ocpazurecostlineitem_daily_summary.cluster_id,\n reporting_ocpazurecostlineitem_daily_summary.cluster_alias,\n reporting_ocpazurecostlineitem_daily_summary.namespace,\n reporting_ocpazurecostlineitem_daily_summary.node,\n reporting_ocpazurecostlineitem_daily_summary.resource_id,\n reporting_ocpazurecostlineitem_daily_summary.usage_start::date,\n reporting_ocpazurecostlineitem_daily_summary.usage_end::date,\n reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id,\n NULL::integer AS account_alias_id,\n reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code,\n NULL::character varying AS product_family,\n reporting_ocpazurecostlineitem_daily_summary.instance_type,\n reporting_ocpazurecostlineitem_daily_summary.resource_location AS region,\n NULL::character varying AS availability_zone,\n reporting_ocpazurecostlineitem_daily_summary.tags,\n reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount,\n reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit,\n reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost,\n reporting_ocpazurecostlineitem_daily_summary.markup_cost,\n reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code,\n reporting_ocpazurecostlineitem_daily_summary.shared_projects,\n reporting_ocpazurecostlineitem_daily_summary.project_costs\n FROM reporting_ocpazurecostlineitem_daily_summary\n WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)) lids\n with no data;\n\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix\n on reporting_ocpallcostlineitem_daily_summary using gin (namespace);\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_node_ix\n on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix\n on reporting_ocpallcostlineitem_daily_summary (usage_start);\n\n\ndrop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;\n\ncreate materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as\n SELECT row_number() OVER () AS id,\n lids.source_type,\n lids.cluster_id,\n lids.cluster_alias,\n lids.data_source,\n lids.namespace,\n lids.node,\n lids.pod_labels,\n lids.resource_id,\n lids.usage_start,\n lids.usage_end,\n lids.usage_account_id,\n lids.account_alias_id,\n lids.product_code,\n lids.product_family,\n lids.instance_type,\n lids.region,\n lids.availability_zone,\n lids.usage_amount,\n lids.unit,\n lids.unblended_cost,\n lids.project_markup_cost,\n lids.pod_cost,\n lids.currency_code\n FROM ( SELECT \'AWS\'::text AS source_type,\n reporting_ocpawscostlineitem_project_daily_summary.cluster_id,\n reporting_ocpawscostlineitem_project_daily_summary.cluster_alias,\n reporting_ocpawscostlineitem_project_daily_summary.data_source,\n 
reporting_ocpawscostlineitem_project_daily_summary.namespace,\n reporting_ocpawscostlineitem_project_daily_summary.node,\n reporting_ocpawscostlineitem_project_daily_summary.pod_labels,\n reporting_ocpawscostlineitem_project_daily_summary.resource_id,\n reporting_ocpawscostlineitem_project_daily_summary.usage_start::date,\n reporting_ocpawscostlineitem_project_daily_summary.usage_end::date,\n reporting_ocpawscostlineitem_project_daily_summary.usage_account_id,\n reporting_ocpawscostlineitem_project_daily_summary.account_alias_id,\n reporting_ocpawscostlineitem_project_daily_summary.product_code,\n reporting_ocpawscostlineitem_project_daily_summary.product_family,\n reporting_ocpawscostlineitem_project_daily_summary.instance_type,\n reporting_ocpawscostlineitem_project_daily_summary.region,\n reporting_ocpawscostlineitem_project_daily_summary.availability_zone,\n reporting_ocpawscostlineitem_project_daily_summary.usage_amount,\n reporting_ocpawscostlineitem_project_daily_summary.unit,\n reporting_ocpawscostlineitem_project_daily_summary.unblended_cost,\n reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost,\n reporting_ocpawscostlineitem_project_daily_summary.pod_cost,\n reporting_ocpawscostlineitem_project_daily_summary.currency_code\n FROM reporting_ocpawscostlineitem_project_daily_summary\n WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)\n UNION\n SELECT \'Azure\'::text AS source_type,\n reporting_ocpazurecostlineitem_project_daily_summary.cluster_id,\n reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias,\n reporting_ocpazurecostlineitem_project_daily_summary.data_source,\n reporting_ocpazurecostlineitem_project_daily_summary.namespace,\n reporting_ocpazurecostlineitem_project_daily_summary.node,\n reporting_ocpazurecostlineitem_project_daily_summary.pod_labels,\n reporting_ocpazurecostlineitem_project_daily_summary.resource_id,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date,\n reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id,\n NULL::integer AS account_alias_id,\n reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code,\n NULL::character varying AS product_family,\n reporting_ocpazurecostlineitem_project_daily_summary.instance_type,\n reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region,\n NULL::character varying AS availability_zone,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount,\n reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit,\n reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.pod_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code\n FROM reporting_ocpazurecostlineitem_project_daily_summary\n WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)) lids\n with no data;\n\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix\n on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops);\ncreate index 
mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix\n on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix\n on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix\n on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix\n on reporting_ocpallcostlineitem_project_daily_summary (usage_start);\n """'}, {}), '(\n """\ndrop materialized view if exists reporting_ocpallcostlineitem_daily_summary;\n\ncreate materialized view if not exists reporting_ocpallcostlineitem_daily_summary as\n SELECT row_number() OVER () AS id,\n lids.source_type,\n lids.cluster_id,\n lids.cluster_alias,\n lids.namespace,\n lids.node,\n lids.resource_id,\n lids.usage_start,\n lids.usage_end,\n lids.usage_account_id,\n lids.account_alias_id,\n lids.product_code,\n lids.product_family,\n lids.instance_type,\n lids.region,\n lids.availability_zone,\n lids.tags,\n lids.usage_amount,\n lids.unit,\n lids.unblended_cost,\n lids.markup_cost,\n lids.currency_code,\n lids.shared_projects,\n lids.project_costs\n FROM ( SELECT \'AWS\'::text AS source_type,\n reporting_ocpawscostlineitem_daily_summary.cluster_id,\n reporting_ocpawscostlineitem_daily_summary.cluster_alias,\n reporting_ocpawscostlineitem_daily_summary.namespace,\n reporting_ocpawscostlineitem_daily_summary.node,\n reporting_ocpawscostlineitem_daily_summary.resource_id,\n reporting_ocpawscostlineitem_daily_summary.usage_start::date,\n reporting_ocpawscostlineitem_daily_summary.usage_end::date,\n reporting_ocpawscostlineitem_daily_summary.usage_account_id,\n reporting_ocpawscostlineitem_daily_summary.account_alias_id,\n reporting_ocpawscostlineitem_daily_summary.product_code,\n reporting_ocpawscostlineitem_daily_summary.product_family,\n reporting_ocpawscostlineitem_daily_summary.instance_type,\n reporting_ocpawscostlineitem_daily_summary.region,\n reporting_ocpawscostlineitem_daily_summary.availability_zone,\n reporting_ocpawscostlineitem_daily_summary.tags,\n reporting_ocpawscostlineitem_daily_summary.usage_amount,\n reporting_ocpawscostlineitem_daily_summary.unit,\n reporting_ocpawscostlineitem_daily_summary.unblended_cost,\n reporting_ocpawscostlineitem_daily_summary.markup_cost,\n reporting_ocpawscostlineitem_daily_summary.currency_code,\n reporting_ocpawscostlineitem_daily_summary.shared_projects,\n reporting_ocpawscostlineitem_daily_summary.project_costs\n FROM reporting_ocpawscostlineitem_daily_summary\n WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)\n UNION\n SELECT \'Azure\'::text AS source_type,\n reporting_ocpazurecostlineitem_daily_summary.cluster_id,\n reporting_ocpazurecostlineitem_daily_summary.cluster_alias,\n reporting_ocpazurecostlineitem_daily_summary.namespace,\n reporting_ocpazurecostlineitem_daily_summary.node,\n reporting_ocpazurecostlineitem_daily_summary.resource_id,\n reporting_ocpazurecostlineitem_daily_summary.usage_start::date,\n reporting_ocpazurecostlineitem_daily_summary.usage_end::date,\n reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id,\n NULL::integer AS account_alias_id,\n reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code,\n NULL::character varying 
AS product_family,\n reporting_ocpazurecostlineitem_daily_summary.instance_type,\n reporting_ocpazurecostlineitem_daily_summary.resource_location AS region,\n NULL::character varying AS availability_zone,\n reporting_ocpazurecostlineitem_daily_summary.tags,\n reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount,\n reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit,\n reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost,\n reporting_ocpazurecostlineitem_daily_summary.markup_cost,\n reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code,\n reporting_ocpazurecostlineitem_daily_summary.shared_projects,\n reporting_ocpazurecostlineitem_daily_summary.project_costs\n FROM reporting_ocpazurecostlineitem_daily_summary\n WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)) lids\n with no data;\n\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix\n on reporting_ocpallcostlineitem_daily_summary using gin (namespace);\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_node_ix\n on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix\n on reporting_ocpallcostlineitem_daily_summary (usage_start);\n\n\ndrop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;\n\ncreate materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as\n SELECT row_number() OVER () AS id,\n lids.source_type,\n lids.cluster_id,\n lids.cluster_alias,\n lids.data_source,\n lids.namespace,\n lids.node,\n lids.pod_labels,\n lids.resource_id,\n lids.usage_start,\n lids.usage_end,\n lids.usage_account_id,\n lids.account_alias_id,\n lids.product_code,\n lids.product_family,\n lids.instance_type,\n lids.region,\n lids.availability_zone,\n lids.usage_amount,\n lids.unit,\n lids.unblended_cost,\n lids.project_markup_cost,\n lids.pod_cost,\n lids.currency_code\n FROM ( SELECT \'AWS\'::text AS source_type,\n reporting_ocpawscostlineitem_project_daily_summary.cluster_id,\n reporting_ocpawscostlineitem_project_daily_summary.cluster_alias,\n reporting_ocpawscostlineitem_project_daily_summary.data_source,\n reporting_ocpawscostlineitem_project_daily_summary.namespace,\n reporting_ocpawscostlineitem_project_daily_summary.node,\n reporting_ocpawscostlineitem_project_daily_summary.pod_labels,\n reporting_ocpawscostlineitem_project_daily_summary.resource_id,\n reporting_ocpawscostlineitem_project_daily_summary.usage_start::date,\n reporting_ocpawscostlineitem_project_daily_summary.usage_end::date,\n reporting_ocpawscostlineitem_project_daily_summary.usage_account_id,\n reporting_ocpawscostlineitem_project_daily_summary.account_alias_id,\n reporting_ocpawscostlineitem_project_daily_summary.product_code,\n reporting_ocpawscostlineitem_project_daily_summary.product_family,\n reporting_ocpawscostlineitem_project_daily_summary.instance_type,\n reporting_ocpawscostlineitem_project_daily_summary.region,\n reporting_ocpawscostlineitem_project_daily_summary.availability_zone,\n reporting_ocpawscostlineitem_project_daily_summary.usage_amount,\n reporting_ocpawscostlineitem_project_daily_summary.unit,\n reporting_ocpawscostlineitem_project_daily_summary.unblended_cost,\n reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost,\n reporting_ocpawscostlineitem_project_daily_summary.pod_cost,\n 
reporting_ocpawscostlineitem_project_daily_summary.currency_code\n FROM reporting_ocpawscostlineitem_project_daily_summary\n WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)\n UNION\n SELECT \'Azure\'::text AS source_type,\n reporting_ocpazurecostlineitem_project_daily_summary.cluster_id,\n reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias,\n reporting_ocpazurecostlineitem_project_daily_summary.data_source,\n reporting_ocpazurecostlineitem_project_daily_summary.namespace,\n reporting_ocpazurecostlineitem_project_daily_summary.node,\n reporting_ocpazurecostlineitem_project_daily_summary.pod_labels,\n reporting_ocpazurecostlineitem_project_daily_summary.resource_id,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date,\n reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id,\n NULL::integer AS account_alias_id,\n reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code,\n NULL::character varying AS product_family,\n reporting_ocpazurecostlineitem_project_daily_summary.instance_type,\n reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region,\n NULL::character varying AS availability_zone,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount,\n reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit,\n reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.pod_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code\n FROM reporting_ocpazurecostlineitem_project_daily_summary\n WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)) lids\n with no data;\n\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix\n on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix\n on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix\n on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix\n on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix\n on reporting_ocpallcostlineitem_project_daily_summary (usage_start);\n """\n )', False, 'from django.db import migrations\n'), ((410, 8, 415, 9), 'django.db.migrations.RunSQL', 'migrations.RunSQL', ({(411, 12, 414, 15): '"""\nrefresh materialized view reporting_ocpallcostlineitem_daily_summary;\nrefresh materialized view reporting_ocpallcostlineitem_project_daily_summary;\n """'}, {}), '(\n """\nrefresh materialized view reporting_ocpallcostlineitem_daily_summary;\nrefresh materialized view reporting_ocpallcostlineitem_project_daily_summary;\n """\n )', False, 'from django.db import migrations\n'), ((33, 88, 33, 106), 'django.db.models.DateField', 'models.DateField', ({}, 
{}), '()', False, 'from django.db import models\n'), ((36, 90, 36, 108), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((39, 83, 39, 101), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((42, 85, 42, 103), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((45, 90, 45, 108), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((48, 92, 48, 110), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((50, 92, 50, 110), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((51, 94, 51, 112), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((52, 94, 52, 115), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import models\n'), ((53, 90, 53, 108), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((54, 92, 54, 110), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((55, 97, 55, 115), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((56, 99, 56, 117), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((59, 18, 59, 112), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((63, 18, 65, 13), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((69, 18, 69, 113), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((73, 18, 73, 108), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((77, 18, 79, 13), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((83, 18, 83, 113), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((87, 18, 89, 13), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((93, 18, 93, 114), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((97, 18, 97, 109), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((101, 18, 101, 99), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((105, 18, 105, 117), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((109, 18, 109, 107), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((111, 80, 111, 98), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((112, 82, 112, 100), 'django.db.models.DateField', 'models.DateField', ({}, {}), '()', False, 'from django.db import models\n'), ((114, 44, 114, 115), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((118, 18, 120, 13), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n'), ((124, 18, 124, 110), 'django.db.models.Index', 'models.Index', (), '', False, 'from django.db import models\n')] |
iamabhishek0/sympy | sympy/tensor/tests/test_functions.py | c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd | from sympy.tensor.functions import TensorProduct
from sympy import MatrixSymbol, Matrix, Array
from sympy.abc import x, y, z
from sympy.abc import i, j, k, l
A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
C = MatrixSymbol("C", 3, 3)
def test_TensorProduct_construction():
assert TensorProduct(3, 4) == 12
assert isinstance(TensorProduct(A, A), TensorProduct)
expr = TensorProduct(TensorProduct(x, y), z)
assert expr == x*y*z
expr = TensorProduct(TensorProduct(A, B), C)
assert expr == TensorProduct(A, B, C)
expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]])
assert expr == Array([
[
[[0, -1], [1, 0]],
[[0, 0], [0, 0]]
],
[
[[0, 0], [0, 0]],
[[0, -1], [1, 0]]
]
])
def test_TensorProduct_shape():
expr = TensorProduct(3, 4, evaluate=False)
assert expr.shape == ()
assert expr.rank() == 0
expr = TensorProduct([1, 2], [x, y], evaluate=False)
assert expr.shape == (2, 2)
assert expr.rank() == 2
expr = TensorProduct(expr, expr, evaluate=False)
assert expr.shape == (2, 2, 2, 2)
assert expr.rank() == 4
expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False)
assert expr.shape == (2, 2, 2, 2)
assert expr.rank() == 4
def test_TensorProduct_getitem():
expr = TensorProduct(A, B)
assert expr[i, j, k, l] == A[i, j]*B[k, l]
| [((7, 4, 7, 27), 'sympy.MatrixSymbol', 'MatrixSymbol', ({(7, 17, 7, 20): '"""A"""', (7, 22, 7, 23): '3', (7, 25, 7, 26): '3'}, {}), "('A', 3, 3)", False, 'from sympy import MatrixSymbol, Matrix, Array\n'), ((8, 4, 8, 27), 'sympy.MatrixSymbol', 'MatrixSymbol', ({(8, 17, 8, 20): '"""B"""', (8, 22, 8, 23): '3', (8, 25, 8, 26): '3'}, {}), "('B', 3, 3)", False, 'from sympy import MatrixSymbol, Matrix, Array\n'), ((9, 4, 9, 27), 'sympy.MatrixSymbol', 'MatrixSymbol', ({(9, 17, 9, 20): '"""C"""', (9, 22, 9, 23): '3', (9, 25, 9, 26): '3'}, {}), "('C', 3, 3)", False, 'from sympy import MatrixSymbol, Matrix, Array\n'), ((37, 11, 37, 46), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', (), '', False, 'from sympy.tensor.functions import TensorProduct\n'), ((41, 11, 41, 56), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', (), '', False, 'from sympy.tensor.functions import TensorProduct\n'), ((44, 11, 44, 52), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', (), '', False, 'from sympy.tensor.functions import TensorProduct\n'), ((54, 11, 54, 30), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', ({(54, 25, 54, 26): 'A', (54, 28, 54, 29): 'B'}, {}), '(A, B)', False, 'from sympy.tensor.functions import TensorProduct\n'), ((13, 11, 13, 30), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', ({(13, 25, 13, 26): '(3)', (13, 28, 13, 29): '(4)'}, {}), '(3, 4)', False, 'from sympy.tensor.functions import TensorProduct\n'), ((14, 22, 14, 41), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', ({(14, 36, 14, 37): 'A', (14, 39, 14, 40): 'A'}, {}), '(A, A)', False, 'from sympy.tensor.functions import TensorProduct\n'), ((16, 25, 16, 44), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', ({(16, 39, 16, 40): 'x', (16, 42, 16, 43): 'y'}, {}), '(x, y)', False, 'from sympy.tensor.functions import TensorProduct\n'), ((19, 25, 19, 44), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', ({(19, 39, 19, 40): 'A', (19, 42, 19, 43): 'B'}, {}), '(A, B)', False, 'from sympy.tensor.functions import TensorProduct\n'), ((20, 19, 20, 41), 'sympy.tensor.functions.TensorProduct', 'TensorProduct', ({(20, 33, 20, 34): 'A', (20, 36, 20, 37): 'B', (20, 39, 20, 40): 'C'}, {}), '(A, B, C)', False, 'from sympy.tensor.functions import TensorProduct\n'), ((22, 25, 22, 38), 'sympy.Matrix.eye', 'Matrix.eye', ({(22, 36, 22, 37): '2'}, {}), '(2)', False, 'from sympy import MatrixSymbol, Matrix, Array\n'), ((23, 19, 32, 6), 'sympy.Array', 'Array', ({(23, 25, 32, 5): '[[[[0, -1], [1, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, -1], [1, 0]]]]'}, {}), '([[[[0, -1], [1, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, -1],\n [1, 0]]]])', False, 'from sympy import MatrixSymbol, Matrix, Array\n'), ((48, 25, 48, 38), 'sympy.Matrix.eye', 'Matrix.eye', ({(48, 36, 48, 37): '2'}, {}), '(2)', False, 'from sympy import MatrixSymbol, Matrix, Array\n')] |
Kgermando/sem | app/views.py | c76e97e1d526d4e92a925adb6bceee426f999655 | from django.shortcuts import render
# Create your views here.
class MultipleProxyMiddleware:
FORWARDED_FOR_FIELDS = [
'HTTP_X_FORWARDED_FOR',
'HTTP_X_FORWARDED_HOST',
'HTTP_X_FORWARDED_SERVER',
]
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
"""
Rewrites the proxy headers so that only the most
recent proxy is used.
"""
for field in self.FORWARDED_FOR_FIELDS:
if field in request.META:
if ',' in request.META[field]:
parts = request.META[field].split(',')
request.META[field] = parts[-1].strip()
return self.get_response(request)
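# Illustrative usage (not part of this file): to take effect, the middleware has
# to be registered in settings.py, typically near the top of the MIDDLEWARE list.
# The dotted path below assumes this module is importable as "app.views":
#   MIDDLEWARE = [
#       'app.views.MultipleProxyMiddleware',
#       ...  # the remaining middleware entries
#   ]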
def index(request):
context = {
}
template_name = 'pages/app/index.html'
return render(request, template_name, context)
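# Illustrative wiring (not part of this file): the index view is typically routed
# from a urls.py, e.g.
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('', views.index, name='index')]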
| [((32, 11, 32, 50), 'django.shortcuts.render', 'render', ({(32, 18, 32, 25): 'request', (32, 27, 32, 40): 'template_name', (32, 42, 32, 49): 'context'}, {}), '(request, template_name, context)', False, 'from django.shortcuts import render\n')] |
HansZimmer5000/LensComparison | webcrawler/crawler/spiders/baselensspider.py | e4d9b68211604c4569c4ca9b1e1b4fce2a8c1ea8 | # This module is about my webcrawler with the use of scrapy.
# Its a generell web crawler, but the import and use of GhAdapter makes it usefull for geizhals.de sites.
from abc import ABC, abstractmethod
import scrapy
class BaseLensSpider(scrapy.Spider, ABC):
@property
@abstractmethod
def adapter(self):
raise NotImplementedError()
#TODO: make the start_url thing abstract and initialliy with a call to adapter.START_URLS
@abstractmethod
def parse_lens_page(self, response):
raise NotImplementedError()
@abstractmethod
def create_lens_page_requests(self,response):
raise NotImplementedError()
@abstractmethod
def create_overview_page_request(self, response):
raise NotImplementedError()
def parse_overview_page(self,response):
for lens_page_request in self.create_lens_page_requests(response):
yield lens_page_request
for overview_page_request in self.create_overview_page_request(response):
yield overview_page_request
def parse(self, response):
return self.parse_overview_page(response)
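# --- Illustrative example (not part of the original module) ---
# A minimal concrete spider sketch showing how BaseLensSpider is intended to be
# subclassed. The start URL, CSS selectors and the adapter placeholder below are
# hypothetical; a real subclass would return its site-specific adapter (e.g. the
# GhAdapter mentioned above) from the `adapter` property.
class ExampleLensSpider(BaseLensSpider):
    name = 'example_lens_spider'
    start_urls = ['https://example.com/lenses?page=1']  # hypothetical overview page

    @property
    def adapter(self):
        return None  # placeholder; a real spider returns its adapter object here

    def parse_lens_page(self, response):
        # Extract the fields of a single lens page (selectors are placeholders).
        yield {'name': response.css('h1::text').get()}

    def create_lens_page_requests(self, response):
        for href in response.css('a.lens::attr(href)').getall():
            yield response.follow(href, callback=self.parse_lens_page)

    def create_overview_page_request(self, response):
        next_page = response.css('a.next::attr(href)').get()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse_overview_page)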
| [] |
fjbriones/deep-text-recognition-benchmark | byol_train.py | c85d12aa56495fe221656bac4c8cb159a28456b1 | import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from simclr_model import FeaturesModel as Model
from test import validation
from byol_pytorch import BYOL
from imgaug import augmenters as iaa
import imgaug as ia
from tqdm import tqdm
import matplotlib.pyplot as plt
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
ia.seed(1)
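    # imgaug augmentation pipeline: each image gets 1-5 randomly chosen transforms (contrast, blur, crops, sharpen, piecewise affine, perspective) in random order.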
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
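    # Wrap the recognition model in a BYOL self-supervised learner; the imgaug pipeline above produces the augmented views of each image.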
byol_learner = BYOL(
model,
image_size=(32,100),
hidden_layer=-1,
channels=1,
augment_fn=image_transforms,
augmented=True)
print(byol_learner)
# filter that only require gradient decent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, byol_learner.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# setup optimizer
if opt.optimizer == 'adam':
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
elif opt.optimizer == 'adadelta':
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay)
elif opt.optimizer == 'sgd':
optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov)
else:
raise Exception('Unknown optimizer')
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
#LR Scheduler:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1)
best_loss = None
iteration = start_iter
print(device)
loss_avg = Averager()
valid_loss_avg = Averager()
# kl_loss_avg = Averager()
# kl_loss = torch.nn.KLDivLoss()
epoch = 0
while(True):
# train part
for i in tqdm(range(opt.valInterval)):
image_tensors, _ = train_dataset.get_batch()
image = image_tensors.to(device)
optimizer.zero_grad()
loss = byol_learner(image)
loss.backward()
if opt.grad_clip:
torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip)
optimizer.step()
scheduler.step()
byol_learner.update_moving_average()
loss_avg.add(loss)
            if iteration == 0:
                print("Iteration {:06d} Loss: {:.04f}".format(iteration, loss_avg.val()))
iteration += 1
byol_learner.eval()
model.eval()
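        # Compute the BYOL loss on the validation set with gradients disabled.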
with torch.no_grad():
for image_tensors, _ in valid_loader:
image = image_tensors.to(device)
val_loss = byol_learner(image)
valid_loss_avg.add(val_loss)
# features = model(image)
# features = features.view(-1, 26, features.shape[1])
# kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):])
# kl_loss_avg.add(kl_div)
model.train()
byol_learner.train()
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
log.write("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\n')
print("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()))
if best_loss is None:
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
elif best_loss > valid_loss_avg.val():
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
scheduler.step()
loss_avg.reset()
valid_loss_avg.reset()
if epoch % 5 == 0:
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
if (iteration + 1) >= opt.num_iter:
print('end the training')
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
sys.exit()
epoch +=1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', required=True, help='path to training dataset')
parser.add_argument('--valid_data', required=True, help='path to validation dataset')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help="Optimizer")
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
    parser.add_argument('--baiduCTC', action='store_true', help="use Baidu's warp-ctc implementation for CTC")
""" Data processing """
parser.add_argument('--select_data', type=str, default='MJ-ST',
help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True,
help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay')
parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during training')
parser.add_argument('--final_feature', type=int, default=256, help='the size of the output of the final layer')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        opt.character = string.printable[:-6]  # same as the ASTER setting (uses 94 chars).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
""" previous version
    print('To equalize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)
opt.batch_size = opt.batch_size * opt.num_gpu
print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
    If you don't care about it, just comment out these lines.)
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
| [((38, 20, 38, 47), 'simclr_dataset.Batch_Balanced_Dataset', 'Batch_Balanced_Dataset', ({(38, 43, 38, 46): 'opt'}, {}), '(opt)', False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((42, 4, 42, 14), 'imgaug.seed', 'ia.seed', ({(42, 12, 42, 13): '(1)'}, {}), '(1)', True, 'import imgaug as ia\n'), ((53, 25, 53, 131), 'simclr_dataset.AlignCollate', 'AlignCollate', (), '', False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((54, 39, 54, 89), 'simclr_dataset.hierarchical_dataset', 'hierarchical_dataset', (), '', False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((67, 12, 67, 22), 'simclr_model.FeaturesModel', 'Model', ({(67, 18, 67, 21): 'opt'}, {}), '(opt)', True, 'from simclr_model import FeaturesModel as Model\n'), ((109, 19, 115, 23), 'byol_pytorch.BYOL', 'BYOL', (), '', False, 'from byol_pytorch import BYOL\n'), ((170, 15, 170, 25), 'utils.Averager', 'Averager', ({}, {}), '()', False, 'from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager\n'), ((171, 21, 171, 31), 'utils.Averager', 'Averager', ({}, {}), '()', False, 'from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager\n'), ((245, 13, 245, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((302, 4, 302, 64), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((311, 4, 311, 31), 'random.seed', 'random.seed', ({(311, 16, 311, 30): 'opt.manualSeed'}, {}), '(opt.manualSeed)', False, 'import random\n'), ((312, 4, 312, 34), 'numpy.random.seed', 'np.random.seed', ({(312, 19, 312, 33): 'opt.manualSeed'}, {}), '(opt.manualSeed)', True, 'import numpy as np\n'), ((313, 4, 313, 37), 'torch.manual_seed', 'torch.manual_seed', ({(313, 22, 313, 36): 'opt.manualSeed'}, {}), '(opt.manualSeed)', False, 'import torch\n'), ((314, 4, 314, 42), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', ({(314, 27, 314, 41): 'opt.manualSeed'}, {}), '(opt.manualSeed)', False, 'import torch\n'), ((318, 18, 318, 43), 'torch.cuda.device_count', 'torch.cuda.device_count', ({}, {}), '()', False, 'import torch\n'), ((26, 32, 26, 57), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((130, 20, 130, 88), 'torch.optim.Adam', 'optim.Adam', (), '', True, 'import torch.optim as optim\n'), ((88, 12, 88, 40), 'torch.nn.DataParallel', 'torch.nn.DataParallel', ({(88, 34, 88, 39): 'model'}, {}), '(model)', False, 'import torch\n'), ((132, 20, 132, 123), 'torch.optim.Adadelta', 'optim.Adadelta', (), '', True, 'import torch.optim as optim\n'), ((204, 13, 204, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((239, 12, 239, 22), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((79, 16, 79, 42), 'torch.nn.init.constant_', 'init.constant_', ({(79, 31, 79, 36): 'param', (79, 38, 79, 41): '(0.0)'}, {}), '(param, 0.0)', True, 'import torch.nn.init as init\n'), ((93, 34, 93, 61), 'torch.load', 'torch.load', ({(93, 45, 93, 60): 'opt.saved_model'}, {}), '(opt.saved_model)', False, 'import torch\n'), ((95, 34, 95, 61), 'torch.load', 'torch.load', ({(95, 45, 95, 60): 'opt.saved_model'}, {}), '(opt.saved_model)', False, 'import torch\n'), ((134, 20, 134, 138), 'torch.optim.SGD', 'optim.SGD', (), '', True, 'import torch.optim as optim\n'), ((44, 27, 44, 57), 'imgaug.augmenters.LinearContrast', 
'iaa.LinearContrast', ({(44, 46, 44, 56): '(0.5, 1.0)'}, {}), '((0.5, 1.0))', True, 'from imgaug import augmenters as iaa\n'), ((45, 26, 45, 54), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ({(45, 43, 45, 53): '(0.5, 1.5)'}, {}), '((0.5, 1.5))', True, 'from imgaug import augmenters as iaa\n'), ((46, 26, 46, 95), 'imgaug.augmenters.Crop', 'iaa.Crop', (), '', True, 'from imgaug import augmenters as iaa\n'), ((47, 26, 47, 97), 'imgaug.augmenters.Crop', 'iaa.Crop', (), '', True, 'from imgaug import augmenters as iaa\n'), ((48, 26, 48, 77), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', (), '', True, 'from imgaug import augmenters as iaa\n'), ((49, 26, 49, 78), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', (), '', True, 'from imgaug import augmenters as iaa\n'), ((50, 26, 50, 70), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', (), '', True, 'from imgaug import augmenters as iaa\n'), ((81, 16, 81, 43), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', ({(81, 37, 81, 42): 'param'}, {}), '(param)', True, 'import torch.nn.init as init\n'), ((100, 27, 100, 57), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', ({(100, 46, 100, 56): '(0.5, 1.0)'}, {}), '((0.5, 1.0))', True, 'from imgaug import augmenters as iaa\n'), ((101, 26, 101, 54), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', ({(101, 43, 101, 53): '(0.5, 1.5)'}, {}), '((0.5, 1.5))', True, 'from imgaug import augmenters as iaa\n'), ((102, 26, 102, 95), 'imgaug.augmenters.Crop', 'iaa.Crop', (), '', True, 'from imgaug import augmenters as iaa\n'), ((103, 26, 103, 97), 'imgaug.augmenters.Crop', 'iaa.Crop', (), '', True, 'from imgaug import augmenters as iaa\n'), ((104, 26, 104, 77), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', (), '', True, 'from imgaug import augmenters as iaa\n'), ((105, 26, 105, 78), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', (), '', True, 'from imgaug import augmenters as iaa\n'), ((106, 26, 106, 70), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', (), '', True, 'from imgaug import augmenters as iaa\n')] |
dulin/tornado-test | app/__init__.py | 8ceeb9f2b50b4cd0f18baa9149140721feec1925 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- mode: python -*-
import aiopg
import psycopg2
import tornado.locks
from tornado.options import define, options
from tornado.web import Application
from app.application import Application
define('port', default=8080, help="listening port")
define('bind_address', default="", help="bind address")
define("db_host", default="127.0.0.1", help="database host")
define("db_port", default=5432, help="database port")
define("db_database", default="tornado", help="database name")
define("db_user", default="tornado", help="database user")
define("db_password", default="tornado", help="database password")
async def maybe_create_tables(db):
try:
with (await db.cursor()) as cur:
await cur.execute("SELECT COUNT(*) FROM schema LIMIT 1")
await cur.fetchone()
except psycopg2.ProgrammingError:
print("Database error!")
async def main():
options.parse_command_line()
async with aiopg.create_pool(
host=options.db_host,
port=options.db_port,
user=options.db_user,
password=options.db_password,
dbname=options.db_database) as db:
await maybe_create_tables(db)
app = Application(db)
app.listen(options.port, options.bind_address, xheaders=True)
print("Listening on http://%s:%i" % (options.bind_address, options.port))
shutdown_event = tornado.locks.Event()
await shutdown_event.wait()
| [((13, 0, 13, 51), 'tornado.options.define', 'define', (), '', False, 'from tornado.options import define, options\n'), ((14, 0, 14, 55), 'tornado.options.define', 'define', (), '', False, 'from tornado.options import define, options\n'), ((15, 0, 15, 60), 'tornado.options.define', 'define', (), '', False, 'from tornado.options import define, options\n'), ((16, 0, 16, 53), 'tornado.options.define', 'define', (), '', False, 'from tornado.options import define, options\n'), ((17, 0, 17, 62), 'tornado.options.define', 'define', (), '', False, 'from tornado.options import define, options\n'), ((18, 0, 18, 58), 'tornado.options.define', 'define', (), '', False, 'from tornado.options import define, options\n'), ((19, 0, 19, 66), 'tornado.options.define', 'define', (), '', False, 'from tornado.options import define, options\n'), ((32, 4, 32, 32), 'tornado.options.options.parse_command_line', 'options.parse_command_line', ({}, {}), '()', False, 'from tornado.options import define, options\n'), ((34, 15, 39, 39), 'aiopg.create_pool', 'aiopg.create_pool', (), '', False, 'import aiopg\n'), ((41, 14, 41, 29), 'app.application.Application', 'Application', ({(41, 26, 41, 28): 'db'}, {}), '(db)', False, 'from app.application import Application\n')] |
KarlTDebiec/PipeScaler | pipescaler/core/stage.py | b990ece8f3dd2c3506c226ed871871997fc57beb | #!/usr/bin/env python
# pipescaler/core/stage.py
#
# Copyright (C) 2020-2021 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license.
from __future__ import annotations
from abc import ABC, abstractmethod
from importlib.util import module_from_spec, spec_from_file_location
from typing import Any, List, Optional
from pipescaler.common import validate_input_path
def initialize_stage(stage_name, stage_conf, modules):
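    """Instantiate the stage class named by the first key of stage_conf, searching the given modules and falling back to loading the class from the 'infile' path in the stage's arguments."""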
# Get stage's class name
stage_cls_name = next(iter(stage_conf)) # get first key
# Get stage's configuration
stage_args = stage_conf.get(stage_cls_name)
if stage_args is None:
stage_args = {}
# Get stage's class
stage_cls = None
for module in modules:
try:
stage_cls = getattr(module, stage_cls_name)
except AttributeError:
continue
if stage_cls is None:
if "infile" in stage_args:
module_infile = validate_input_path(stage_args.pop("infile"))
spec = spec_from_file_location(stage_cls_name, module_infile)
module = module_from_spec(spec)
spec.loader.exec_module(module)
stage_cls = getattr(module, stage_cls_name)
else:
raise KeyError(f"Class '{stage_cls_name}' not found")
return stage_cls(name=stage_name, **stage_args)
class Stage(ABC):
"""Base class for stages."""
trim_suffixes = None
extension = "png"
def __init__(
self, name: Optional[str] = None, desc: Optional[str] = None, **kwargs: Any
) -> None:
"""
Validates and stores static configuration.
Arguments:
name (Optional[str]): Name of stage
desc (Optional[str]): Description of stage
kwargs (Any): Additional keyword arguments
"""
if name is not None:
self.name = name
else:
self.name = self.__class__.__name__
if desc is not None:
self.desc = desc
else:
self.desc = self.name
def __repr__(self) -> str:
return self.desc
def __str__(self) -> str:
return self.name
@property
@abstractmethod
def inlets(self) -> List[str]:
raise NotImplementedError()
@property
@abstractmethod
def outlets(self) -> List[str]:
raise NotImplementedError()
| [((37, 19, 37, 73), 'importlib.util.spec_from_file_location', 'spec_from_file_location', ({(37, 43, 37, 57): 'stage_cls_name', (37, 59, 37, 72): 'module_infile'}, {}), '(stage_cls_name, module_infile)', False, 'from importlib.util import module_from_spec, spec_from_file_location\n'), ((38, 21, 38, 43), 'importlib.util.module_from_spec', 'module_from_spec', ({(38, 38, 38, 42): 'spec'}, {}), '(spec)', False, 'from importlib.util import module_from_spec, spec_from_file_location\n')] |